diff --git "a/2822.jsonl" "b/2822.jsonl" new file mode 100644--- /dev/null +++ "b/2822.jsonl" @@ -0,0 +1,815 @@ +{"seq_id":"181717523","text":"\r\n# find the first character that is repeated in a string\r\n\r\n# O(n), and minimal memory as worst case dict is len(#symbols-1)\r\n\r\ndef findFirstChar(x):\r\n\tchar_dict = {}\r\n\tfor char in x:\r\n\t\tif char in char_dict:\r\n\t\t\tprint(char)\r\n\t\t\treturn\r\n\t\telse:\r\n\t\t\tchar_dict[char] = 1;\r\n\r\n\treturn None\r\n\r\n\r\nstr1 = \"absdfsdfasdfg adfgadfg\"\r\nfindFirstChar(str1)","sub_path":"same_character.py","file_name":"same_character.py","file_ext":"py","file_size_in_byte":344,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"19343164","text":"# import platform\n# import subprocess\n# command = ['netstat', '-p', 'tcp', '-n']\n# p = subprocess.Popen(command, stdout=subprocess.PIPE)\n# print(\"Loading...\")\n# text = p.stdout.read()\n# # output fetched in the \"Text\" Variable as a string, later data can be extracted with regular expressions.\n# # one = line by line\n# one = list(text.decode().split(\"\\n\"))\n# # Get Column Names\n# two = one[3].split(\" \")\n# three = []\n# two = one[1]\n# two = two.split(\" \")\n# temp = two[1:]\n# temp2 = []\n# print(temp)\n# retcode = p.wait()\n\n# Code for IP Location\n\n# sample_ip = output_in_list[1][4]\n# import subprocess\n# sample_ip = '52.109.60.3'\n# command = ['python3','./IPGeoLocation-master/ipgeolocation.py','-t',sample_ip]\n# p = subprocess.Popen(command, stdout=subprocess.PIPE)\n# text = p.stdout.read()\n# # text = text[len(text)-15:]\n# text = text[-50:-1]\n# temp = str(text)\n# temp = list(temp)\n# location = []\n# flag = False\n# comma = 0\n# for i in range(0, len(temp)):\n# if temp[i] =='@' or comma <=2 and flag:\n# flag = True\n# location.append(temp[i])\n# if temp[i] == ',':\n# comma = comma+1\n# if comma == 2:\n# break\n# location = location[1:-1]\n# location = ''.join(location)\n# print(location)\n# retcode = p.wait()\nimport requests\nparams = {'apikey': '838b800fcc312492d40b7a665447b6027823817ac272fc96b48c78a0b3436ccb', 'url': '132.145.34.87'}\nresponse = requests.post('https://www.virustotal.com/vtapi/v2/url/scan', data=params)\njson_response = response.json()\n# print(json_response)\nheaders = {\n \"Accept-Encoding\": \"gzip, deflate\",\n \"User-Agent\" : \"gzip, My Python requests library example client or username\"\n }\nparams = {'apikey': '838b800fcc312492d40b7a665447b6027823817ac272fc96b48c78a0b3436ccb', 'resource':'132.145.34.87'}\nresponse = requests.post('https://www.virustotal.com/vtapi/v2/url/report',params=params, headers=headers)\njson_response = response.json()\n# print(json_response)\n\n\nimport csv\nimport requests\n# File scanning\nparams = {'apikey': '838b800fcc312492d40b7a665447b6027823817ac272fc96b48c78a0b3436ccb'}\nfiles = {'file': ('myfile.exe', open('../VT/myfile.exe', 'rb'))}\nresponse = requests.post('https://www.virustotal.com/vtapi/v2/file/scan', files=files, params=params)\njson_response = response.json()\njson_response = dict(json_response)\nresource = json_response['resource']\n\nheaders = {\n \"Accept-Encoding\": \"gzip, deflate\",\n \"User-Agent\" : \"gzip, My Python requests library example client or username\"\n }\nparams = {'apikey': '838b800fcc312492d40b7a665447b6027823817ac272fc96b48c78a0b3436ccb', 'resource':resource}\nresponse = requests.post('https://www.virustotal.com/vtapi/v2/file/report',params=params, headers=headers)\njson_response = response.json()\nresult = 
dict(json_response)\n\n\n\nwith open('mycsvfile.csv', 'w') as f: # Just use 'w' mode in 3.x\n w = csv.DictWriter(f, result.keys())\n w.writeheader()\n w.writerow(result)\n","sub_path":"Integrated/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":2920,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"134493908","text":"'''\ndef bin_search(arr, n, x):\n l = 0\n h = n - 1\n while l <= h:\n mid = (l + h)//2\n if arr[mid] <= x:\n l = mid + 1\n else:\n h = mid - 1\n return h\nimport heapq\nfor _ in range(int(input())):\n nov = int(input())\n sizes = []\n eater = []\n ans = 0\n for i in range(nov):\n a, b = [int(x) for x in input().split()]\n sizes.append(a)\n eater.append(b)\n heapq.heapify(sizes)\n ans = 0\n for i in eater:\n ans += i < sizes[0]\n\n\n\n for i in eater:\n indx = bin_search(sizes, nov, i)\n if indx == -1:\n ans += 1\n\n print(ans)\n '''\nfrom collections import defaultdict\nfor _ in range(int(input())):\n nof = int(input())\n eater= []\n sizes = []\n fish = defaultdict(list)\n for _ in range(nof):\n s, e = [int(x) for x in input().split()]\n sizes.append(s)\n fish[s].append(e)\n sets = set()\n #print(fish)\n mins = min(sizes)\n cnt = 0\n #print(mins)\n for i in fish.keys():\n print(i)\n for j in fish[i]:\n if j < mins:cnt += 1\n\n print(cnt)\n\n\n\n\n\n\n\n\n\n\n","sub_path":"HackerEarth/august_circuit_18_4_Noor.py","file_name":"august_circuit_18_4_Noor.py","file_ext":"py","file_size_in_byte":1130,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"167093283","text":"import csv, os, re, sys\nfrom decimal import Decimal\n\nfrom django.contrib.gis.geos import Point\n\nfrom .models import *\nfrom proposal.models import Proposal\n\ndef rows_import(rows):\n i = 0\n for row in rows:\n if row[\"Total/Subtotal\"] == \"Subtotal\":\n # Add BudgetItems\n continue\n\n project = Project()\n project.name = row[\"Project\"]\n project.department = row[\"Department\"]\n project.category = row[\"Type\"]\n project.approved = row[\"Status\"] == \"Approved\"\n\n project.save()\n\n i += 1\n\n for k, v in row.items():\n if not v or not k.isdigit():\n continue\n\n year = int(k)\n amount = Decimal(v[1:])\n project.budgetitem_set.create(year=year, budget=amount)\n\n address = row[\"Address\"]\n if address:\n m = re.search(r\"\\((-?\\d+\\.\\d+), (-?\\d+\\.\\d+)\\)\", address)\n if m:\n point = Point(x=float(m.group(2)),\n y=float(m.group(1)))\n address_line = address.split(\"\\n\")\n\n # Use placeholder for case number\n project.proposals.create(case_number=\"CP %i\" % i,\n address=address_line[0],\n location=point)\n\n i += 1\n\ndef csv_import(infile):\n with open(infile, \"r\") as f:\n rows_import(csv.DictReader(f))\n","sub_path":"server/project/project_import.py","file_name":"project_import.py","file_ext":"py","file_size_in_byte":1436,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"648841718","text":"from __future__ import print_function\nimport sys\nimport os.path\nimport math\nimport collections\nif os.path.isfile(\"test.inp\"):\n sys.stdin = open(\"test.inp\", \"r\")\n sys.stdout = open(\"test.out\", \"w\")\nelif os.path.isfile(\"sodif.inp\"):\n sys.stdin = open(\"sodif.inp\", \"r\")\n sys.stdout = open(\"sodif.out\", \"w\")\ndef eprint(*args, **kwargs):\n print(*args, file=sys.stderr, **kwargs)\ndef erange (a, b):\n return range (a, b + 1)\nntest = 1\n#ntest = int(input())\nfor itest in erange(1, ntest):\n s = \"a\"\n l = 
range (0, 9, 2)\n l = l * 3\n print (l)","sub_path":"sodif.py","file_name":"sodif.py","file_ext":"py","file_size_in_byte":561,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"524222924","text":"'''\nanthor=sjie\n'''\nimport requests\nfrom lxml import etree\ndef get_movie():\n session = requests.Session()\n for id in range(0,251,25): #取0~250之间,每次相隔25个数\n URL = 'http://movie.douban.com/top250/?start=' + str(id) #定义爬取得url\n req = session.get(URL)\n req.encoding = 'utf8'\t\t\t # 设置网页编码格式\n root= etree.HTML(req.content)\n items = root.xpath('//ol/li/div[@class=\"item\"]')\n for item in items:\n rank,name,alias,rating_num,quote,url = \"\",\"\",\"\",\"\",\"\",\"\"\n try:\n url = item.xpath('./div[@class=\"pic\"]/a/@href')[0]\n rank = item.xpath('./div[@class=\"pic\"]/em/text()')[0]\n title = item.xpath('./div[@class=\"info\"]//a/span[@class=\"title\"]/text()')\n name = title[0].encode('gb2312','ignore').decode('gb2312')\n alias = title[1].encode('gb2312','ignore').decode('gb2312') if len(title)==2 else \"\"\n rating_num = item.xpath('.//div[@class=\"bd\"]//span[@class=\"rating_num\"]/text()')[0]\n quote_tag = item.xpath('.//div[@class=\"bd\"]//span[@class=\"inq\"]')\n print(name)\n except:\n print('falid!')\n pass\nget_movie()\n","sub_path":"python/lotlip/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1267,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"378069926","text":"\n# python standard library\nimport re\nimport socket\n\n# this package\nfrom theape import ApeError\nfrom theape import BaseClass\n\nSTART_OF_STRING = r'^'\nANYTHING = r'.'\nZERO_OR_MORE = r'*'\nGROUP = r'({0})'\n\nEVERYTHING = GROUP.format(ANYTHING + ZERO_OR_MORE)\nNOTHING = r'a' + START_OF_STRING\nNEWLINE = '\\n'\n\nclass CommandConstants(object):\n \"\"\"\n Constants for the Command\n \"\"\"\n __slots__ = ()\n # defaults\n default_arguments = ''\n default_timeout = 5\n default_trap_errors = True\n default_data_expression = EVERYTHING\n default_error_expression = NOTHING\n\ndef socketerrors(method, *args, **kwargs):\n \"\"\"\n Traps errors if self.trap_errors is true, raises ApeError otherwise\n\n also expects that the object has connection,logger, not_available attributes\n\n :param:\n\n - `method`: method instance\n \"\"\"\n def wrapped(self, *args, **kwargs):\n try:\n return method(self, *args, **kwargs)\n except socket.error as error:\n message = \"{e}: Error with connection to {c}\".format(c=self.connection,\n e=type(error))\n self.logger.error(message)\n if not self.trap_errors:\n raise ApeError(\"Problem with connection executing '{0}'\".format(self.command_arguments))\n return self.not_available\n return wrapped\n\nclass TheCommand(BaseClass):\n \"\"\"\n Command to get output from a device\n \"\"\"\n def __init__(self, connection, command,\n data_expression=None,\n error_expression=None,\n arguments=None,\n identifier=None,\n timeout=CommandConstants.default_timeout,\n trap_errors=CommandConstants.default_trap_errors,\n not_available=None):\n \"\"\"\n The Command constructor\n\n :param:\n\n - `identifier`: string to identify this object\n - `connection`: Connection to send command to\n - `command`: string to send to the connection\n - `data_expression` regular expression to get data from command output\n - `error_expression`: regular expression to match fatal errors\n - `arguments`: string of arguments to add to the command\n - `timeout`: seconds to wait for output from device\n - 
`trap_errors`: if True, log but don't raise socket errors\n - `not_available`: What to return if data not matched in output\n \"\"\"\n super(TheCommand, self).__init__()\n self.connection = connection\n self._command = None\n self.command = command\n self._arguments = None\n self.arguments = arguments\n self._data_expression = None\n self.data_expression = data_expression\n self._error_expression = None\n self.error_expression = error_expression\n\n self.timeout = timeout\n self._identifier = identifier\n self.trap_errors = trap_errors\n self._command_arguments = None\n self.not_available = not_available\n return\n\n @property\n def command(self):\n \"\"\"\n String to send to the connection\n \"\"\"\n return self._command\n\n @command.setter\n def command(self, cmd):\n \"\"\"\n sets the command, resets the command_arguments\n \"\"\"\n self._command = cmd\n self._command_arguments = None\n return\n\n @property\n def arguments(self):\n \"\"\"\n arguments for the command (separated so they can be updated separately)\n \"\"\"\n return self._arguments\n\n @arguments.setter\n def arguments(self, args):\n \"\"\"\n sets the arguments, resets the command_arguments\n\n :param:\n\n - `args`: string of arguments for the command or None\n \"\"\"\n self._arguments = args\n self._command_arguments = None\n return\n\n @property\n def command_arguments(self):\n \"\"\"\n A compilation of command and arguments (with newline appended)\n \"\"\"\n if self._command_arguments is None:\n suffix = NEWLINE\n if self.arguments is not None:\n suffix = \" {0}{1}\".format(self.arguments, suffix)\n self._command_arguments = \"{0}{1}\".format(self.command, suffix)\n return self._command_arguments\n\n @property\n def data_expression(self):\n \"\"\"\n compiled regular expression to extract data from the command output\n \"\"\"\n if self._data_expression is None:\n self._data_expression = re.compile(CommandConstants.default_data_expression)\n return self._data_expression\n\n @data_expression.setter\n def data_expression(self, regex):\n \"\"\"\n compiles and sets the regular expression\n\n :param:\n\n - `regex`: regular expression to get data from the output\n \"\"\"\n if regex is not None:\n regex = re.compile(regex)\n self._data_expression = regex\n return\n\n @property\n def error_expression(self):\n \"\"\"\n regular expression -- if matched, raise Exception\n \"\"\"\n if self._error_expression is None:\n self._error_expression = re.compile(CommandConstants.default_error_expression)\n return self._error_expression\n\n @error_expression.setter\n def error_expression(self, regex):\n \"\"\"\n Compiles and sets the error_expression\n\n :param:\n\n - `regex`: regular expression to find fatal errors\n \"\"\"\n if regex is not None:\n regex = re.compile(regex)\n self._error_expression = regex\n return\n\n @property\n def identifier(self):\n \"\"\"\n A string identifier to distinguish this command\n\n * Uses the first token in the command-string if not set\n \"\"\"\n if self._identifier is None:\n self._identifier = self.command.split()[0]\n return self._identifier\n\n @socketerrors\n def __call__(self):\n \"\"\"\n Sends the command to the connection and extracts data from the output\n\n :raise: ApeError if data matched but no group found\n \"\"\"\n stdin, stdout, stderr = self.connection.exec_command(self.command_arguments,\n timeout=self.timeout)\n data = self.not_available\n for line in stdout:\n self.logger.debug(line)\n match = self.data_expression.search(line)\n if match:\n try:\n data = match.groups()[0]\n except IndexError 
as error:\n self.logger.error(error)\n raise ApeError(\"Data Expression '{0}' missing group to extract data\".format(self.data_expression))\n self.logger.debug(\"Matched: {0}\".format(data))\n break\n \n for line in stderr:\n self.logger.error(line)\n if self.error_expression.search(line):\n raise ApeError(\"Fatal Error: '{0}' running command '{1}'\".format(line,\n self.command_arguments))\n return data\n# end class TheCommand","sub_path":"theape/parts/command/command.py","file_name":"command.py","file_ext":"py","file_size_in_byte":7215,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"145001016","text":"#!/usr/bin/env python\r\n# -*- coding: utf-8 -*-\r\n# @Time : 2017/7/6 17:35\r\n# @Author : zhl\r\n# @Site : \r\n# @File : email_send.py\r\n# @Software: PyCharm\r\nimport random, string\r\nimport hashlib\r\n\r\nfrom users.models import EmailVerifyRecord\r\n\r\nfrom django.core.mail import send_mail\r\nfrom django.conf import settings\r\n\r\n\r\ndef random_str():\r\n \"\"\"Generate an 8-character verification code\"\"\"\r\n str = ''\r\n chars = string.ascii_letters + string.digits\r\n length = len(chars) - 1\r\n ran = random.Random()\r\n for i in range(8):\r\n str += chars[ran.randint(0, length)]\r\n m = hashlib.md5() # hash with md5\r\n m.update(str)\r\n ret = m.hexdigest() # the hashed string is not stored in the database;\r\n # it is fetched and checked again at verification time\r\n return str\r\n\r\n\r\ndef send_email(email, send_type=\"注册\"):\r\n email_record = EmailVerifyRecord()\r\n random_code = random_str()\r\n email_record.email = email\r\n email_record.code = random_code\r\n email_record.send_type = send_type\r\n email_record.save()\r\n email_title = \"\"\r\n email_body = \"\"\r\n if send_type == \"注册\":\r\n email_title = '欢迎注册'\r\n a_href = \"http://127.0.0.1:8000/user/active/{0}\".format(random_code)\r\n email_body = '欢迎注册 点击下面的连接进行激活:' + a_href + ''\r\n elif send_type == \"忘记密码\":\r\n email_title = '密码重置'\r\n a_href = \"http://127.0.0.1:8000/user/reset/{0}\".format(random_code)\r\n email_body = '点击下面的连接进行密码重置:' + a_href + ''\r\n statue = send_mail(email_title, '', settings.EMAIL_FROM, [email], html_message=email_body)\r\n return statue\r\n","sub_path":"eshop/utils/email_send.py","file_name":"email_send.py","file_ext":"py","file_size_in_byte":1712,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"181402301","text":"import os\nimport pandas as pd\n\n\nbase_path = './opinions'\nprint(*os.listdir(base_path))\n\nproduct_id = input('Product ID: ') or '85910996'\n\nopinions = pd.read_json(os.path.join(base_path, product_id + '.json'))\nopinions = opinions.set_index('opinion_id')\n\naverage_score = opinions.stars.mean().round(2)\npros = opinions.pros.count()\ncons = opinions.cons.count()\n\nprint(average_score)\nprint(pros, cons)\n","sub_path":"analyzer.py","file_name":"analyzer.py","file_ext":"py","file_size_in_byte":399,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"368028688","text":"import random\nimport time\nimport RPi.GPIO as GPIO\nGPIO.setmode(GPIO.BCM)\nbuttonPin = 23\nGPIO.setup(buttonPin, GPIO.IN, pull_up_down=GPIO.PUD_UP)\n\n# Import of classes\nfrom classes.print_manager import PrintManager\nfrom classes.random_manager import RandomManager\n\n# Managers\nprintManager = PrintManager()\nrandomManager = RandomManager();\n\npreviousButtonState = GPIO.input(buttonPin)\n\nrandomNumber = True\n\n# Event loop\nwhile randomNumber != None:\n\tfrontButtonState = GPIO.input(buttonPin)\n\t\n\tif (frontButtonState == 
False):\n\t\trandomNumber = randomManager.get()\n\t\t\n\t\tif (randomNumber != None):\n\t\t\tprintManager.printImage(\"advent.jpg\")\n\t\t\tprintManager.printText(\"Kaere kollega\");\n\t\t\tprintManager.printText(\"dit advents-gave-nummer er:\");\n\t\t\tprintManager.printBigText(randomNumber)\n\t\t\tprintManager.printText(\" \");\n\t\t\tprintManager.printText(\"Kaerlig hilsen\");\n\t\t\tprintManager.printManager(\"Disconnect\");\n\n","sub_path":"src/Disconnected.py","file_name":"Disconnected.py","file_ext":"py","file_size_in_byte":899,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"7437379","text":"import logging\nimport random\nfrom datetime import datetime\n\nimport emoji\nimport pymongo\nimport telebot\n\nfrom telebot import types\n\n# ------------------------------------ #\n# TIME FORMAT #\n# ------------------------------------ #\nTIME_FORMAT = '%Y-%m-%dT%H:%M:%S.%fZ'\n# TIME_FORMAT = '%Y-%m-%dT%H:%M:%S.%f-06:00' # FIX: HERE!\n\n# ------------------------------------ #\n# MESSAGES #\n# ------------------------------------ #\nMESSAGES = {}\n\n# absent/present\nmsg = ':bust_in_silhouette: '\nmsg += 'حضور غیاب جلسه' + '\\u200c' + 'ی' + ' ' + 'امروز.'\nMESSAGES['absent/present'] = msg\n\n# vote\nmsg = ':high_voltage: '\nmsg += 'لطفا به کتاب \"{book_name}\" امتیاز دهید.'\nMESSAGES['vote'] = msg\n\n# mark as read\nmsg = ':white_heavy_check_mark: کتاب \"{book_name}\" به فهرست کتاب های {status} اضافه شد.'\nMESSAGES['mark as read'] = msg\n\n# next session book\nmsg = ':closed_book: کتاب انتخاب شده برای جلسه' + '\\u200c' + 'ی' + ' ' + 'بعد: {book_name} ({book_type})'\nMESSAGES['next session book'] = msg\n\n# no book to read\nmsg = \":cross_mark: هیچ کتاب خوانده نشده ای برای هفته\" + \"\\u200c\" + \"ی\" + \" \" + \"بعد وجود ندارد.\"\nMESSAGES['no book to read'] = msg\n\n# not exist book\nmsg = \":cross_mark: چنین کتابی در فهرست کتاب ها وجود ندارد.\"\nMESSAGES['not exist book'] = msg\n\n# ------------------------------------ #\n# KEYBOARDS #\n# ------------------------------------ #\n# Keyboards\nkeyboards = {'inline': {},\n 'reply': {}\n }\nkeys = {\n 'inline': {},\n 'reply': {}\n}\n\n# ----------------------- #\n# INLINE #\n# ----------------------- #\n\n# ======================= #\n# ======== KEYs ========= #\n# ======================= #\nkeys['inline']['present'] = types.InlineKeyboardButton(\"حاضر\", callback_data=\"Present\")\nkeys['inline']['absent'] = types.InlineKeyboardButton(\"غایب\", callback_data=\"Absent\")\n\nkeys['inline']['next_'] = types.InlineKeyboardButton(\">>\", callback_data=\"Next Session\")\nkeys['inline']['back_'] = types.InlineKeyboardButton(\"<<\", callback_data=\"Back Session\")\n\nemoji_str = ':white_medium_star:'\nkeys['inline']['1s'] = types.InlineKeyboardButton(emoji.emojize(f'{emoji_str}'*1), callback_data=\"1\")\nkeys['inline']['2s'] = types.InlineKeyboardButton(emoji.emojize(f'{emoji_str}'*2), callback_data=\"2\")\nkeys['inline']['3s'] = types.InlineKeyboardButton(emoji.emojize(f'{emoji_str}'*3), callback_data=\"3\")\nkeys['inline']['4s'] = types.InlineKeyboardButton(emoji.emojize(f'{emoji_str}'*4), callback_data=\"4\")\nkeys['inline']['5s'] = types.InlineKeyboardButton(emoji.emojize(f'{emoji_str}'*5), callback_data=\"5\")\n\n# ======================= #\n# ===== KEYBOARDs ======= #\n# ======================= #\n# Absent/Presetn\nmarkup = types.InlineKeyboardMarkup()\nmarkup.add(keys['inline']['present'], keys['inline']['absent'])\nkeyboards['inline']['absent/present'] = markup\n\n# Rate\nmarkup = 
types.InlineKeyboardMarkup()\nmarkup.add(keys['inline']['1s'], keys['inline']['2s'], keys['inline']['3s'])\nmarkup.add(keys['inline']['4s'], keys['inline']['5s'])\nkeyboards['inline']['rating'] = markup\n\n# Next/Back\nmarkup = types.InlineKeyboardMarkup()\nmarkup.add(keys['inline']['back_'], keys['inline']['next_'])\nkeyboards['inline']['next/back'] = markup\n\n\nlogger = logging.getLogger(__name__)\nlogger.setLevel(logging.INFO)\n\n\ndef format_book(book):\n return f\"{book['name']} - {book['author']}\"\n\n\n# NOTE: this is defined again to avoid defining slow duckling parser in utils\ndef get_random_doc(collection):\n selected = None\n try:\n count = collection.count({'status': False})\n selected = collection.find({'status': False})[random.randrange(count)]\n except Exception as e:\n logger.info(e)\n return selected\n\n\n# ---------------------- #\n# --------- DB --------- #\n# ---------------------- #\ndb = pymongo.MongoClient(\"mongodb://localhost:27017/\")['EDMBOOKBOT']\n\n# ---------------------- #\n# ------ Telebot ------- #\n# ---------------------- #\nTOKEN = \"887187741:AAFcQbld9U1ATVjkwZmGyzFH3rtPkq15PCM\"\ntelebot = telebot.TeleBot(TOKEN)\n\n\nENABLE = True\nIDs = {\n 'bookclub': -1001321564193,\n 'me': 73106435,\n 'temp': -388473766\n}\nmsgs = {'me': [],\n 'bookclub': [],\n 'temp': []}\n\ncurrentDT = datetime.now()\nsessionDT = None\nkeyboard = None\n\n# previous book\nsession = db.sessions.find_one({'next': True})\nif session:\n if session['date']:\n date = session['date']\n sessionDT = datetime.strptime(date, TIME_FORMAT)\n book = session['book']\n else:\n msg = ':cross_mark: ' + 'برای جلسه' + \"\\u200c\" + 'ی' + ' ' + 'فعلی، تاریخ و مکان تعیین نشده است.'\n keyboard = None\n msgs['me'].append(msg)\n msgs['temp'].append(msg)\nelse:\n msg = ':cross_mark: ' + 'جلسه' + \"\\u200c\" + 'ی' + ' ' + 'بعد تعیین نشده است.'\n msgs['me'].append(msg)\n msgs['temp'].append(msg)\n\n\nif sessionDT:\n c = {}\n c['day'] = currentDT.weekday() == sessionDT.weekday()\n c['hour'] = {}\n c['hour']['absent/present'] = currentDT.hour == (sessionDT.hour - 1)\n c['hour']['vote'] = currentDT.hour == (sessionDT.hour - 1)\n c['hour']['next session'] = currentDT.hour == (sessionDT.hour + 1)\n c['minute'] = currentDT.minute == sessionDT.minute\n\n conditions = {}\n conditions['absent/present'] = c['day'] and c['hour']['absent/present'] and c['minute']\n conditions['vote'] = c['day'] and c['hour']['vote'] and c['minute']\n conditions['next session'] = c['day'] and c['hour']['next session'] and c['minute']\n\n print(f'Current: day:{currentDT.weekday()} hour:{currentDT.hour} minute: {currentDT.minute}')\n print(f'Session: day:{sessionDT.weekday()} hour:{sessionDT.hour} minute: {sessionDT.minute}')\n\n # -------------------------- #\n # Absent/Present #\n # -------------------------- #\n # 1 hour before session\n if conditions['absent/present']:\n msg = MESSAGES['absent/present']\n keyboard = keyboards['inline']['absent/present']\n\n msgs['me'].append(msg)\n msgs['temp'].append(msg)\n if ENABLE:\n msgs['bookclub'].append(msg)\n\n # -------------------------- #\n # VOTE #\n # -------------------------- #\n # 1 hour after session starts\n if conditions['vote']:\n msg = MESSAGES['vote'].format(book_name=format_book(book))\n keyboard = keyboards['inline']['rating']\n\n msgs['me'].append(msg)\n msgs['temp'].append(msg)\n if ENABLE:\n msgs['bookclub'].append(msg)\n\n # -------------------------- #\n # NEXT SESSION #\n # -------------------------- #\n # 2 hour after session starts\n if conditions['next session']:\n\n 
# previous book\n pre_session = db.sessions.find_one({'next': True})\n if pre_session:\n db.sessions.update({'next': True}, {'$set': {'next': False}})\n pre_book = pre_session['book']\n if pre_book:\n db.books.update({'name': pre_book['name'], 'author': pre_book['author']}, {'$set': {'status': True}})\n msg = MESSAGES['mark as read'].format(book_name=format_book(book), status='خوانده شده')\n else:\n msg = \":cross_mark:\" + \"کتاب \" + f\"{format_book(book)} \" + \"از فهرست کتاب ها حذف شده است.\"\n\n msgs['me'].append(msg)\n msgs['temp'].append(msg)\n if ENABLE:\n msgs['bookclub'].append(msg)\n\n # next book\n keyboard = None\n book = get_random_doc(db.books)\n\n # select next book randomly\n if book:\n if book['type'] == 'novel':\n book_type = 'رمان'\n else:\n book_type = 'داستان کوتاه'\n\n session = {'date': None, 'people': [],\n 'book': {'name': book[\"name\"], 'author': book['author'], 'rating': {}},\n 'location': None, 'next': True}\n db.sessions.update({'book.name': book[\"name\"], 'book.author': book['author']}, session, upsert=True)\n\n msg = MESSAGES['next session book'].format(book_name=format_book(book), book_type=book_type)\n else:\n msg = MESSAGES['no book to read']\n\n msgs['me'].append(msg)\n msgs['temp'].append(msg)\n if ENABLE:\n msgs['bookclub'].append(msg)\n\n\n# Sending to groups and bot\nfor receiver, r_msgs in msgs.items():\n for msg in r_msgs:\n try:\n telebot.send_message(IDs[receiver], text=emoji.emojize(msg), reply_markup=keyboard, parse_mode='html')\n print(f'1 message sent! Receiver: {receiver}')\n except Exception as e:\n print(e)\n","sub_path":"edmbookbot/schedule/schedule.py","file_name":"schedule.py","file_ext":"py","file_size_in_byte":8898,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"230672955","text":"#Server \nimport socket\nimport sys\n\nHOST = '172.18.7.143'\nPORT = 8888 \n\ns = socket.socket()\ns.bind((HOST, PORT))\ns.listen(10) # accept a backlog of up to 10 connection attempts\nsc, address = s.accept()\n\n#print address\ni=1\nf = open('transformador'+ str(i)+\".txt\",'wb') # opening the output file\ni=i+1\n\n# Receive and write the file until the client closes the connection\nl = sc.recv(1024)\nwhile (l):\n f.write(l)\n l = sc.recv(1024)\nf.close()\n\nsc.close()\ns.close()\n","sub_path":"Python_UDP_Server_Client/serverPython.py","file_name":"serverPython.py","file_ext":"py","file_size_in_byte":438,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"448772064","text":"# rewrite the high_low.py program from lesson 4c to use a random integer between\r\n# 0 and 100 and the user only gets 6 guesses. Use the python documentation to\r\n# find an appropriate module and function to do this.\r\n# http://docs.python.org/2/library/\r\n\r\nimport random\r\nprint(\"Let's play a game! Try to guess my favorite number in under 6 tries!\")\r\nnumber = random.randint(1, 99)\r\ncount = 0\r\nguess = int(input(\"What is my favorite number? \"))\r\nwhile count < 5 and guess != number: \r\n if guess < number:\r\n print(\"It's not that small...\")\r\n if guess > number:\r\n print(\"It's not that big...\")\r\n guess = int(input(\"Sorry, try again \"))\r\n count = count + 1\r\n\r\nif guess != number:\r\n print(\"You are wrong!\") \r\n print(\"That must have been complicated...\")\r\n print(\"The number was\", number)\r\nif guess == number:\r\n print(\"Yes! 
My favorite number is\", number)\r\n\r\n \r\n","sub_path":"Brauer cole 9c.py","file_name":"Brauer cole 9c.py","file_ext":"py","file_size_in_byte":893,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"522792547","text":"from __future__ import annotations\nimport logging\nimport numpy as np\n\nfrom drawplayer import OnlyDrawPlayer\nfrom gamestate import GameState\n\n\nclass MonteCarloTreeSearchNode:\n # This action in the params is the action that the player from the parent MCTS node, i.e. the prev game state, has\n # played. This information is critical for when we're calculating the win ratio (see below).\n def __init__(self, game_state: GameState, parent: MonteCarloTreeSearchNode, action=None):\n self.game_state = game_state\n self.parent = parent\n self.action = action\n self.children = np.array([], dtype=MonteCarloTreeSearchNode)\n self.untried_actions = game_state.get_valid_moves()\n self.wins = dict(map(lambda player: (player, 0), game_state.players))\n self.wins[OnlyDrawPlayer] = 0\n self.visits = 0\n\n def select(self, c) -> MonteCarloTreeSearchNode:\n leaf_node = self\n while not leaf_node.is_terminal:\n if not leaf_node.is_fully_expanded:\n return leaf_node.expand()\n else:\n leaf_node = leaf_node.select_child_with_max_ucb(c)\n\n return leaf_node\n\n def expand(self) -> MonteCarloTreeSearchNode:\n logging.debug(f'Expanding for {self.__repr__()}')\n action = self.untried_actions.pop()\n new_game_state = self.game_state.make_move(action)\n child_node = MonteCarloTreeSearchNode(new_game_state, self, action)\n self.children = np.append(self.children, child_node)\n logging.debug(f'Created {child_node.__repr__()}')\n return child_node\n\n # The reason why we're not learning a lot is that we nudge our decision making mechanism towards states that are\n # only advantageous from a random agent's perspective, since the roll-out assumes that both agents play randomly\n # in future rounds.\n def rollout(self) -> float:\n logging.debug(f'Rollout now for {self.__repr__()}')\n rollout_state = self.game_state\n while not rollout_state.is_game_over:\n move = self.get_move_from_heuristic_rollout_strategy(rollout_state)\n rollout_state = rollout_state.make_move(move)\n logging.debug(f'The winner of this rollout: {rollout_state.winner}')\n return rollout_state.winner\n\n def backpropagate(self, who_won):\n self.visits += 1\n self.wins[who_won] += 1\n if self.parent is not None:\n self.parent.backpropagate(who_won)\n\n # This ratio tries to maximize the winning. It doesn't try to minimize losing. The significant difference can be\n # seen when the agent plays a few times.\n def select_child_with_max_ucb(self, c) -> MonteCarloTreeSearchNode:\n ucb_values = list(map(lambda child: MonteCarloTreeSearchNode.get_ucb(child, c), self.children))\n return self.children[np.argmax(ucb_values)]\n\n @staticmethod\n def get_ucb(child: MonteCarloTreeSearchNode, c):\n return child.win_ratio + c * np.sqrt(np.log(child.parent.visits) / child.visits)\n\n @staticmethod\n def get_move_from_heuristic_rollout_strategy(rollout_state: GameState) -> int:\n return rollout_state.get_heuristic_move()\n\n @staticmethod\n def get_move_from_simple_rollout_strategy(rollout_state: GameState) -> int:\n possible_moves = rollout_state.get_valid_moves()\n return possible_moves[np.random.randint(len(possible_moves))]\n\n @property\n def win_ratio(self):\n # If the node hasn't been visited, then the win_ratio (part of ucb) is inf. 
This means it will be selected.\n if self.visits == 0:\n return np.inf\n return self.wins[self.parent.game_state.current_player] / self.visits\n\n @property\n def is_fully_expanded(self):\n return len(self.children) == len(self.game_state.get_valid_moves())\n\n @property\n def is_terminal(self):\n return self.game_state.winner is not None\n\n def __repr__(self):\n return f'TreeNode: {id(self)}'\n\n def __str__(self):\n return f'TreeNode: {id(self)}, action: {self.action}, number of visits: {self.visits}, ' \\\n f'win ratio: {self.win_ratio}, fully expanded: {self.is_fully_expanded}, ' \\\n f'children: {self.children}'\n","sub_path":"montecarlotreesearchnode.py","file_name":"montecarlotreesearchnode.py","file_ext":"py","file_size_in_byte":4227,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"218347583","text":"\nimport setuptools\n\nwith open(\"README.md\", \"r\", encoding='utf-8') as fh:\n long_description = fh.read()\n\n\nsetuptools.setup(\n name=\"StemLemPipe\", \n version=\"0.1.5\",\n author=\"Demetry Pascal\",\n author_email=\"qtckpuhdsa@gmail.com\",\n maintainer = ['Demetry Pascal'],\n description=\"simple text transformer used several stemming and lemmatization backends\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/PasaOpasen/Stem-Lem-Pipeline\",\n keywords=['text','nlp','nltk', 'ngrams','transformation', 'words', 'stemming', 'lemmatization'],\n packages = setuptools.find_packages(),\n classifiers=[\n \"Programming Language :: Python :: 3\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: OS Independent\",\n \"Topic :: Software Development :: Libraries :: Python Modules\",\n ],\n install_requires=['nltk','pymorphy2','pymystem3','stop_words', 'tqdm']\n \n )\n\n\n\n\n\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":995,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"38643353","text":"#!/usr/bin/env python\r\n# -*- coding: utf-8 -*-\r\n\r\n\"\"\"\r\nThis code was written for /r/formula1\r\nWritten by /u/Redbiertje\r\n31 Mar 2018\r\n\"\"\"\r\n\r\n#Imports\r\nfrom __future__ import division\r\nimport datetime\r\nimport sys\r\nimport time\r\nimport random\r\nimport numpy as np\r\nimport weekends\r\nimport webscraper as ws\r\nimport auxiliary as aux\r\nimport templates as tp\r\n\r\n\r\nclass Sidebar():\r\n\r\n def __init__(self, sub, r, settings):\r\n self.r = r\r\n self.sub = sub\r\n self.settings = settings\r\n \r\n def insertText(self, text, beginMarker=-1, endMarker=-1):\r\n \"\"\"\r\n Inserts the given text in between the two markers in the text. 
If one marker is not given, then it places it right before/after the existing marker\r\n \"\"\"\r\n try:\r\n #Load old sidebar text\r\n oldSidebar = self.sub.mod.settings()[\"description\"]\r\n \r\n #Check if any markers are given\r\n if beginMarker == -1 and endMarker == -1:\r\n return False\r\n \r\n #If so, check if the begin marker is missing\r\n elif beginMarker == -1:\r\n #In which case, check if the end marker is present\r\n endIndex = oldSidebar.find(endMarker)\r\n if endIndex == -1:\r\n return False\r\n \r\n #And inject the given text before the end marker\r\n newSidebar = oldSidebar[:endIndex] + text + oldSidebar[endIndex:]\r\n \r\n #Else check if the end marker is missing\r\n elif endMarker == -1:\r\n #In which case, check if the begin marker is present\r\n beginIndex = oldSidebar.find(beginMarker)+len(beginMarker)\r\n if beginIndex == -1:\r\n return False\r\n \r\n #And inject the given text after the begin marker\r\n newSidebar = oldSidebar[:beginIndex] + text + oldSidebar[beginIndex:]\r\n \r\n #If no markers are missing...\r\n else:\r\n #Check if the given markers are present\r\n beginIndex = oldSidebar.find(beginMarker)+len(beginMarker)\r\n endIndex = oldSidebar.find(endMarker)\r\n if beginIndex == -1 or endIndex == -1:\r\n return False\r\n \r\n #And inject the given text in between the two markers\r\n newSidebar = oldSidebar[:beginIndex] + text + oldSidebar[endIndex:]\r\n \r\n #Upload the new sidebar text to the subreddit\r\n wikiPage = self.sub.wiki['config/sidebar']\r\n wikiPage.edit(newSidebar)\r\n \r\n return True\r\n except Exception as e:\r\n print(\"Error in insertText: {}\".format(e))\r\n return False\r\n \r\n def updateCountdown(self, currentTime):\r\n \"\"\"\r\n Updates the countdown in the sidebar of the subreddit\r\n \"\"\"\r\n print(\"[ ] Updating the countdown in the F1 sidebar\", end=\"\\r\")\r\n \r\n #Figure out the relevant races\r\n nextRace = aux.nextDate().sessions[-1].startDate\r\n prevRace = aux.prevDate().sessions[-1].startDate\r\n \r\n #Define markers in sidebar\r\n beginMarker = \"[](/countDownBegin)\"\r\n endMarker = \"[](/countDownEnd)\"\r\n try:\r\n #Check if race is still ongoing\r\n if currentTime < (prevRace + datetime.timedelta(hours=2, minutes=30)) and prevRace < currentTime:\r\n countdown = \"In progress\"\r\n \r\n #Use insertText to update the countdown\r\n success = self.insertText(countdown, beginMarker, endMarker)\r\n else:\r\n #Figure out the text in the countdown\r\n delta = nextRace - currentTime\r\n hours = int((delta.seconds - delta.seconds%3600)/3600)\r\n minutes = int((delta.seconds%3600)/60)\r\n countdown = \"{0} day{1}, {2} hour{3} and {4} minute{5}\".format(delta.days, \"s\"*(delta.days != 1), hours, \"s\"*(hours != 1), minutes, \"s\"*(minutes != 1))\r\n \r\n #Use insertText to update the countdown\r\n success = self.insertText(countdown, beginMarker, endMarker)\r\n \r\n #Inform human of result\r\n if success:\r\n print(\"[x] Updating the countdown in the F1 sidebar\")\r\n else:\r\n print(\"Encountered a problem while updating the countdown\")\r\n except Exception as e:\r\n print(\"Error in updateCountdown: {}\".format(e))\r\n \r\n def updateSidebarInfo(self):\r\n \"\"\"\r\n Updates all the race-related information in the sidebar\r\n \"\"\"\r\n \r\n print(\"[ ] Updating the info in the F1 sidebar\", end=\"\\r\")\r\n \r\n try:\r\n #Define all the markers\r\n beginMarkerHead = \"[](/beginInfoHead)\"\r\n endMarkerHead = \"[](/endInfoHead)\"\r\n beginMarkerSched = \"[](/beginInfoSched)\"\r\n endMarkerSched = 
\"[](/endInfoSched)\"\r\n beginMarkerCirc = \"[](/beginInfoCirc)\"\r\n endMarkerCirc = \"[](/endInfoCirc)\"\r\n beginMarkerLast = \"[](/beginInfoLast)\"\r\n endMarkerLast = \"[](/endInfoLast)\"\r\n beginMarkerTrack = \"[](/beginTrackname)\"\r\n endMarkerTrack = \"[](/endTrackname)\"\r\n beginMarkerDriver = \"[](/beginDriverStand)\"\r\n endMarkerDriver = \"[](/endDriverStand)\"\r\n beginMarkerTeam = \"[](/beginTeamStand)\"\r\n endMarkerTeam = \"[](/endTeamStand)\"\r\n beginMarkerDriverSide = \"[](/beginDriverSide)\"\r\n endMarkerDriverSide = \"[](/endDriverSide)\"\r\n beginMarkerTeamSide = \"[](/beginTeamSide)\"\r\n endMarkerTeamSide = \"[](/endTeamSide)\"\r\n \r\n #Find the next race\r\n w = aux.nextDate()\r\n \r\n currentTime = datetime.datetime.utcnow()\r\n currentYear = currentTime.year\r\n \r\n #Format the section on the name of the next race\r\n infoHead = \"{0} Grand Prix\\n - {1}, {2}\".format(w.namean, w.city, w.country)\r\n \r\n #Format the section on the event schedule\r\n infoSched = w.getScheduleTable(use=\"sidebar\")\r\n \r\n #Format the section on the circuit\r\n if w.lastYear:\r\n infoCirc = \"{0}\\n>\\n|||\\n|-|-\\nLaps|{1}\\n|Circuit Length|{2} km ({3:.3f} mi)\\nRace Length|{4} ({5:.3f} mi)\\nFirst Held|{6}\\nLap Record|{7} ([]({8}) {9}, {10}, {11})\\nLinks|[Track Guide]({12}) - [Wikipedia]({13})\".format(w.circuit, w.laps, w.length, w.length*0.62137, w.distance, w.distance*0.62137, w.firstHeld, w.lapRecordTime, w.lapRecordFlag, aux.abbrevName(w.lapRecordHolder), w.lapRecordTeam, w.lapRecordYear, w.linkF1, w.linkWikiRace)\r\n else:\r\n infoCirc = \"{0}\\n>\\n|||\\n|-|-\\nLaps|{1}\\n|Circuit Length|{2} km ({3:.3f} mi)\\nRace Length|{4} ({5:.3f} mi)\\nFirst Held|{6}\\nLap Record|-\\nLinks|[Track Guide]({7}) - [Wikipedia]({8})\".format(w.circuit, w.laps, w.length, w.length*0.62137, w.distance, w.distance*0.62137, w.firstHeld, w.linkF1, w.linkWikiRace)\r\n \r\n #Format the section on the previous year\r\n if w.lastYear:\r\n infoLast = \"\\n[]({0}) {1}, {2}, {3}\\n> #Podium\\n[]({4}) {5}, {6}\\n> \\n[]({7}) {8}, {9}, {10}\\n> \\n[]({11}) {12}, {13}, {14}\\n> \\n#Fastest Lap\\n[]({15}) {16}, {17}, {18}\".format(w.prevYearPoleFlag, w.prevYearPoleHolder, w.prevYearPoleTeam, w.prevYearPoleTime, w.prevYearWinnerFlag, aux.abbrevName(w.prevYearWinner), w.prevYearWinnerTeam, w.prevYearSecondFlag, aux.abbrevName(w.prevYearSecond), w.prevYearSecondTeam, w.prevYearSecondDelta, w.prevYearThirdFlag, aux.abbrevName(w.prevYearThird), w.prevYearThirdTeam, w.prevYearThirdDelta, w.prevYearFastestFlag, w.prevYearFastestHolder, w.prevYearFastestTeam, w.prevYearFastestTime)\r\n else:\r\n infoLast = \"\\n>-[](\\dud)\\n> #Podium\\n>-[](\\dud)\\n> \\n>-[](\\dud)\\n> \\n>-[](\\dud)\\n> \\n#Fastest Lap\\n>-\"\r\n \r\n #Update all the information\r\n self.insertText(infoHead, beginMarkerHead, endMarkerHead)\r\n self.insertText(infoSched, beginMarkerSched, endMarkerSched)\r\n self.insertText(infoCirc, beginMarkerCirc, endMarkerCirc)\r\n self.insertText(infoLast, beginMarkerLast, endMarkerLast)\r\n \r\n #Try to upload the first sidebar image\r\n try:\r\n self.sub.stylesheet.upload(\"race-pic\", \"img/{}-1.png\".format(w.id_name.lower().replace(\" \", \"\")))\r\n except Exception as e:\r\n print(\"Failed to upload race pic 1: {}\".format(e))\r\n \r\n #Try to upload the second sidebar image\r\n try:\r\n self.sub.stylesheet.upload(\"race-pic-2\", \"img/{}-2.png\".format(w.id_name.lower().replace(\" \", \"\")))\r\n except Exception as e:\r\n print(\"Failed to upload race pic 2: {}\".format(e))\r\n \r\n #Try to 
upload the third sidebar image\r\n try:\r\n self.sub.stylesheet.upload(\"race-pic-3\", \"img/{}-3.png\".format(w.id_name.lower().replace(\" \", \"\")))\r\n except Exception as e:\r\n print(\"Failed to upload race pic 3: {}\".format(e))\r\n \r\n #Try to upload the circuit map\r\n try:\r\n self.sub.stylesheet.upload(\"circuit-map\", \"img/{}-circuit.png\".format(w.id_name.lower().replace(\" \", \"\")))\r\n \r\n #If that works, also update the text\r\n self.insertText(w.circuit, beginMarkerTrack, endMarkerTrack)\r\n except Exception as e:\r\n print(\"Failed to upload circuit map: {}\".format(e))\r\n \r\n #Reupload stylesheet to make sure the images work\r\n try:\r\n self.sub.stylesheet.update(self.sub.stylesheet().stylesheet, reason=\"Updating race pics and circuit map\")\r\n except Exception as e:\r\n print(\"Failed to update stylesheet: {}\".format(e))\r\n \r\n #Attempt to update championship standings\r\n try:\r\n ##Retrieve the dropdown menu standings\r\n #driverStand = ws.driverStandings(type=0)\r\n #teamStand = ws.teamStandings(type=0)\r\n \r\n #Retrieve the main sidebar standings (or the other way around, I'm not sure)\r\n driverStandSide = ws.driverStandings(type=1)\r\n teamStandSide = ws.teamStandings(type=1)\r\n \r\n #Retrieve the new reddit sidebar standings\r\n driverStandNew = ws.driverStandings(type=2)\r\n teamStandNew = ws.teamStandings(type=2)\r\n \r\n #If successfully retrieved...\r\n #if driverStand and teamStand:\r\n #self.insertText(driverStand, beginMarkerDriver, endMarkerDriver)\r\n #self.insertText(teamStand, beginMarkerTeam, endMarkerTeam)\r\n if driverStandSide and teamStandSide:\r\n self.insertText(driverStandSide, beginMarkerDriverSide, endMarkerDriverSide)\r\n self.insertText(teamStandSide, beginMarkerTeamSide, endMarkerTeamSide)\r\n if driverStandNew and teamStandNew:\r\n for widget in self.sub.widgets.sidebar:\r\n if widget.shortName == \"{} Driver Standings\".format(currentYear):\r\n widget.mod.update(text=driverStandNew)\r\n if widget.shortName == \"{} Constructor Standings\".format(currentYear):\r\n widget.mod.update(text=teamStandNew)\r\n except Exception as e:\r\n print(\"Failed to update driver and team standings: {}\".format(e))\r\n \r\n print(\"[x] Updating the info in the F1 sidebar ({} Grand Prix)\".format(w.namean))\r\n return \"{} Grand Prix\".format(w.namean)\r\n except Exception as e:\r\n print(\"Error in updateSidebarInfo: {}\".format(e))\r\n return False\r\n \r\n def updateWeatherPrediction(self, owm, forecast):\r\n \"\"\"\r\n Updates the weather prediction in the sidebar of the subreddit\r\n \"\"\"\r\n print(\"[ ] Updating the weather prediction\", end=\"\\r\")\r\n \r\n #Get current time\r\n currentTime = datetime.datetime.utcnow()\r\n \r\n #Define markers\r\n beginMarker = \"[](/weatherBegin)\"\r\n endMarker = \"[](/weatherEnd)\"\r\n \r\n #Find next weekend\r\n nextWeekend = aux.nextDate()\r\n \r\n try:\r\n #Get forecast object\r\n fc = aux.getForecast(owm)\r\n \r\n #If not successful, keep old one\r\n if fc == False:\r\n fc = forecast\r\n \r\n #If sufficiently close to the race\r\n if nextWeekend.sessions[-1].startDate - datetime.timedelta(days=4, hours=12, minutes=10) < currentTime:\r\n table = nextWeekend.getWeatherTable(fc, owm, use=\"sidebar\")\r\n \r\n #Upload the prediction to sidebar\r\n self.insertText(table, beginMarker, endMarker)\r\n else:\r\n #Upload the placeholder to sidebar\r\n self.insertText(\"Weather Prediction\\n> The weather prediction is not yet available.\", beginMarker, endMarker)\r\n print(\"[x] Updating the weather 
prediction ({})\".format(nextWeekend.city))\r\n return fc\r\n except Exception as e:\r\n print(\"Error in updateWeatherPrediction: {}\".format(e))\r\n return False\r\n \r\n def updateTopBar(self, post, weekend, abbrev):\r\n \"\"\"\r\n Updates the top bar of the subreddit\r\n \"\"\"\r\n \r\n #Define markers\r\n beginMarker = \"[](/topBegin)\"\r\n endMarker = \"[](/topEnd)\"\r\n \r\n try:\r\n #Don't change anything in testing mode\r\n if self.settings[\"testingMode\"] == True:\r\n return False\r\n \r\n #If Hub, then clear the top bar before adding Hub\r\n elif abbrev == \"Hub\":\r\n insertString = \"[]({0})\\n- [{1}]({2})\".format(weekend.flag, abbrev, post.shortlink)\r\n self.insertText(insertString, beginMarker, endMarker)\r\n \r\n #Else just add to it\r\n else:\r\n insertString = \" [{0}]({1})\".format(abbrev, post.shortlink)\r\n self.insertText(insertString, -1, endMarker)\r\n except Exception as e:\r\n print(\"Error in updateTopBar: {}\".format(e))\r\n \r\n def updateHeaderQuote(self):\r\n \"\"\"\r\n Updates the quote in the header of the /r/formula1 subreddit\r\n \"\"\"\r\n #Define markers\r\n beginMarker = \"[](/beginHeaderQuote)\"\r\n endMarker = \"[](/endHeaderQuote)\"\r\n \r\n #Define relevant lists\r\n drivers_list = [\"Hamilton\", \"Vettel\", \"Bottas\", \"Räikkönen\", \"Verstappen\", \"Ricciardo\", \"Pérez\", \"Sainz\", \"Gasly\", \"Leclerc\", \"Stroll\", \"Russell\", \"Giovinazzi\", \"Latifi\", \"Norris\", \"Ocon\", \"Alonso\", \"Tsunoda\", \"Schumacher\"]\r\n old_drivers_list = [\"Jim Clark\", \"Juan Manuel Fangio\", \"Jackie Stewart\", \"Alberto Ascari\", \"Guiseppe Farina\", \"Stirling Moss\", \"John Surtees\", \"Emerson Fittipaldi\", \"Nelson Piquet\", \"Ayrton Senna\", \"Alain Prost\", \"Niki Lauda\", \"Graham Hill\", \"Mika Häkkinen\", \"Michael Schumacher\", \"Nigel Mansell\", \"Jochen Rindt\", \"Jack Brabham\", \"Ronnie Peterson\", \"Gilles Villeneuve\", \"Bruce Mclaren\", \"Mario Andretti\"]\r\n shitty_old_drivers_list = [\"Tarso Marques\", \"Chanoch Nissany\", \"Yuji Ide\", \"Taki Inoue\", \"Andrea de Cesaris\", \"Marco Apicella\", \"Alex Yoong\", \"Rikky Von Opel\", \"Satoru Nakajima\", \"Andrea Montermini\", \"Ricardo Rosset\", \"Philippe Alliot\", \"Philippe Streiff\", \"Manfred Winkelhock\", \"Johathan Palmer\", \"Eliseo Salazar\", \"Ivan Capelli\", \"Johnny Dumfries\", \"Stefano Modena\", \"Gabriele Tarquini\", \"Pierre-Henry Raphanel\", \"Maurício Gugelmin\", \"Bruno Giacomelli\", \"Olivier Beretta\", \"Jos Verstappen\", \"Andrea Montermini\", \"Aguri Suzuki\", \"Gastón Mazzacane\"]\r\n teams_list = [\"Mercedes\", \"Ferrari\", \"Red Bull\", \"Alpine\", \"Haas\", \"McLaren\", \"Aston Martin\", \"AlphaTauri\", \"Alfa Romeo\", \"Williams\"]\r\n mods_list = [\"Mulsanne\", \"empw\", \"Redbiertje\", \"jeppe96\", \"elusive_username\", \"Effulgency\", \"Blanchimont\", \"AshKals\", \"AnilP228\", \"anneomoly\", \"overspeeed\", \"DAGilligan\", \"minardif1\", \"WP2OKB\"]\r\n \r\n try:\r\n print(\"[ ] Updating header quote: ...\", end=\"\\r\")\r\n \r\n #Retrieve templates from subreddit wiki\r\n wikiContent = self.sub.wiki['headertemplates'].content_md\r\n templates = [line[1:].lstrip() for line in wikiContent.split(\"---\")[1].lstrip().rstrip().split(\"\\r\\n\")]\r\n \r\n #Select random template\r\n selectedTemplate = random.choice(templates)+\" \"\r\n \r\n #Get substitutes\r\n rand_driver = random.choice(drivers_list)\r\n rand_old = random.choice(old_drivers_list)\r\n rand_shitty = random.choice(shitty_old_drivers_list)\r\n rand_team = random.choice(teams_list)\r\n 
rand_mod = random.choice(mods_list)\r\n rand_country = random.choice(weekends.allWeekends).country\r\n rand_city = random.choice(weekends.allWeekends).city\r\n next_country = aux.nextDate().country\r\n next_city = aux.nextDate().city\r\n prev_country = aux.prevDate().country\r\n prev_city = aux.prevDate().city\r\n \r\n #Get template and substitute placeholders\r\n headerQuote = selectedTemplate.replace(\"\", rand_mod).replace(\"\", rand_driver).replace(\"\", rand_old).replace(\"\", rand_shitty).replace(\"\", rand_team).replace(\"\", rand_country).replace(\"\", rand_city).replace(\"\", next_country).replace(\"\", next_city).replace(\"\", prev_country).replace(\"\", prev_city)\r\n \r\n #Report the selected quote\r\n print(\"[x] Updating header quote: {}\".format(headerQuote))\r\n \r\n #Upload new quote to sidebar\r\n self.insertText(headerQuote, beginMarker, endMarker)\r\n return True\r\n except Exception as e:\r\n print(\"Error in updateHeaderQuote: {}\".format(e))\r\n return False\r\n \r\n def updateNewScheduleWidget(self):\r\n \"\"\"\r\n Updates New Reddit schedule tab\r\n \"\"\"\r\n \r\n try:\r\n print(\"[ ] Updating the new Reddit sidebar schedule\", end=\"\\r\")\r\n \r\n currentTime = datetime.datetime.utcnow()\r\n currentYear = currentTime.year\r\n for idx, weekend in enumerate(weekends.allWeekends):\r\n if currentTime < weekend.sessions[-1].startDate+datetime.timedelta(hours=2):\r\n break\r\n \r\n if idx == 0:\r\n selected_weekends = weekends.allWeekends[:3]\r\n elif idx == len(weekends.allWeekends)-1:\r\n selected_weekends = weekends.allWeekends[-3:]\r\n else:\r\n selected_weekends = weekends.allWeekends[idx-1:idx+2]\r\n \r\n text = \"|**#**|**Grand Prix**|**Date**\\n|:-:|:--|:-:|\"\r\n \r\n #Set the weekend flag to False by default. If true: append a detailed schedule\r\n inWeekend = False\r\n for weekend in selected_weekends:\r\n \r\n raceWeekendBeginTime = weekend.sessions[0].startDate - datetime.timedelta(hours=weekend.sessions[0].startDate.hour, minutes=weekend.sessions[0].startDate.minute) - datetime.timedelta(days=1) - datetime.timedelta(hours=weekend.timezone)\r\n raceWeekendEndTime = weekend.sessions[-1].startDate + datetime.timedelta(hours=(24 - weekend.sessions[-1].startDate.hour)) + datetime.timedelta(days=1) - datetime.timedelta(hours=weekend.timezone)\r\n \r\n if weekend.sessions[-1].startDate + datetime.timedelta(hours=2) < currentTime:\r\n date = \"\\U0001F3C1\"\r\n else:\r\n if weekend.sessions[0].startDate.month == weekend.sessions[-1].startDate.month:\r\n date = \"{} - {} {}\".format(weekend.sessions[0].startDate.day, weekend.sessions[-1].startDate.day, aux.monthToWord(weekend.sessions[-1].startDate.month))\r\n else:\r\n date = \"{} {} - {} {}\".format(weekend.sessions[0].startDate.day, aux.monthToWord(weekend.sessions[0].startDate.month), weekend.sessions[-1].startDate.day, aux.monthToWord(weekend.sessions[-1].startDate.month))\r\n \r\n if raceWeekendBeginTime < currentTime and currentTime < raceWeekendEndTime:\r\n inWeekend=True\r\n text += \"\\n|{}|{} **[{} Grand Prix]({})**|{}|\".format(weekend.round, weekend.emojiFlag, weekend.namean, weekend.linkF1, date)\r\n else:\r\n text += \"\\n|{}|{} [{} Grand Prix]({})|{}|\".format(weekend.round, weekend.emojiFlag, weekend.namean, weekend.linkF1, date)\r\n \r\n if inWeekend:\r\n botActivity = self.r.user.me().new(limit=25)\r\n oldPosts = [post for post in botActivity if hasattr(post, 'title')]\r\n \r\n for weekend in selected_weekends:\r\n \r\n raceWeekendBeginTime = weekend.sessions[0].startDate - 
datetime.timedelta(hours=weekend.sessions[0].startDate.hour, minutes=weekend.sessions[0].startDate.minute) - datetime.timedelta(days=1) - datetime.timedelta(hours=weekend.timezone)\r\n raceWeekendEndTime = weekend.sessions[-1].startDate + datetime.timedelta(hours=(24 - weekend.sessions[-1].startDate.hour)) + datetime.timedelta(days=1) - datetime.timedelta(hours=weekend.timezone)\r\n \r\n if raceWeekendBeginTime < currentTime and currentTime < raceWeekendEndTime:\r\n text += \"\\n\\n|{} **{} Grand Prix**|**Day**|**Time (UTC)**|\\n|:-:|:--|:-:|\".format(weekend.emojiFlag, weekend.namean)\r\n \r\n for session in weekend.sessions:\r\n if session.startDate - datetime.timedelta(minutes=session.post_advance) < currentTime:\r\n #Iterate over old posts to find thread\r\n for oldPost in oldPosts:\r\n #Check if post has the correct title\r\n if oldPost.title == \"{0} {1} Grand Prix - {2} Discussion\".format(currentYear, weekend.namean, session.name) and oldPost.subreddit == \"formula1\":\r\n if session.startDate + datetime.timedelta(minutes=session.duration) < currentTime:\r\n text += \"\\n|\\U0001F3C1 \"\r\n else:\r\n text += \"\\n| \"\r\n if session.startDate < currentTime and currentTime < session.startDate + datetime.timedelta(minutes=session.duration):\r\n text += \"**[{0}]({1})**|{2}|{3:02d}:{4:02d}|\".format(session.name, oldPost.shortlink, aux.weekdayToWord(session.startDate.weekday()), session.startDate.hour, session.startDate.minute)\r\n else:\r\n text += \"[{0}]({1})|{2}|{3:02d}:{4:02d}|\".format(session.name, oldPost.shortlink, aux.weekdayToWord(session.startDate.weekday()), session.startDate.hour, session.startDate.minute)\r\n else:\r\n text += \"\\n|{0}|{1}|{2:02d}:{3:02d}|\".format(session.name, aux.weekdayToWord(session.startDate.weekday()), session.startDate.hour, session.startDate.minute)\r\n for widget in self.sub.widgets.sidebar:\r\n if widget.shortName == \"{} Race Schedule\".format(currentYear):\r\n widget.mod.update(text=text)\r\n print(\"[x] Updating the new Reddit sidebar schedule\")\r\n except Exception as e:\r\n print(\"Error in updateNewScheduleWidget: {}\".format(e))","sub_path":"sidebar.py","file_name":"sidebar.py","file_ext":"py","file_size_in_byte":24482,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"13798992","text":"'''\nWe have to maintain a hashmap with prefix sum which stores the number of vals with same prefix sum.\n\nTime: O(N)\nSpace: O(N)\n'''\n\nclass Solution:\n def subarraySum(self, nums: List[int], k: int) -> int:\n d=defaultdict(int)\n curr_sum=0\n count=0\n for i in range(len(nums)):\n curr_sum+=nums[i]\n if(curr_sum==k):\n count+=1\n if(curr_sum-k in d):\n count+=d[curr_sum-k]\n d[curr_sum]+=1\n \n return count","sub_path":"subarray_sum_equals_k.py","file_name":"subarray_sum_equals_k.py","file_ext":"py","file_size_in_byte":515,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"70585751","text":"\nfrom Banco import Banco\n\n\nclass Usuarios(object):\n\n def __init__(self, codigo_livro=0, nome_do_livro=\"\", nome_do_autor=\"\", ano=\"\", edicao=\"\", data_de_indexacao=\"\"):\n self.codigo_livro=codigo_livro\n self.nome_do_livro=nome_do_livro\n self.nome_do_autor=nome_do_autor\n self.ano=ano\n self.edicao=edicao\n self.data_de_indexacao=data_de_indexacao\n\n def insertUser(self):\n\n banco = Banco()\n try:\n\n c = banco.conexao.cursor()\n c.execute(\"insert into livros (codigo_livro, nome_do_livro, nome_do_autor, ano, edicao,data_de_indexacao) 
values ('\" + self.codigo_livro + \"', '\" + self.nome_do_livro + \"', '\" + self.nome_do_autor + \"', '\" + self.ano + \"', '\" + self.edicao + \"','\" + self.data_de_indexacao + \"' )\")\n\n banco.conexao.commit()\n c.close()\n\n return \"Livro cadastrado com sucesso!\"\n except:\n return \"Ocorreu um erro na inserção do Livro\"\n\n\n def deleteUser(self):\n\n banco = Banco()\n try:\n\n c = banco.conexao.cursor()\n\n c.execute(\"delete from livros where codigo_do_livro = \" + self.codigo_livro + \" \")\n\n banco.conexao.commit()\n c.close()\n\n return \"Usuário excluído com sucesso!\"\n except:\n return \"Ocorreu um erro na exclusão do usuário\"\n\n\n def selectUser(self, codigo_livro):\n banco = Banco()\n try:\n\n c = banco.conexao.cursor()\n\n c.execute(\"select * from livros where cod_livro = \" + codigo_livro + \" \")\n\n for linha in c:\n self.codigo_livro = linha[0]\n self.nome_do_livro = linha[1]\n self.nome_do_autor = linha[2]\n self.ano = linha[3]\n self.edicao = linha[4]\n self.edicao = linha[5]\n self.data_de_indexacao= linha[6]\n\n c.close()\n\n return \"Busca feita com sucesso!\"\n except:\n return \"Ocorreu um erro na busca do usuário\"","sub_path":"vai_que_funciona/Usuarios.py","file_name":"Usuarios.py","file_ext":"py","file_size_in_byte":2043,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"283066819","text":"import shutil\nimport sys\nimport time\n\nimport h5py\nimport numpy as np\nfrom scipy.linalg import expm\n\nnp.seterr(over=\"ignore\")\n\n\n# http://xoroshiro.di.unimi.it/splitmix64.c\ndef rand_seed(x):\n x = np.uint64(x)\n rng = np.zeros(17, dtype=np.uint64)\n for i in range(16):\n x += np.uint64(0x9E3779B97F4A7C15)\n z = (x ^ (x >> np.uint64(30))) * np.uint64(0xBF58476D1CE4E5B9)\n z = (z ^ (z >> np.uint64(27))) * np.uint64(0x94D049BB133111EB)\n rng[i] = z ^ (z >> np.uint64(31))\n return rng\n\n\n# http://xoroshiro.di.unimi.it/xorshift1024star.c\ndef rand_uint(rng):\n s0 = rng[rng[16]]\n p = (int(rng[16]) + 1) & 15\n rng[16] = p\n s1 = rng[p]\n s1 ^= s1 << np.uint64(31)\n rng[p] = s1 ^ s0 ^ (s1 >> np.uint64(11)) ^ (s0 >> np.uint64(30))\n return rng[p] * np.uint64(1181783497276652981)\n\n\ndef rand_jump(rng):\n JMP = np.array((0x84242f96eca9c41d,\n 0xa3c65b8776f96855, 0x5b34a39f070b5837, 0x4489affce4f31a1e,\n 0x2ffeeb0a48316f40, 0xdc2d9891fe68c022, 0x3659132bb12fea70,\n 0xaac17d8efa43cab8, 0xc4cb815590989b13, 0x5ee975283d71c93b,\n 0x691548c86c1bd540, 0x7910c41d10a1e6a5, 0x0b5fc64563b3e2a8,\n 0x047f7684e9fc949d, 0xb99181f2d8f685ca, 0x284600e3f30e38c3\n ), dtype=np.uint64)\n\n t = np.zeros(16, dtype=np.uint64)\n for i in range(16):\n for b in range(64):\n if JMP[i] & (np.uint64(1) << np.uint64(b)):\n for j in range(16):\n t[j] ^= rng[(np.uint64(j) + rng[16]) & np.uint64(15)]\n rand_uint(rng)\n\n for j in range(16):\n rng[(np.uint64(j) + rng[16]) & np.uint64(15)] = t[j]\n\n\ndef create_1(filename=None, overwrite=False, seed=None,\n Nx=16, Ny=4, mu=0.0, tp=0.0, U=6.0, dt=0.115, L=40,\n n_delay=16, n_matmul=8, n_sweep_warm=200, n_sweep_meas=2000,\n period_eqlt=8, period_uneqlt=0,\n meas_bond_corr=1, meas_2bond_corr=0, meas_energy_corr=0, meas_nematic_corr=0):\n assert L % n_matmul == 0 and L % period_eqlt == 0\n N = Nx * Ny\n\n if seed is None:\n seed = int(time.time())\n init_rng = rand_seed(seed)\n init_hs = np.zeros((L, N), dtype=np.int32)\n\n for l in range(L):\n for i in range(N):\n init_hs[l, i] = rand_uint(init_rng) >> np.uint64(63)\n\n # 1 site mapping\n map_i = np.zeros(N, dtype=np.int32)\n degen_i = 
np.array((N,), dtype=np.int32)\n num_i = map_i.max() + 1\n assert num_i == degen_i.size\n\n # 2 site mapping\n map_ij = np.zeros((N, N), dtype=np.int32)\n degen_ij = np.zeros(N, dtype=np.int32)\n for jy in range(Ny):\n for jx in range(Nx):\n for iy in range(Ny):\n for ix in range(Nx):\n ky = (iy - jy) % Ny\n kx = (ix - jx) % Nx\n map_ij[jx + Nx*jy, ix + Nx*iy] = kx + Nx*ky\n degen_ij[kx + Nx*ky] += 1\n num_ij = map_ij.max() + 1\n assert num_ij == degen_ij.size\n\n # bond definitions\n bps = 4 if tp != 0.0 else 2 # bonds per site\n num_b = bps*N # total bonds in cluster\n bonds = np.zeros((2, num_b), dtype=np.int32)\n for iy in range(Ny):\n for ix in range(Nx):\n i = ix + Nx*iy\n iy1 = (iy + 1) % Ny\n ix1 = (ix + 1) % Nx\n bonds[0, i] = i # i0 = i\n bonds[1, i] = ix1 + Nx*iy # i1 = i + x\n bonds[0, i + N] = i # i0 = i\n bonds[1, i + N] = ix + Nx*iy1 # i1 = i + y\n if bps == 4:\n bonds[0, i + 2*N] = i # i0 = i\n bonds[1, i + 2*N] = ix1 + Nx*iy1 # i1 = i + x + y\n bonds[0, i + 3*N] = ix1 + Nx*iy # i0 = i + x\n bonds[1, i + 3*N] = ix + Nx*iy1 # i1 = i + y\n\n # 1 bond 1 site mapping\n num_bs = bps*N\n map_bs = np.zeros((N, num_b), dtype=np.int32)\n degen_bs = np.zeros(num_bs, dtype=np.int32)\n for jy in range(Ny):\n for jx in range(Nx):\n for iy in range(Ny):\n for ix in range(Nx):\n ky = (iy - jy) % Ny\n kx = (ix - jx) % Nx\n i = ix + Nx*iy\n j = jx + Nx*jy\n k = kx + Nx*ky\n for ii in range(bps):\n kk = k + N*ii\n map_bs[j, i + N*ii] = kk\n degen_bs[kk] += 1\n\n # 2 bond mapping\n num_bb = bps*bps*N\n map_bb = np.zeros((num_b, num_b), dtype=np.int32)\n degen_bb = np.zeros(num_bb, dtype = np.int32)\n for jy in range(Ny):\n for jx in range(Nx):\n for iy in range(Ny):\n for ix in range(Nx):\n ky = (iy - jy) % Ny\n kx = (ix - jx) % Nx\n i = ix + Nx*iy\n j = jx + Nx*jy\n k = kx + Nx*ky\n for jj in range(bps):\n for ii in range(bps):\n kk = k + N*(ii + bps*jj)\n map_bb[j + N*jj, i + N*ii] = kk\n degen_bb[kk] += 1\n\n # 2-bond definitions\n b2ps = 12 if tp != 0.0 else 6 # 2-bonds per site\n num_b2 = b2ps*N # total 2-bonds in cluster\n bond2s = np.zeros((2, num_b2), dtype=np.int32)\n for iy in range(Ny):\n for ix in range(Nx):\n i = ix + Nx*iy\n iy1 = (iy + 1) % Ny\n ix1 = (ix + 1) % Nx\n iy2 = (iy + 2) % Ny\n ix2 = (ix + 2) % Nx\n bond2s[0, i] = i # i0 = i\n bond2s[1, i] = ix1 + Nx*iy # i1 = i + x\n bond2s[0, i + N] = i # i0 = i\n bond2s[1, i + N] = ix + Nx*iy1 # i1 = i + y\n bond2s[0, i + 2*N] = i # i0 = i\n bond2s[1, i + 2*N] = ix1 + Nx*iy1 # i1 = i + x + y\n bond2s[0, i + 3*N] = ix1 + Nx*iy # i0 = i + x\n bond2s[1, i + 3*N] = ix + Nx*iy1 # i1 = i + y\n bond2s[0, i + 4*N] = i # i0 = i\n bond2s[1, i + 4*N] = ix2 + Nx*iy # i1 = i + 2x\n bond2s[0, i + 5*N] = i # i0 = i\n bond2s[1, i + 5*N] = ix + Nx*iy2 # i1 = i + 2y\n if b2ps == 12:\n bond2s[0, i + 6*N] = i # i0 = i\n bond2s[1, i + 6*N] = ix2 + Nx*iy1 # i1 = i + 2x + y\n bond2s[0, i + 7*N] = i # i0 = i \n bond2s[1, i + 7*N] = ix1 + Nx*iy2 # i1 = i + x + 2y\n bond2s[0, i + 8*N] = i # i0 = i\n bond2s[1, i + 8*N] = ix2 + Nx*iy2 # i1 = i + 2x + 2y\n bond2s[0, i + 9*N] = ix2 + Nx*iy # i0 = i + 2x\n bond2s[1, i + 9*N] = ix + Nx*iy1 # i1 = i + y\n bond2s[0, i + 10*N] = ix1 + Nx*iy # i0 = i + x\n bond2s[1, i + 10*N] = ix + Nx*iy2 # i1 = i + 2y\n bond2s[0, i + 11*N] = ix2 + Nx*iy # i0 = i + 2x\n bond2s[1, i + 11*N] = ix + Nx*iy2 # i1 = i + 2y\n # 2 2-bond mapping\n num_b2b2 = b2ps*b2ps*N\n map_b2b2 = np.zeros((num_b2, num_b2), dtype=np.int32)\n degen_b2b2 = np.zeros(num_b2b2, dtype = np.int32)\n for jy in range(Ny):\n for jx in range(Nx):\n for iy in 
range(Ny):\n for ix in range(Nx):\n ky = (iy - jy) % Ny\n kx = (ix - jx) % Nx\n i = ix + Nx*iy\n j = jx + Nx*jy\n k = kx + Nx*ky\n for jj in range(b2ps):\n for ii in range(b2ps):\n kk = k + N*(ii + b2ps*jj)\n map_b2b2[j + N*jj, i + N*ii] = kk\n degen_b2b2[kk] += 1\n\n K = np.zeros((N, N), dtype=np.float64)\n for iy in range(Ny):\n for ix in range(Nx):\n iy1 = (iy + 1) % Ny\n ix1 = (ix + 1) % Nx\n K[ix + Nx*iy1, ix + Nx*iy] -= 1.0\n K[ix + Nx*iy, ix + Nx*iy1] -= 1.0\n K[ix1 + Nx*iy, ix + Nx*iy] -= 1.0\n K[ix + Nx*iy, ix1 + Nx*iy] -= 1.0\n\n K[ix1 + Nx*iy1, ix + Nx*iy] -= tp\n K[ix + Nx*iy, ix1 + Nx*iy1] -= tp\n K[ix1 + Nx*iy, ix + Nx*iy1] -= tp\n K[ix + Nx*iy1, ix1 + Nx*iy] -= tp\n\n K[ix + Nx*iy, ix + Nx*iy] -= mu\n exp_K = expm(-dt * K)\n inv_exp_K = expm(dt * K)\n exp_halfK = expm(-dt/2 * K)\n inv_exp_halfK = expm(dt/2 * K)\n# exp_K = np.array(mpm.expm(mpm.matrix(-dt * K)).tolist(), dtype=np.float64)\n\n U_i = np.array((U,), dtype=np.float64)\n assert U_i.shape[0] == num_i\n\n exp_lmbd = np.exp(0.5*U_i*dt) + np.sqrt(np.expm1(U_i*dt))\n# exp_lmbd = np.exp(np.arccosh(np.exp(0.5*U_i*dt)))\n# exp_lmbd = float(mpm.exp(mpm.acosh(mpm.exp(0.5*float(U*dt)))))\n exp_lambda = np.array((1.0/exp_lmbd[map_i], exp_lmbd[map_i]))\n delll = np.array((exp_lmbd[map_i]**2 - 1, exp_lmbd[map_i]**-2 - 1))\n\n if filename is None:\n filename = \"{}.h5\".format(seed)\n with h5py.File(filename, \"w\" if overwrite else \"x\") as f:\n # parameters not used by dqmc code, but useful for analysis\n f.create_group(\"metadata\")\n f[\"metadata\"][\"version\"] = 0.0\n f[\"metadata\"][\"model\"] = \"Hubbard\"\n f[\"metadata\"][\"Nx\"] = Nx\n f[\"metadata\"][\"Ny\"] = Ny\n f[\"metadata\"][\"bps\"] = bps\n f[\"metadata\"][\"b2ps\"] = b2ps\n f[\"metadata\"][\"U\"] = U\n f[\"metadata\"][\"t'\"] = tp\n f[\"metadata\"][\"mu\"] = mu\n f[\"metadata\"][\"beta\"] = L*dt\n\n # parameters used by dqmc code\n f.create_group(\"params\")\n # model parameters\n f[\"params\"][\"N\"] = np.array(N, dtype=np.int32)\n f[\"params\"][\"L\"] = np.array(L, dtype=np.int32)\n f[\"params\"][\"map_i\"] = map_i\n f[\"params\"][\"map_ij\"] = map_ij\n f[\"params\"][\"bonds\"] = bonds\n f[\"params\"][\"bond2s\"] = bond2s\n f[\"params\"][\"map_bs\"] = map_bs\n f[\"params\"][\"map_bb\"] = map_bb\n f[\"params\"][\"map_b2b2\"] = map_b2b2\n f[\"params\"][\"K\"] = K\n f[\"params\"][\"U\"] = U_i\n f[\"params\"][\"dt\"] = np.array(dt, dtype=np.float64)\n\n # simulation parameters\n f[\"params\"][\"n_matmul\"] = np.array(n_matmul, dtype=np.int32)\n f[\"params\"][\"n_delay\"] = np.array(n_delay, dtype=np.int32)\n f[\"params\"][\"n_sweep_warm\"] = np.array(n_sweep_warm, dtype=np.int32)\n f[\"params\"][\"n_sweep_meas\"] = np.array(n_sweep_meas, dtype=np.int32)\n f[\"params\"][\"period_eqlt\"] = np.array(period_eqlt, dtype=np.int32)\n f[\"params\"][\"period_uneqlt\"] = np.array(period_uneqlt, dtype=np.int32)\n f[\"params\"][\"meas_bond_corr\"] = meas_bond_corr\n f[\"params\"][\"meas_2bond_corr\"] = meas_2bond_corr\n f[\"params\"][\"meas_energy_corr\"] = meas_energy_corr\n f[\"params\"][\"meas_nematic_corr\"] = meas_nematic_corr\n f[\"params\"][\"init_rng\"] = init_rng # save if need to replicate data\n\n # precalculated stuff\n f[\"params\"][\"num_i\"] = num_i\n f[\"params\"][\"num_ij\"] = num_ij\n f[\"params\"][\"num_b\"] = num_b\n f[\"params\"][\"num_b2\"] = num_b2\n f[\"params\"][\"num_bs\"] = num_bs\n f[\"params\"][\"num_bb\"] = num_bb\n f[\"params\"][\"num_b2b2\"] = num_b2b2\n f[\"params\"][\"degen_i\"] = degen_i\n f[\"params\"][\"degen_ij\"] = degen_ij\n 
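# NOTE: the degen_* arrays presumably count how many site/bond pairs fall into\n        # each translation-equivalence class; they are stored so that averaged\n        # measurements can later be normalized per class.\n        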
f[\"params\"][\"degen_bs\"] = degen_bs\n f[\"params\"][\"degen_bb\"] = degen_bb\n f[\"params\"][\"degen_b2b2\"] = degen_b2b2\n f[\"params\"][\"exp_K\"] = exp_K\n f[\"params\"][\"inv_exp_K\"] = inv_exp_K\n f[\"params\"][\"exp_halfK\"] = exp_halfK\n f[\"params\"][\"inv_exp_halfK\"] = inv_exp_halfK\n f[\"params\"][\"exp_lambda\"] = exp_lambda\n f[\"params\"][\"del\"] = delll\n f[\"params\"][\"F\"] = np.array(L//n_matmul, dtype=np.int32)\n f[\"params\"][\"n_sweep\"] = np.array(n_sweep_warm + n_sweep_meas,\n dtype=np.int32)\n\n # simulation state\n f.create_group(\"state\")\n f[\"state\"][\"sweep\"] = np.array(0, dtype=np.int32)\n f[\"state\"][\"rng\"] = init_rng\n f[\"state\"][\"hs\"] = init_hs\n\n # measurements\n f.create_group(\"meas_eqlt\")\n f[\"meas_eqlt\"][\"n_sample\"] = np.array(0, dtype=np.int32)\n f[\"meas_eqlt\"][\"sign\"] = np.array(0.0, dtype=np.float64)\n f[\"meas_eqlt\"][\"density\"] = np.zeros(num_i, dtype=np.float64)\n f[\"meas_eqlt\"][\"double_occ\"] = np.zeros(num_i, dtype=np.float64)\n f[\"meas_eqlt\"][\"g00\"] = np.zeros(num_ij, dtype=np.float64)\n f[\"meas_eqlt\"][\"nn\"] = np.zeros(num_ij, dtype=np.float64)\n f[\"meas_eqlt\"][\"xx\"] = np.zeros(num_ij, dtype=np.float64)\n f[\"meas_eqlt\"][\"zz\"] = np.zeros(num_ij, dtype=np.float64)\n f[\"meas_eqlt\"][\"pair_sw\"] = np.zeros(num_ij, dtype=np.float64)\n if meas_energy_corr:\n f[\"meas_eqlt\"][\"kk\"] = np.zeros(num_bb, dtype=np.float64)\n f[\"meas_eqlt\"][\"kv\"] = np.zeros(num_bs, dtype=np.float64)\n f[\"meas_eqlt\"][\"kn\"] = np.zeros(num_bs, dtype=np.float64)\n f[\"meas_eqlt\"][\"vv\"] = np.zeros(num_ij, dtype=np.float64)\n f[\"meas_eqlt\"][\"vn\"] = np.zeros(num_ij, dtype=np.float64)\n\n if period_uneqlt > 0:\n f.create_group(\"meas_uneqlt\")\n f[\"meas_uneqlt\"][\"n_sample\"] = np.array(0, dtype=np.int32)\n f[\"meas_uneqlt\"][\"sign\"] = np.array(0.0, dtype=np.float64)\n f[\"meas_uneqlt\"][\"gt0\"] = np.zeros(num_ij*L, dtype=np.float64)\n f[\"meas_uneqlt\"][\"nn\"] = np.zeros(num_ij*L, dtype=np.float64)\n f[\"meas_uneqlt\"][\"xx\"] = np.zeros(num_ij*L, dtype=np.float64)\n f[\"meas_uneqlt\"][\"zz\"] = np.zeros(num_ij*L, dtype=np.float64)\n f[\"meas_uneqlt\"][\"pair_sw\"] = np.zeros(num_ij*L, dtype=np.float64)\n if meas_bond_corr:\n f[\"meas_uneqlt\"][\"pair_bb\"] = np.zeros(num_bb*L, dtype=np.float64)\n f[\"meas_uneqlt\"][\"jj\"] = np.zeros(num_bb*L, dtype=np.float64)\n f[\"meas_uneqlt\"][\"jsjs\"] = np.zeros(num_bb*L, dtype=np.float64)\n f[\"meas_uneqlt\"][\"kk\"] = np.zeros(num_bb*L, dtype=np.float64)\n f[\"meas_uneqlt\"][\"ksks\"] = np.zeros(num_bb*L, dtype=np.float64)\n if meas_2bond_corr:\n f[\"meas_uneqlt\"][\"pair_b2b2\"] = np.zeros(num_b2b2*L, dtype=np.float64)\n f[\"meas_uneqlt\"][\"j2j2\"] = np.zeros(num_b2b2*L, dtype=np.float64)\n f[\"meas_uneqlt\"][\"js2js2\"] = np.zeros(num_b2b2*L, dtype=np.float64)\n f[\"meas_uneqlt\"][\"k2k2\"] = np.zeros(num_b2b2*L, dtype=np.float64)\n f[\"meas_uneqlt\"][\"ks2ks2\"] = np.zeros(num_b2b2*L, dtype=np.float64)\n if meas_energy_corr:\n f[\"meas_uneqlt\"][\"kv\"] = np.zeros(num_bs*L, dtype=np.float64)\n f[\"meas_uneqlt\"][\"kn\"] = np.zeros(num_bs*L, dtype=np.float64)\n f[\"meas_uneqlt\"][\"vv\"] = np.zeros(num_ij*L, dtype=np.float64)\n f[\"meas_uneqlt\"][\"vn\"] = np.zeros(num_ij*L, dtype=np.float64)\n if meas_nematic_corr:\n f[\"meas_uneqlt\"][\"nem_nnnn\"] = np.zeros(num_bb*L, dtype=np.float64)\n f[\"meas_uneqlt\"][\"nem_ssss\"] = np.zeros(num_bb*L, dtype=np.float64)\n return filename\n\n\ndef create_batch(Nfiles=1, prefix=None, seed=None, Nx=16, Ny=4, L=40, **kwargs):\n 
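# Sketch of intended use (hypothetical values): a call such as\n    # create_batch(Nfiles=4, prefix=\"run\") would write run_0.h5 ... run_3.h5;\n    # each extra file reuses file_0's parameters but gets an independent RNG\n    # stream via rand_jump, so parallel Markov chains should not overlap.\n    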
if seed is None:\n seed = int(time.time())\n if prefix is None:\n prefix = str(seed)\n rng = rand_seed(seed)\n\n file_0 = \"{}_{}.h5\".format(prefix, 0)\n\n create_1(filename=file_0, seed=seed, Nx=Nx, Ny=Ny, L=L, **kwargs)\n\n for i in range(1, Nfiles):\n rand_jump(rng)\n init_rng = rng.copy()\n init_hs = np.zeros((L, Nx*Ny), dtype=np.int32)\n\n for l in range(L):\n for r in range(Nx*Ny):\n init_hs[l, r] = rand_uint(init_rng) >> np.uint64(63)\n\n file_i = \"{}_{}.h5\".format(prefix, i)\n shutil.copy2(file_0, file_i)\n with h5py.File(file_i, \"r+\") as f:\n f[\"params\"][\"init_rng\"][...] = init_rng\n f[\"state\"][\"rng\"][...] = init_rng\n f[\"state\"][\"hs\"][...] = init_hs\n return file_0 if Nfiles == 1 else \"{} ... {}\".format(file_0, file_i)\n\n\ndef main(argv):\n kwargs = {}\n for arg in argv[1:]:\n eq = arg.find(\"=\")\n if eq == -1:\n print(\"couldn't find \\\"=\\\" in argument \" + arg)\n return\n key = arg[:eq]\n val = arg[(eq + 1):]\n try:\n val = int(val)\n except ValueError:\n try:\n val = float(val)\n except:\n pass\n kwargs[key] = val\n print(\"created simulation files:\", create_batch(**kwargs))\n\nif __name__ == \"__main__\":\n main(sys.argv)\n","sub_path":"util/gen_1band.py","file_name":"gen_1band.py","file_ext":"py","file_size_in_byte":16575,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"564426282","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Oct 08 14:24:20 2018\n\n@author: svc_ccg\n\"\"\"\n\nfrom __future__ import division\nimport os\nimport glob\nimport datetime\nimport numpy as np\nimport pandas as pd\nimport scipy\nfrom visual_behavior.translator.foraging2 import data_to_change_detection_core\nfrom visual_behavior.translator.core import create_extended_dataframe\nimport matplotlib\nmatplotlib.rcParams['pdf.fonttype'] = 42\nimport matplotlib.pyplot as plt\n\n\ndef calculateHitRate(hits,misses,adjusted=False):\n n = hits+misses\n if n==0:\n return np.nan\n hitRate = hits/n\n if adjusted:\n if hitRate==0:\n hitRate = 0.5/n\n elif hitRate==1:\n hitRate = 1-0.5/n\n return hitRate\n\ndef calculateDprime(hits,misses,falseAlarms,correctRejects):\n hitRate = calculateHitRate(hits,misses,adjusted=True)\n falseAlarmRate = calculateHitRate(falseAlarms,correctRejects,adjusted=True)\n z = [scipy.stats.norm.ppf(r) for r in (hitRate,falseAlarmRate)]\n return z[0]-z[1]\n\n\npickleDir = r'\\\\EphysRoom342\\Data\\behavior pickle files'\n\nmouseInfo = (('385533',('09072018','09102018','09112018','09172018')),\n ('390339',('09192018','09202018','09212018')),\n ('394873',('10042018','10052018')),\n ('403472',('10312018','11012018')),\n ('403468',('11142018','11152018')),\n ('412624',('11292018','11302018')),\n ('416656',('03122019','03132019','03142019')),\n ('409096',('03212019',)),\n ('417882',('03262019','03272019')),\n ('408528',('04042019','04052019')),\n ('408527',('04102019','04112019')),\n ('421323',('04252019','04262019')),\n ('422856',('04302019','05012019')),\n ('423749',('05162019','05172019')),\n ('427937',('06062019','06072019')),\n ('423745',('06122019','06132019')),\n )\n\nunloadablePklFiles = []\ntrainingDay = []\nisImages = []\nisRig = []\nisEphys = []\nrewardsEarned = []\ndprimeOverall = []\ndprimeEngaged = []\nprobEngaged = []\nimageHitRate = []\nimageHitRateEngaged = []\nframeRate = 60.0\nwindowFrames = 60*frameRate\nfor mouseID,ephysDates in mouseInfo: \n print('loading mouse '+mouseID)\n ephysDateTimes = [datetime.datetime.strptime(d,'%m%d%Y') for d in ephysDates] if ephysDates is not None else 
(None,)\n trainingDate = []\n trainingStage = []\n rigID = []\n rewardsEarned.append([])\n dprimeOverall.append([])\n dprimeEngaged.append([])\n probEngaged.append([])\n imageHitRate.append([])\n imageHitRateEngaged.append([])\n unloadablePklFiles.append([])\n for pklFile in glob.glob(os.path.join(pickleDir,mouseID,'*.pkl')):\n try:\n core_data = data_to_change_detection_core(pd.read_pickle(pklFile))\n trials = create_extended_dataframe(\n trials=core_data['trials'],\n metadata=core_data['metadata'],\n licks=core_data['licks'],\n time=core_data['time'])\n except:\n unloadablePklFiles[-1].append(pklFile)\n continue\n \n trainingDate.append(datetime.datetime.strptime(os.path.basename(pklFile)[:6],'%y%m%d'))\n trainingStage.append(core_data['metadata']['stage'])\n rigID.append(core_data['metadata']['rig_id'])\n \n autoRewarded = np.array(trials['auto_rewarded']).astype(bool)\n earlyResponse = np.array(trials['response_type']=='EARLY_RESPONSE')\n ignore = earlyResponse | autoRewarded\n miss = np.array(trials['response_type']=='MISS')\n hit = np.array(trials['response_type']=='HIT')\n falseAlarm = np.array(trials['response_type']=='FA')\n correctReject = np.array(trials['response_type']=='CR')\n \n rewardsEarned[-1].append(hit.sum())\n dprimeOverall[-1].append(calculateDprime(hit.sum(),miss.sum(),falseAlarm.sum(),correctReject.sum()))\n \n startFrame = int(trials['startframe'][0])\n endFrame = int(np.array(trials['endframe'])[-1])\n changeFrames = np.array(trials['change_frame'])\n hitFrames = np.zeros(endFrame,dtype=bool)\n hitFrames[changeFrames[hit].astype(int)] = True\n binSize = int(frameRate*60)\n halfBin = int(binSize/2)\n engagedThresh = 2\n rewardRate = np.zeros(hitFrames.size,dtype=int)\n rewardRate[halfBin:halfBin+hitFrames.size-binSize+1] = np.correlate(hitFrames,np.ones(binSize))\n rewardRate[:halfBin] = rewardRate[halfBin]\n rewardRate[-halfBin:] = rewardRate[-halfBin]\n probEngaged[-1].append(np.sum(rewardRate>engagedThresh)/rewardRate.size)\n engagedTrials = rewardRate[changeFrames[~ignore].astype(int)]>engagedThresh\n dprimeEngaged[-1].append(calculateDprime(*(r[~ignore][engagedTrials].sum() for r in (hit,miss,falseAlarm,correctReject))))\n \n imageNames = [i['image_name'] for i in core_data['image_set']['image_attributes']]\n changeImage = np.array(trials['change_image_name'])\n imageHitRate[-1].append([])\n imageHitRateEngaged[-1].append([])\n for img in imageNames:\n imgTrials = changeImage==img\n ind = imgTrials & (~ignore)\n imageHitRate[-1][-1].append(calculateHitRate(hit[ind].sum(),miss[ind].sum()))\n engaged = rewardRate[changeFrames[ind].astype(int)]>engagedThresh\n imageHitRateEngaged[-1][-1].append(calculateHitRate(hit[ind][engaged].sum(),miss[ind][engaged].sum()))\n \n trainingDay.append(np.array([(d-min(trainingDate)).days+1 for d in trainingDate]))\n isImages.append(np.array(['images' in s for s in trainingStage]))\n isRig.append(np.array([r=='NP3' for r in rigID]))\n isEphys.append(np.array([d in ephysDateTimes for d in trainingDate]))\n\n\nparams = (rewardsEarned,dprimeEngaged,probEngaged)\nlabels = ('Rewards Earned','d prime','prob. 
engaged')\nfor ind,(mouseID,ephysDates) in enumerate(mouseInfo): \n fig = plt.figure(facecolor='w')\n for i,(prm,lbl,ymax) in enumerate(zip(params,labels,(None,None,None))):\n ax = plt.subplot(len(params),1,i+1)\n for j,(d,p) in enumerate(zip(trainingDay[ind],prm[ind])):\n mec = 'r' if isEphys[ind][j] else 'k'\n mfc = mec if isRig[ind][j] else 'none'\n mrk = 'o' if isImages[ind][j] else 's'\n ax.plot(d,p,mrk,mec=mec,mfc=mfc,ms=8)\n for side in ('right','top'):\n ax.spines[side].set_visible(False)\n ax.tick_params(direction='out',top=False,right=False,labelsize=12)\n ax.set_xlim([0,max(trainingDay[ind])+1])\n ylimMax = np.nanmax(prm[ind]) if ymax is None else ymax\n ax.set_ylim([0,1.05*ylimMax])\n ax.set_ylabel(lbl,fontsize=14)\n if i==0:\n ax.set_title(mouseID,fontsize=14)\n if i==len(params)-1:\n ax.set_xlabel('Day',fontsize=14)\n plt.tight_layout()\n\n\nnumRewards = []\ndpr = []\nengaged = []\nfor day,rig,ephys,rewards,d,eng in zip(trainingDay,isRig,isEphys,rewardsEarned,dprimeEngaged,probEngaged):\n numRewards.append([])\n dpr.append([])\n engaged.append([])\n sortOrder = np.argsort(day)\n rig,ephys,rewards,d,eng = [np.array(a)[sortOrder] for a in (rig,ephys,rewards,d,eng)]\n if not all(rig):\n lastNSBDay = np.where(~rig)[0][-1]\n numRewards[-1].append(rewards[lastNSBDay])\n dpr[-1].append(d[lastNSBDay])\n engaged[-1].append(eng[lastNSBDay])\n firstRigDay = np.where(rig)[0][0]\n numRewards[-1].append(rewards[firstRigDay])\n dpr[-1].append(d[firstRigDay])\n engaged[-1].append(eng[firstRigDay])\n else:\n numRewards[-1].extend([np.nan]*2)\n dpr[-1].extend(([np.nan]*2))\n engaged[-1].extend(([np.nan]*2))\n ephysInd = np.where(ephys)[0]\n lastNonEphysDays = [ephysInd[0]-2,ephysInd[0]-1]\n numRewards[-1].extend(rewards[lastNonEphysDays])\n dpr[-1].extend(d[lastNonEphysDays])\n engaged[-1].extend(eng[lastNonEphysDays]) \n ephysDays = ephysInd[:2]\n numRewards[-1].extend(rewards[ephysDays])\n dpr[-1].extend(d[ephysDays])\n engaged[-1].extend(eng[ephysDays])\n if len(ephysDays)<2:\n numRewards[-1].append(np.nan)\n dpr[-1].append(np.nan)\n engaged[-1].append(np.nan)\n\nparams = (numRewards,engaged,dpr)\nparamNames = ('Rewards Earned','Prob. 
Engaged','d prime')\n\nshow = slice(0,6)\nfig = plt.figure(facecolor='w',figsize=(8,6))\nfor i,(prm,ylab,ylim) in enumerate(zip(params,paramNames,([0,300],[0,1],[0,4]))):\n ax = plt.subplot(len(params),1,i+1)\n ymax = 0\n for p,rig in zip(prm,isRig):\n if not all(rig):\n ax.plot(p[show],'o-',color='0.8',mec='0.8',ms=2)\n ymax = max(ymax,max(p[show]))\n prm = np.array([p for p,rig in zip(prm,isRig) if not all(rig)])\n meanPrm = np.nanmean(prm,axis=0)\n n = np.sum(~np.isnan(prm),axis=0)\n print(n)\n stdPrm = np.nanstd(prm,axis=0)\n semPrm = stdPrm/n**0.5\n ax.plot(meanPrm[show],'o',mfc='k',mec='k',ms=8)\n for x,(m,s) in enumerate(zip(meanPrm[show],semPrm[show])):\n ax.plot([x]*2,m+np.array([-s,s]),'k',linewidth=2)\n for side in ('right','top'):\n ax.spines[side].set_visible(False)\n ax.tick_params(direction='out',top=False,right=False,labelsize=12)\n ax.set_xlim([-0.25,show.stop-show.start-0.75])\n ymax = 1.05*max(ymax,np.nanmax((meanPrm+stdPrm)[show])) if ylim is None else ylim[1]\n ax.plot([(show.stop-show.start)-2.5]*2,[0,ymax],'k--')\n ax.set_ylim([0,ymax])\n ax.set_xticks(np.arange(len(labels[show])))\n if i==len(params)-1:\n ax.set_xticklabels(['Last NSB','First NP3',-2,-1,1,2])\n ax.set_xlabel('Day',fontsize=12)\n else:\n ax.set_xticklabels([])\n ax.set_ylabel(ylab,fontsize=12)\n ax.yaxis.set_label_coords(-0.075,0.5)\n ax.locator_params(axis='y',nbins=3)\nfig.text(0.37,0.95,'Training',fontsize=14,horizontalalignment='center')\nfig.text(0.79,0.95,'Ephys',fontsize=14,horizontalalignment='center')\n\n\nmeanImageHitRate = []\nfor m,day in zip(imageHitRateEngaged,trainingDay):\n fig = plt.figure(facecolor='w')\n ax = plt.subplot(1,1,1)\n sortOrder = np.argsort(day)\n h = np.array([m[i] for i in sortOrder if len(m[i])>0])\n meanImageHitRate.append(h[-4:].mean(axis=0))\n ax.imshow(h,clim=(0,1),cmap='gray',interpolation='none')\n for side in ('right','top'):\n ax.spines[side].set_visible(False)\n ax.tick_params(direction='out',top=False,right=False,labelsize=12)\n ax.set_xlabel('image')\n ax.set_ylabel('day')\n \n \n \nplt.imshow(np.array(meanImageHitRate)[3:],clim=(0,1),cmap='gray',interpolation='none')\n\n","sub_path":"behaviorTimeline.py","file_name":"behaviorTimeline.py","file_ext":"py","file_size_in_byte":10550,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"118001562","text":"\"\"\"\n\nAuthor: Nigel Schuster \n\nThis module provides a simple abstraction for accessing Facebook messages data\nfrom a downloaded archive from facebook.com\n\n\"\"\"\n\nfrom __future__ import print_function\n\nfrom bs4 import BeautifulSoup\nfrom fbchat_archive_parser.parser import MessageHtmlParser\nimport os\nimport pandas as pd\nimport warnings\n\n_messages_file = None\n\n\ndef initialize(dump_directory=\".\"):\n \"\"\"\n Asserts the messages.htm file in the Facebook dump can be found and saves its location for\n later usage in this module\n\n Args:\n dump_directory: path to the directory that contains messages.htm file\n \"\"\"\n global _messages_file\n fb_message_filename = \"messages.htm\"\n _messages_file = os.path.join(dump_directory, fb_message_filename)\n if not os.path.isfile(_messages_file):\n print(\"The directory provided did not contain messages.htm, the directory is usually \"\n \"/html within the archive downloaded from facebook.com\")\n _messages_file = None\n\n\ndef resolve_user_id(user_id):\n \"\"\"\n Tries to map the identifier facebook provides to the name of the user\n\n Args:\n user_id: identifier string that is provided in the 
messages file\n\n Returns:\n The name of the user if it is able to find it, otherwise the input\n \"\"\"\n # TODO (neitsch): This method is not implemented yet. Use the user_id\n # (format: 12345678@facebook.com) to get the actual name of the user\n return user_id\n\n\ndef get_cleaned_fully_merged_messages(strip_html_content=True,\n resolve_fb_id=False):\n \"\"\"\n Parses the messages file to create dataframes that contain the messages and their senders.\n\n Args:\n strip_html_content: The messages.htm file might contain some html tags in messages; this\n option will remove all html markup\n resolve_fb_id: The messages.htm file doesn't always print Facebook names, but sometimes ids\n instead; this will attempt to resolve them, but requires a web request per id and is not\n guaranteed to work\n\n Returns:\n a dataframe that contains all messages with info about their senders\n \"\"\"\n if not _messages_file:\n print(\"Please initialize the facebook_connector module.\")\n return\n chats = MessageHtmlParser(path=_messages_file).parse()\n me = chats.user\n addresses = set()\n messages = []\n # Suppressing warning that BS4 will display when a message only contains a URL\n warnings.filterwarnings(\"ignore\", category=UserWarning, module='bs4')\n try:\n threads = chats.threads.itervalues()\n except AttributeError:\n threads = chats.threads.values()\n for thread in threads:\n # This set holds the list of participants after their identifier has been\n # resolved to their name (see resolve_user_id)\n resolved_participants = set()\n for participant in thread.participants:\n if participant is not None and not participant.isspace():\n resolved_participant = resolve_user_id(\n participant) if resolve_fb_id else participant\n resolved_participants.add(resolved_participant)\n addresses.update(resolved_participants)\n for message in thread.messages:\n if not message.content or message.content.isspace():\n continue\n sender = resolve_user_id(\n message.sender) if resolve_fb_id else message.sender\n from_me = sender == me\n if strip_html_content:\n content = BeautifulSoup(message.content, \"html.parser\").text\n else:\n content = message.content\n # In the following we add a single message to our dataframe\n if from_me:\n # If the user is sending a message to a group, then we need to add\n # one message per group participant to the dataframe\n for participant in resolved_participants:\n messages.append({\n 'text': content,\n 'date': message.timestamp,\n 'is_from_me': from_me,\n 'full_name': participant\n })\n else:\n messages.append({\n 'text': content,\n 'date': message.timestamp,\n 'is_from_me': from_me,\n 'full_name': sender\n })\n address_book_df = pd.DataFrame(data=list(addresses), columns=[\"full_name\"])\n messages_df = pd.DataFrame.from_records(messages)\n return messages_df, address_book_df\n","sub_path":"facebook_connector.py","file_name":"facebook_connector.py","file_ext":"py","file_size_in_byte":4685,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"357207179","text":"#------------------------------------------------------------------------------\n# Copyright (c) 2012, Enthought, Inc.\n# All rights reserved.\n#------------------------------------------------------------------------------\nimport wx\n\nfrom ....noncomponents.abstract_image import AbstractTkImage\n\n\nFLAG_MAP = {\n 'bmp': wx.BITMAP_TYPE_BMP,\n 'gif': wx.BITMAP_TYPE_GIF,\n 'jpg': wx.BITMAP_TYPE_JPEG,\n 'jpeg': wx.BITMAP_TYPE_JPEG,\n 'png': wx.BITMAP_TYPE_PNG,\n 'pcx': 
wx.BITMAP_TYPE_PCX,\n 'pnm': wx.BITMAP_TYPE_PNM,\n 'tif': wx.BITMAP_TYPE_TIF,\n 'tiff': wx.BITMAP_TYPE_TIF,\n 'tga': wx.BITMAP_TYPE_TGA,\n 'xpm': wx.BITMAP_TYPE_XPM,\n 'ico': wx.BITMAP_TYPE_ICO,\n 'cur': wx.BITMAP_TYPE_CUR,\n 'ani': wx.BITMAP_TYPE_ANI,\n '': wx.BITMAP_TYPE_ANY,\n}\n\n\nclass WXImage(AbstractTkImage):\n \"\"\" A Wx implementation of AbstractTkImage.\n \n \"\"\"\n def __init__(self, wximage=None):\n \"\"\" Initialize a WXImage.\n\n Parameters\n ----------\n wximage : wx.Image instance or None, optional\n A wx.Image instance which holds the data. If None is passed, \n then the wx.NullImage will be used.\n\n \"\"\"\n if wximage is None:\n wximage = wx.NullImage\n self._wximage = wximage\n \n #--------------------------------------------------------------------------\n # Abstract API Implementation\n #--------------------------------------------------------------------------\n @classmethod\n def from_data(cls, data, size):\n \"\"\" Construct an image from a bytearray of RGBA bytes.\n\n Parameters\n ----------\n data : bytearray\n A bytearray of the image data in RGBA32 format.\n\n size : (width, height)\n The width, height size tuple of the image.\n\n Returns\n -------\n results : AbstractTkImage\n An appropriate image instance.\n\n \"\"\"\n w, h = size\n\n # Split the array into color and alpha to satisfy WX\n rgb_array = bytearray(w*h*3)\n alpha_array = bytearray(w*h)\n \n rgb_array[0::3] = data[0::4]\n rgb_array[1::3] = data[1::4]\n rgb_array[2::3] = data[2::4]\n alpha_array[:] = data[3::4]\n\n return cls(wx.ImageFromDataWithAlpha(w, h, rgb_array, alpha_array))\n\n @classmethod\n def from_file(cls, path, format=''):\n \"\"\" Read in the image data from a file.\n\n Parameters\n ----------\n path : string\n The path to the image file on disk.\n\n format : string, optional\n The image format of the data in the file. 
If not given,\n then the image format will be determined automatically\n if possible.\n \n Returns\n -------\n results : AbstractTkImage\n An appropriate image instance.\n\n \"\"\"\n flag = FLAG_MAP.get(format.lower(), wx.BITMAP_TYPE_ANY)\n return cls(wx.Image(path, flag))\n\n @property\n def size(self):\n \"\"\" The size of the image as a (width, height) tuple.\n \n Returns\n -------\n result : (width, height)\n The width, height size tuple of the image.\n\n \"\"\"\n img = self._wximage\n return (img.GetWidth(), img.GetHeight())\n\n def data(self):\n \"\"\" The data for the image as a bytearray of RGBA bytes.\n \n Returns\n -------\n results : bytearray\n A bytearray of the image data in RGBA32 format.\n\n \"\"\"\n wximg = self._wximage\n width, height = wximg.GetWidth(), wximg.GetHeight()\n buf = wximg.GetDataBuffer()\n out = bytearray(width * height * 4)\n out[0::4] = buf[0::3]\n out[1::4] = buf[1::3]\n out[2::4] = buf[2::3] \n if wximg.HasAlpha():\n out[3::4] = wximg.GetAlphaBuffer()[:]\n else:\n out[3::4] = '\\xff' * (width * height)\n return out\n\n def scale(self, size, preserve_aspect_ratio=False):\n \"\"\" Create a new version of this image scaled to the given size.\n \n Parameters\n ----------\n size : (width, height)\n The size of the scaled image.\n \n preserve_aspect_ratio : bool, optional\n Whether to preserve the original aspect ratio of the image\n when scaling to the new size.\n\n Returns\n -------\n results : AbstractTkImage\n A new image of the proper scaled size.\n \n \"\"\"\n width, height = size\n if preserve_aspect_ratio:\n img_width, img_height = self.size\n ratio = float(img_width) / img_height\n if width > height:\n height = width / ratio\n else:\n width = height * ratio\n new_img = self._wximage.Scale(width, height, wx.IMAGE_QUALITY_HIGH)\n return WXImage(new_img)\n \n #--------------------------------------------------------------------------\n # Toolkit Specific\n #--------------------------------------------------------------------------\n def as_wxImage(self):\n \"\"\" Returns the internal wx.Image instance.\n\n \"\"\"\n return self._wximage\n\n def as_wxBitmap(self):\n \"\"\" Returns a wx.Bitmap generated from the underlying wx.Image.\n \n This is provided as a convenience for wx backend components \n which require a wx.Bitmap rather than a wx.Image.\n \n \"\"\"\n return wx.BitmapFromImage(self._wximage)\n\n","sub_path":"enaml/backends/wx/noncomponents/wx_image.py","file_name":"wx_image.py","file_ext":"py","file_size_in_byte":5501,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"561689023","text":"n_central_stations = 424\r\ntimeSlot = 1\r\ntimeSlotV2 = 60\r\n##############################################################\r\n# (0) feature and target length\r\nfeatureLength = 6\r\ntargetLength = 1\r\nlossTestLength = 30\r\npValueConfidenceValue = 0.1\r\n##############################################################\r\n# (1) LSTM AutoEncoder Parameters\r\nn_epoch = 5001\r\nbatch_size = 128\r\nlr = 0.0001 # learning rate\r\nn_hidden_units = 64\r\ndropout_pro = 0.01\r\nn_inputs = 1\r\nn_steps_encoder = featureLength\r\nn_steps_decoder = 3\r\ndecoderOutputSize = 1\r\n\r\ntimeRange = ['2013-07-01', '2017-09-30']\r\ntrainDataTimeRange = ['2013-07-01', '2016-09-30']\r\n# valDataTimeRange = ['2016-10-01', '2016-12-31']\r\n# testDataRange = ['2017-01-01', '2017-09-30']\r\n\r\n# trainDataLength = 186 # 22 60\r\nvalDataLength = 40\r\ntestDataLength = 80\r\n\r\ndemandTypeList = ['in', 'out', 'sum']\r\ndemandType = 
demandTypeList[0]\r\n##############################################################\r\n# (2) prediction network\r\n# with the first layer and the last layer\r\nn_epoch_pre = 6001\r\naddFeatureLength = 2 # additionalFeatureLengthDict[str(rank)]\r\npredictLength = targetLength\r\nn_units_pre = [n_hidden_units + addFeatureLength, n_hidden_units*2, n_hidden_units, n_hidden_units/2, predictLength]\r\nn_units_pre = [int(e) for e in n_units_pre]\r\n##############################################################\r\n# (3) uncertainty parameters\r\nvarB = 100","sub_path":"NYC/SharedParameters/SharedParameters.py","file_name":"SharedParameters.py","file_ext":"py","file_size_in_byte":1392,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"41818605","text":"import matplotlib.pyplot as plt\nimport numpy as np\n\n# 设置中文字体\nplt.rcParams['font.family'] = ['SimHei']\nplt.rcParams['axes.unicode_minus'] = False\n# 数据传入\nn = 1024\nx = np.random.normal(0, 1, n)\ny = np.random.normal(0, 1, n)\n\nplt.scatter(x, y)\n\nplt.title('绘制散点图')\n\n# 以分辨率为72来保存图片\nplt.savefig('散点图', dpi=72)\n# 显示图形\nplt.show()\n","sub_path":"py_for/venv/src/ctrl_project2/Python_scatterPlot.py","file_name":"Python_scatterPlot.py","file_ext":"py","file_size_in_byte":385,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"554150444","text":"#!/usr/bin/env python\n# coding=utf-8\n#\n# test_mkisofs.py - Unit test cases for COT.helpers.mkisofs submodule.\n#\n# March 2015, Glenn F. Matthews\n# Copyright (c) 2014-2015 the COT project developers.\n# See the COPYRIGHT.txt file at the top-level directory of this distribution\n# and at https://github.com/glennmatthews/cot/blob/master/COPYRIGHT.txt.\n#\n# This file is part of the Common OVF Tool (COT) project.\n# It is subject to the license terms in the LICENSE.txt file found in the\n# top-level directory of this distribution and at\n# https://github.com/glennmatthews/cot/blob/master/LICENSE.txt. 
No part\n# of COT, including this file, may be copied, modified, propagated, or\n# distributed except according to the terms contained in the LICENSE.txt file.\n\n\"\"\"Unit test cases for the COT.helpers.mkisofs submodule.\"\"\"\n\nfrom distutils.version import StrictVersion\n\nfrom .test_helper import HelperUT\nfrom COT.helpers.helper import Helper\nfrom COT.helpers.mkisofs import MkIsoFS\n\n\nclass TestMkIsoFS(HelperUT):\n \"\"\"Test cases for MkIsoFS helper class.\"\"\"\n\n def setUp(self):\n \"\"\"Test case setup function called automatically prior to each test.\"\"\"\n self.helper = MkIsoFS()\n super(TestMkIsoFS, self).setUp()\n\n def test_get_version_mkisofs(self):\n \"\"\"Test .version getter logic for mkisofs.\"\"\"\n self.fake_output = (\"mkisofs 3.00 (--) Copyright (C) 1993-1997 \"\n \"Eric Youngdale (C) 1997-2010 Jörg Schilling\")\n self.assertEqual(StrictVersion(\"3.0\"), self.helper.version)\n\n def test_get_version_genisoimage(self):\n \"\"\"Test .version getter logic for genisoimage.\"\"\"\n self.fake_output = \"genisoimage 1.1.11 (Linux)\"\n self.assertEqual(StrictVersion(\"1.1.11\"), self.helper.version)\n\n def test_find_mkisofs(self):\n \"\"\"If mkisofs is found, use it.\"\"\"\n def find_one(self, name):\n if name == \"mkisofs\":\n return \"/mkisofs\"\n return None\n Helper.find_executable = find_one\n self.assertEqual(\"mkisofs\", self.helper.name)\n self.assertEqual(self.helper.path, \"/mkisofs\")\n\n def test_find_genisoimage(self):\n \"\"\"If mkisofs is not found, but genisoimage is, use that.\"\"\"\n def find_one(self, name):\n if name == \"genisoimage\":\n return \"/genisoimage\"\n return None\n Helper.find_executable = find_one\n self.assertEqual(\"genisoimage\", self.helper.name)\n self.assertEqual(self.helper.path, \"/genisoimage\")\n\n def test_install_helper_already_present(self):\n \"\"\"Don't re-install if already installed.\"\"\"\n self.helper.install_helper()\n self.assertEqual([], self.last_argv)\n self.assertLogged(**self.ALREADY_INSTALLED)\n\n def test_install_helper_port(self):\n \"\"\"Test installation via 'port'.\"\"\"\n Helper.find_executable = self.stub_find_executable\n Helper.PACKAGE_MANAGERS['apt-get'] = False\n Helper.PACKAGE_MANAGERS['port'] = True\n Helper._port_updated = False\n self.helper.install_helper()\n self.assertEqual([\n ['sudo', 'port', 'selfupdate'],\n ['sudo', 'port', 'install', 'cdrtools'],\n ], self.last_argv)\n self.assertTrue(Helper._port_updated)\n # Make sure we don't 'port selfupdate' again unnecessarily\n self.last_argv = []\n self.helper.install_helper()\n self.assertEqual([\n ['sudo', 'port', 'install', 'cdrtools']\n ], self.last_argv)\n\n def test_install_helper_apt_get(self):\n \"\"\"Test installation via 'apt-get'.\"\"\"\n Helper.find_executable = self.stub_find_executable\n Helper.PACKAGE_MANAGERS['apt-get'] = True\n Helper.PACKAGE_MANAGERS['port'] = False\n Helper.PACKAGE_MANAGERS['yum'] = False\n Helper._apt_updated = False\n self.fake_output = 'not installed'\n self.helper.install_helper()\n self.assertEqual([\n ['dpkg', '-s', 'genisoimage'],\n ['sudo', 'apt-get', '-q', 'update'],\n ['sudo', 'apt-get', '-q', 'install', 'genisoimage'],\n ], self.last_argv)\n self.assertEqual('genisoimage', self.helper.name)\n self.assertTrue(Helper._apt_updated)\n # Make sure we don't 'apt-get update' again unnecessarily\n self.last_argv = []\n self.helper.install_helper()\n self.assertEqual([\n ['dpkg', '-s', 'genisoimage'],\n ['sudo', 'apt-get', '-q', 'install', 'genisoimage'],\n ], self.last_argv)\n\n def 
test_install_helper_unsupported(self):\n \"\"\"Installation fails with neither apt-get nor port nor yum.\"\"\"\n Helper.find_executable = self.stub_find_executable\n Helper.PACKAGE_MANAGERS['apt-get'] = False\n Helper.PACKAGE_MANAGERS['port'] = False\n Helper.PACKAGE_MANAGERS['yum'] = False\n self.system = \"Windows\"\n with self.assertRaises(NotImplementedError):\n self.helper.install_helper()\n","sub_path":"COT/helpers/tests/test_mkisofs.py","file_name":"test_mkisofs.py","file_ext":"py","file_size_in_byte":4982,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"593093370","text":"# Section 1R\n#%% Test NP\nimport numpy as np\nx = np.array([[1,2,3],[4,5,6]])\nprint(x)\n#%% Test SciPy\nfrom scipy import sparse\neye = np.eye(4)\nprint (\"NumPy array:\\n{}\".format(eye))\n\nsparse_martix = sparse.csr_matrix(eye)\nprint(\"\\nSciPy sparse.csr martix:\\n{}\".format(sparse_martix))\n\ndata = np.ones(4)\nrow_indices = np.arange(4)\ncol_indices = np.arange(4)\neye_coo = sparse.coo_matrix((data,(row_indices,col_indices)))\nprint(\"COO :\\n {}\".format(eye_coo))\n#%% Test Matplotlib \nimport matplotlib.pyplot as plt\nx = np.linspace(-10,10,100)\ny=np.sin(x)\nplt.plot(x,y,marker=\"x\",color=\"black\")\n#%% Test Pandas\nimport pandas as pd\n# create a simple dataset of people\ndata = {'Name': [\"John\", \"Anna\", \"Peter\", \"Linda\"],\n 'Location' : [\"New York\", \"Paris\", \"Berlin\", \"London\"],\n 'Age' : [24, 13, 53, 33]\n }\n\ndata_pandas = pd.DataFrame(data)\n# IPython.display allows \"pretty printing\" of dataframes\n# in the Jupyter notebook\ndisplay(data_pandas)\ndisplay(data_pandas[data_pandas.Age >30])\n#%% Section 1.7\n# Load Data\nimport mglearn\nfrom IPython.display import display\nfrom sklearn.datasets import load_iris\niris_dataset = load_iris()\n# Data type View\nprint(\"Keys of iris_dataset: {}\".format(iris_dataset.keys()))\nprint(iris_dataset['DESCR'][:193] + \"\\n...\") # DESCR is a key value for data descriptor\nprint(\"Target names: {}\".format(iris_dataset['target_names']))\nprint(\"Feature names: {}\".format(iris_dataset['feature_names']))\nprint(\"Type of data: {}\".format(type(iris_dataset['data'])))\nprint(\"Shape of data: {}\".format(iris_dataset['data'].shape))\nprint(\"First five rows of data:\\n{}\".format(iris_dataset['data'][:5]))\n#%% Sample set split \nfrom sklearn.model_selection import train_test_split\nX_train, X_test, y_train, y_test = train_test_split(\n iris_dataset['data'], iris_dataset['target'], random_state=0)\n#%% Data View\n# create dataframe from data in X_train\n# label the columns using the strings in iris_dataset.feature_names\niris_dataframe = pd.DataFrame(X_train, columns=iris_dataset[\"feature_names\"])\n# create a scatter matrix from the dataframe, color by y_train\npd.plotting.scatter_matrix(iris_dataframe, c=y_train, figsize=(15, 15), marker='o',\n hist_kwds={'bins': 20}, s=60, alpha=.8, cmap=mglearn.cm3)\n#%% KNN models\nfrom sklearn.neighbors import KNeighborsClassifier\nknn = KNeighborsClassifier(n_neighbors=1)\nknn.fit(X_train, y_train)\n# prediction\nX_new = np.array([[5, 2.9, 1, 0.2]])\nprint(\"X_new.shape: {}\".format(X_new.shape))\nprediction = knn.predict(X_new)\nprint(\"Prediction: {}\".format(prediction))\nprint(\"Predicted target name: {}\".format(\n iris_dataset['target_names'][prediction]))\n# Test set \ny_pred = knn.predict(X_test)\nprint(\"Test set score: {:.2f}\".format(np.mean(y_pred == 
y_test)))\n#%%\n\n\n\n\n\n\n\n","sub_path":"Sec_01/Sec_01.py","file_name":"Sec_01.py","file_ext":"py","file_size_in_byte":2774,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"128500392","text":"import random, math\n\n# Base Model for all DTLZ\nclass Model(object):\n def gen(self):\n while True:\n for i in xrange(self.decisionsN):\n self.decisions[i] = random.uniform(self.bottom[i], self.top[i])\n if self.isValid(): \n break\n\n def __init__(self):\n self.objectivesN, self.decisionsN = 0, 0\n self.decisions, self.prevDecision, self.objectives = [], [], []\n self.top, self.bottom = [0], [0]\n self.gen()\n\n def eval(self):\n return sum(self.getObjectives())\n\n def copy(self,other):\n self.decisionsN, self.objectivesN = other.decisionsN, other.objectivesN\n self.decisions, self.prevDecision, self.objectives = other.decisions[:], other.prevDecision[:], other.objectives[:]\n self.top, self.bottom = other.top[:], other.bottom[:]\n\n def getObjectives(self):\n return []\n\n def isValid(self):\n for i in xrange(0, self.decisionsN):\n if self.decisions[i] < self.bottom[i] or self.decisions[i] > self.top[i]:\n return False\n return True\n \nclass DTLZ1(Model):\n def __init__(self, dec = 10, obj = 2):\n self.decisionsN, self.objectivesN = dec, obj\n self.decisions, self.prevDecision, self.objectives = [0 for x in xrange(dec)], [], []\n self.top, self.bottom = [1 for x in xrange(dec)], [0 for x in xrange(dec)]\n self.gen()\n\n def getObjectives(self):\n if self.prevDecision == self.decisions:\n return self.objectives\n \n obj = []\n\n y=self.decisionsN - self.objectivesN + 1\n for x in self.decisions[self.objectivesN - 1:]:\n y += pow(x - 0.5, 2) - math.cos((x - 0.5) * 20 * math.pi)\n\n y *= 100\n for i in xrange(self.objectivesN):\n t = 0.5 * (1 + y)\n for x in self.decisions[:self.objectivesN - i - 1]:\n t *= x\n \n if i != 0:\n t *= (1 - self.decisions[self.objectivesN - i])\n \n obj.append(t)\n \n self.prevDecision = self.decisions\n self.objectives = obj\n\n return obj\n\nclass DTLZ3(Model):\n def __init__(self, dec = 10, obj = 2):\n self.decisionsN, self.objectivesN = dec, obj\n self.decisions, self.prevDecision, self.objectives = [0 for x in xrange(dec)], [], []\n self.top, self.bottom = [1 for x in xrange(dec)], [0 for x in xrange(dec)]\n self.gen()\n\n def getObjectives(self):\n if self.prevDecision == self.decisions:\n return self.objectives\n\n obj = []\n\n y = self.decisionsN - self.objectivesN + 1\n for x in self.decisions[self.objectivesN-1:]:\n y += pow(x - 0.5, 2) - math.cos((x - 0.5) * 20 * math.pi)\n\n y *= 100\n for i in xrange(self.objectivesN):\n t = y + 1\n for x in self.decisions[:self.objectivesN - i - 1]:\n t *= math.cos(x * math.pi / 2)\n \n if i != 0:\n t *= math.sin(self.decisions[self.objectivesN - i] * math.pi / 2)\n \n obj.append(t)\n\n self.prevDecision = self.decisions\n self.objectives = obj\n\n return obj\n\nclass DTLZ5(Model):\n def __init__(self, dec = 10, obj = 2):\n self.decisionsN, self.objectivesN = dec, obj\n self.decisions, self.prevDecision, self.objectives = [0 for x in xrange(dec)], [], []\n self.top, self.bottom = [1 for x in xrange(dec)], [0 for x in xrange(dec)]\n self.gen()\n\n def getObjectives(self):\n if self.prevDecision == self.decisions:\n return self.objectives\n\n obj = []\n\n y = 0\n for x in self.decisions[self.objectivesN-1:]:\n y += pow(x - 0.5, 2)\n \n theta = [math.pi * self.decisions[0] / 2]\n for x in self.decisions[1:self.objectivesN - 1]:\n theta.append((1 + 2 * y * x) * 
math.pi / (4 * (1 + y)))\n \n for i in xrange(self.objectivesN):\n t = 1 + y\n for x in theta[:self.objectivesN - i - 1]:\n t *= math.cos(x * math.pi / 2)\n \n if i!= 0:\n t *= math.sin(theta[self.objectivesN - i - 1] * math.pi / 2)\n \n obj.append(t)\n\n self.prevDecision = self.decisions\n self.objectives = obj\n\n return obj\n\nclass DTLZ7(Model):\n def __init__(self, dec = 10, obj = 2):\n self.decisionsN, self.objectivesN = dec, obj\n self.decisions, self.prevDecision, self.objectives = [0 for x in xrange(dec)], [], []\n self.top, self.bottom = [1 for x in xrange(dec)], [0 for x in xrange(dec)]\n self.gen()\n\n def getObjectives(self):\n if self.prevDecision == self.decisions:\n return self.objectives\n\n obj = []\n \n y = 1 + 9/(self.decisionsN - self.objectivesN + 1) * sum(self.decisions[self.objectivesN - 1:])\n z = self.objectivesN\n \n for i in xrange(self.objectivesN - 1):\n obj.append(self.decisions[i])\n z -= obj[i] / (1 + y) * (1 + math.sin(3 * math.pi * obj[i]))\n\n obj.append((1 + y) * z)\n\n self.prevDecision=self.decisions\n self.objectives=obj\n\n return obj\n","sub_path":"hw/code/9/DTLZ.py","file_name":"DTLZ.py","file_ext":"py","file_size_in_byte":5243,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"20352355","text":"import script1\n\ndef multi(a,b):\n\tprint(f\"The Multiplcation of {a}*{b} is {a*b}\")\n\treturn None\nx=int(input(\"Enter x: \"))\ny=int(input(\"Enter y: \"))\ndef main():\n\tscript1.addition(x,y)\n\tscript1.sub(x,y)\n\tmulti(x,y)\nif __name__ == '__main__':\n\tmain()\n","sub_path":"Udemy/python/pYTHON_cHECK/18-Functions/script2.py","file_name":"script2.py","file_ext":"py","file_size_in_byte":246,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"274774439","text":"# http://qiita.com/onoxeve/items/0c843d97c8db0e7f3feb\n# coding:utf-8\n\nfrom __future__ import print_function\n\nimport boto3\nimport json\nimport logging\nimport os\nimport datetime\n\nfrom base64 import b64decode\nfrom urllib2 import Request, urlopen, URLError, HTTPError\n\n# The base-64 encoded, encrypted key (CiphertextBlob) stored in the kmsEncryptedHookUrl environment variable\nENCRYPTED_HOOK_URL = os.environ['kmsEncryptedHookUrl']\n\nHOOK_URL = boto3.client('kms').decrypt(CiphertextBlob=b64decode(ENCRYPTED_HOOK_URL))['Plaintext']\n\nlogger = logging.getLogger()\nlogger.setLevel(logging.INFO)\n\n# CloudWatchからAWS請求情報を取得(昨日から今日にかけて1日分の最大値)\n# 2017/7現在: バージニア北部(us-east-1)リージョンのみ請求情報を取得可能\ncloud_watch = boto3.client('cloudwatch', region_name='us-east-1')\nget_metric_statistics = cloud_watch.get_metric_statistics(\n Namespace='AWS/Billing',\n MetricName='EstimatedCharges',\n Dimensions=[{\n 'Name': 'Currency',\n 'Value': 'USD'\n }],\n StartTime=datetime.datetime.today() - datetime.timedelta(days=1),\n EndTime=datetime.datetime.today(),\n Period=86400,\n Statistics=['Maximum']\n)\n\ndef lambda_handler(event, context):\n logger.info(\"Event: \" + str(event))\n #message = json.loads(event['Records'][0]['Sns']['Message'])\n\n # AWS請求情報をフィルタ1\n message = get_metric_statistics['Datapoints'][0]\n logger.info(\"Message: \" + str(message))\n\n #alarm_name = message['AlarmName']\n #old_state = message['OldStateValue']\n #new_state = message['NewStateValue']\n #reason = message['NewStateReason']\n\n # AWS請求情報をフィルタ2\n currency_statistics = message['Maximum']\n time_statistics = message['Timestamp'].strftime('%Y/%m/%d')\n\n # しきい値超過でSlackメッセージの色を変更する\n if currency_statistics > 15.0:\n notify_color 
= \"danger\"\n    else:\n        notify_color = \"good\"\n\n    # Slack message payload\n    # username, color, title and title_link are added\n    slack_message = {\n        'attachments': [{\n            # color-code the message\n            'color': notify_color,\n            # add a title\n            \"title\": \"AWS Billing & Cost\",\n            # link to the AWS billing dashboard\n            \"title_link\": \"https://console.aws.amazon.com/billing/home?#/\",\n            # message body\n            'text': \"EstimatedCharges is now %s USD in %s\" % (currency_statistics, time_statistics)\n        }]\n    }\n    logger.info(slack_message)\n\n    req = Request(HOOK_URL, json.dumps(slack_message))\n    response = urlopen(req)\n    logger.info(response.read())\n","sub_path":"aws-billing/lambda_function.py","file_name":"lambda_function.py","file_ext":"py","file_size_in_byte":2705,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"204810518","text":"#!/bin/python3\r\n# Strings -> CamelCase: count the words of a camelCase string\r\nimport sys\r\n\r\ndef camelcase(s):\r\n    # the first word is lowercase, so start at 1 and add one per capital letter\r\n    counter = 1\r\n    for i in s:\r\n        if i.isupper():\r\n            counter += 1\r\n    return counter\r\n\r\nif __name__ == \"__main__\":\r\n    s = input().strip()\r\n    result = camelcase(s)\r\n    print(result)\r\n","sub_path":"HackerRank/Algorithm/newAlgorithm/CamelCase.py","file_name":"CamelCase.py","file_ext":"py","file_size_in_byte":286,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"11676897","text":"from Opponent import Opponent\n\nclass PlayerOpponent(Opponent):\n    def __init__(self, player_character):\n        # pass the subclass to super() so Opponent's own __init__ actually runs\n        super(PlayerOpponent, self).__init__()\n        self._character = player_character\n\n    # Override\n    def get_move(self):\n        print(\"\")\n\n        try:\n            row = int(input(\"Enter Row Number: \"))\n        except ValueError:\n            row = -1\n\n        try:\n            column = int(input(\"Enter Column Number: \"))\n        except ValueError:\n            column = -1\n\n        print(\"\")\n        return (row, column)\n","sub_path":"PlayerOpponent.py","file_name":"PlayerOpponent.py","file_ext":"py","file_size_in_byte":530,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"41118682","text":"import re\nimport os\nimport cv2\nimport json\nimport face_recognition\n\nimport numpy as np\n\n# Get a reference to webcam #0 (the default one)\nvideo_capture = cv2.VideoCapture(0)\n\n_, frame1 = video_capture.read()\n_, frame2 = video_capture.read()\n\nknown_face_encodings = [ ]\nknown_face_names = [ ]\n\n## LOADING IN CONFIG DATA\nwith open('../config.json') as json_file:\n    data = json.load(json_file)\n    owner = data[\"owner\"]\n    maxTick = data[\"max_owner_tick\"]\n\nownerTick=0\nrandomPerson=0\n\nsdThresh = 10\n\ndef distMap(frame1, frame2):\n    \"\"\"outputs pythagorean distance between two frames\"\"\"\n    frame1_32 = np.float32(frame1)\n    frame2_32 = np.float32(frame2)\n    diff32 = frame1_32 - frame2_32\n    norm32 = np.sqrt(diff32[:,:,0]**2 + diff32[:,:,1]**2 + diff32[:,:,2]**2)/np.sqrt(255**2 + 255**2 + 255**2)\n    dist = np.uint8(norm32*255)\n    return dist\n\n# Set up faces\nfor file in os.listdir(os.fsencode(\"../accounts\")):\n    filename = os.fsdecode(file)\n    if filename.endswith(\".jpg\"):\n        name = filename.split('.')[0]\n\n        print(name)\n\n        known_face_encodings.append(face_recognition.face_encodings(face_recognition.load_image_file(\"../accounts/\" + name + \".jpg\"))[0])\n\n        name = re.split(r'\\d', name)[0]\n        print(name)\n\n        known_face_names.append(name)\n\n        print(name)\n        continue\n\n\nwhile True:\n    _, frame3 = video_capture.read()\n    rows, cols, _ = np.shape(frame3)\n    dist = distMap(frame1, frame3)\n\n    frame1 = frame2\n    frame2 = frame3\n\n    # apply Gaussian smoothing\n    mod = 
cv2.GaussianBlur(dist, (9,9), 0)\n\n # apply thresholding\n _, thresh = cv2.threshold(mod, 100, 255, 0)\n\n # calculate st dev test\n _, stDev = cv2.meanStdDev(mod)\n \n if stDev > sdThresh:\n print(\"Motion detected.. Do something!!!\");\n #TODO: Face Detection 2\n\n\n # Grab a single frame of video\n ret, frame = video_capture.read()\n\n # Convert the image from BGR color (which OpenCV uses) to RGB color (which face_recognition uses)\n rgb_frame = frame[:, :, ::-1]\n\n # Find all the faces and face enqcodings in the frame of video\n face_locations = face_recognition.face_locations(rgb_frame)\n face_encodings = face_recognition.face_encodings(rgb_frame, face_locations)\n\n # Loop through each face in this frame of video\n for (top, right, bottom, left), face_encoding in zip(face_locations, face_encodings):\n # See if the face is a match for the known face(s)\n matches = face_recognition.compare_faces(known_face_encodings, face_encoding)\n\n name = \"Unknown\"\n\n # If a match was found in known_face_encodings, just use the first one.\n # if True in matches:\n # first_match_index = matches.index(True)\n # name = known_face_names[first_match_index]\n\n # Or instead, use the known face with the smallest distance to the new face\n face_distances = face_recognition.face_distance(known_face_encodings, face_encoding)\n best_match_index = np.argmin(face_distances)\n if matches[best_match_index]:\n name = known_face_names[best_match_index]\n if name == owner:\n print(\"Owner found\")\n ownerTick += 1\n if ownerTick >= maxTick:\n video_capture.release()\n cv2.destroyAllWindows()\n break\n\n # Draw a box around the face\n cv2.rectangle(frame, (left, top), (right, bottom), (0, 0, 255), 2)\n\n # Draw a label with a name below the face\n cv2.rectangle(frame, (left, bottom - 35), (right, bottom), (0, 0, 255), cv2.FILLED)\n font = cv2.FONT_HERSHEY_DUPLEX\n cv2.putText(frame, name, (left + 6, bottom - 6), font, 1.0, (255, 255, 255), 1)\n\n # Display the resulting image\n cv2.imshow('Video', frame)\n\n # Hit 'q' on the keyboard to quit!\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n\n# Release handle to the webcam\nvideo_capture.release()\ncv2.destroyAllWindows()\n","sub_path":"tools/backup.py","file_name":"backup.py","file_ext":"py","file_size_in_byte":4001,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"461182053","text":"\nfrom concurrent.futures import ThreadPoolExecutor, as_completed \nfrom concurrent.futures import ProcessPoolExecutor\nimport time\n'''\n多进程编程\n因为 GIL 的存在,在 计算型密集型任务面前,多进程没办法发挥优势,实现并发,\n这时候就需要多进程编程,多进程可以充分利用多核的优势实现并发\n对于 io 操作,使用多进程编程比多进程要好,因为线程之间的切换代价要低于进程之间的切换\n'''\n\n# 1、对于耗 CPU 的计算型密集任务,比如计算,图像处理,机器学习的算法,比特币挖矿 \n# 多进程优于多线程\n\n# def fn(n):\n# if n <= 2:\n# return n\n# return fn(n-1) + fn(n-2)\n\n# if __name__ == '__main__':\n# start = time.time()\n# with ProcessPoolExecutor(3) as executor:\n# all_task = [executor.submit(fn,(num)) for num in range(25,30)]\n# # 获取任务的返回结果,异步进行,那个结束那个就返回,无序\n# for future in as_completed(all_task):\n# data = future.result()\n# print(data)\n# print('运行时间{}'.format(time.time()-start))\n\n\n# 线程就用线程池 ThreadPoolExecutor 进程就用进程池 ProcessPoolExecutor\n# 进程耗时:运行时间45.087360858917236\n# 线程耗时:运行时间60.71353578567505\n# 结论:耗CPU的操作,多进程比多线程更加省时\n\n# 2、对于 io 操作来说,多线程优于多进程\ndef fn(n):\n time.sleep(n)\n return n\n\nif __name__ == '__main__':\n start = time.time()\n with ThreadPoolExecutor(3) as executor:\n all_task = [executor.submit(fn,(num)) for num in [2]*30]\n # 获取任务的返回结果,异步进行,那个结束那个就返回,无序\n for future in 
as_completed(all_task):\n data = future.result()\n print(data)\n print('运行时间{}'.format(time.time()-start))\n\n\n# 进程耗时:运行时间20.41038465499878\n# 线程耗时:运行时间20.009454250335693\n# 虽然实验结果不太明显,但是具体使用差距就体现出来了\n# 开启多个线程比较轻松,开启多个进程比较吃力,比如说60个线程很轻松,但60个进程就比较吃力了。\n# 结论:对于 io 操作来说,多线程优于多进程\n","sub_path":"process/线程进程对比.py","file_name":"线程进程对比.py","file_ext":"py","file_size_in_byte":2270,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"822908","text":"import unittest\nfrom cffconvert import Citation\n\n\nclass CitationTestOrgRepoTreeBranch(unittest.TestCase):\n\n def test_retrieval_via_branchname(self):\n # https://github.com///tree/\n # this test checks if cffconvert can behave similar to a simple curl or wget\n url = \"https://github.com/citation-file-format/cff-converter-python/tree/master\"\n citation = Citation(url=url)\n self.assertIsNotNone(citation)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","sub_path":"livetest/05/CitationTestOrgRepoTreeBranch05.py","file_name":"CitationTestOrgRepoTreeBranch05.py","file_ext":"py","file_size_in_byte":511,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"128049758","text":"import datetime\n\nimport form as form\nfrom django.shortcuts import render\nfrom django.http import HttpResponse, HttpResponseRedirect\nfrom django.urls import reverse\nfrom django.views import View\n\nfrom .models import Post, Comment\n\n\ndef hello(request):\n return HttpResponse('Buna ziua!')\n\n\ndef home(request):\n context = {\n 'nr': 1234,\n }\n return render(request, 'home.html', context=context)\n\n\nclass TabeleView(View):\n @classmethod\n def get(cls, request):\n posts = Post.objects.all().order_by('-data_p')\n\n context = {\n 'posts': posts,\n }\n\n return render(request, 'tabele.html', context=context)\n\n @classmethod\n def post(cls, request):\n if request.POST['tipform'] == 'post':\n\n Post(\n titlu=request.POST['titlu'],\n data_p=datetime.datetime.now(),\n autor=request.POST['autor'],\n continut=request.POST['continut'],\n ).save()\n\n elif request.POST['tipform'] == 'comment':\n\n post = Post.objects.get(id=request.POST['post'])\n\n Comment(\n post=post,\n nume_cont=request.POST['nume_cont'],\n continut=request.POST['continut_com'],\n data_ora=datetime.datetime.now(),\n ).save()\n\n return HttpResponseRedirect(reverse('my_app:tabele'))\n\n","sub_path":"tema3/tema3/app_tema/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1384,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"396399699","text":"#!/usr/bin/python\n# coding=utf-8\n\nimport os\nimport time\nfrom time import sleep\n\nimport unittest\n\nimport HTMLTestRunner\nfrom appium import webdriver\n\n# Returns abs path relative to this file and not cwd\nfrom appium.webdriver.common.touch_action import TouchAction\n\nPATH = lambda p: os.path.abspath(\n os.path.join(os.path.dirname(__file__), p)\n)\n\n\nclass HighingAndroidTests(unittest.TestCase):\n def setUp(self):\n desired_caps = {}\n desired_caps['platformName'] = 'Android'\n desired_caps['platformVersion'] = '6.0'\n desired_caps['deviceName'] = 'Nexus 5'\n desired_caps['appPackage'] = 'cn.highing.hichat' # 被测App的包名\n desired_caps['appActivity'] = 'cn.highing.hichat.ui.SplashActivity' # 启动时的Activity\n # desired_caps['app'] = PATH('D:/highing.apk')\n\n self.driver = webdriver.Remote('http://localhost:4723/wd/hub', desired_caps)\n\n def tearDown(self):\n # end the session\n 
self.driver.quit()\n\n    def test_Swipe_Login(self):\n        sleep(10)\n        if self.driver.current_activity == \".ui.GuideActivity\":\n            self.driver.implicitly_wait(10)\n            try:\n                # swipe through the onboarding pages\n                self.driver.swipe(1000, 960, 80, 960, 500)\n                self.driver.swipe(1000, 960, 80, 960, 500)\n                self.driver.implicitly_wait(10)\n                self.driver.swipe(1000, 960, 80, 960, 500)\n                self.driver.implicitly_wait(10)\n                el = self.driver.find_element_by_id('cn.highing.hichat:id/iv_guide_enter')\n                el.click()\n                self.driver.implicitly_wait(10)\n            except:\n                return\n\n        # log in\n        textfields = self.driver.find_elements_by_class_name(\"android.widget.EditText\")\n        textfields[0].send_keys(\"18116137476\")\n        textfields[1].send_keys(\"123456\")\n\n        self.driver.hide_keyboard()\n        el = self.driver.find_element_by_id('cn.highing.hichat:id/btn_login')\n        el.click()\n\n        # implicit wait: if an element is not present yet, the driver keeps\n        # polling up to the configured timeout until it appears or times out\n        self.driver.implicitly_wait(15)\n        el = self.driver.find_element_by_xpath('//android.widget.TextView[contains(@text, \"每日频道\")]')\n        el.click()\n        self.driver.implicitly_wait(10)\n        try:\n            # dismiss the tutorial overlay\n            el = self.driver.find_element_by_xpath(\n                '//android.widget.FrameLayout/android.widget.LinearLayout/android.widget.ImageView[contains(@text, \"\")]')\n            el.click()\n        except:\n            return\n\n        try:\n            # post a text topic\n            el_1 = self.driver.find_element_by_id('cn.highing.hichat:id/topic_text_send')\n            el_1.click()\n\n            try:\n                # dismiss the tutorial overlay\n                el = self.driver.find_element_by_id('cn.highing.hichat:id/iv_comment_tips')\n                el.click()\n                sleep(1)\n                el = self.driver.find_element_by_id('cn.highing.hichat:id/iv_gps_tips')\n                el.click()\n                sleep(1)\n                el = self.driver.find_element_by_id('cn.highing.hichat:id/iv_comment_tips')\n                el.click()\n                sleep(1)\n            except:\n                return\n\n            # post a text topic\n            el = self.driver.find_element_by_id('cn.highing.hichat:id/content_text')\n            el.click()\n\n            textfield = self.driver.find_element_by_class_name(\"android.widget.EditText\")\n            textfield.send_keys(\"text_test_0001\")\n\n            el = self.driver.find_element_by_id('cn.highing.hichat:id/header_btn_right')\n            el.click()\n\n        except:\n            try:\n                # post an image topic\n                el_2 = self.driver.find_element_by_id('cn.highing.hichat:id/topic_img_send')\n                el_2.click()\n                self.driver.implicitly_wait(10)\n\n                # xpath could not solve this -- the elements could not be located\n                # self.driver.find_elements_by_xpath(\n                #     '//android.widget.LinearLayout/android.widget.RelativeLayout/android.widget.GridView/android.widget.RelativeLayout[2]/android.widget.LinearLayout[contains(@index, \"1\")]').click()\n                # self.driver.find_elements_by_xpath(\n                #     '//android.widget.LinearLayout/android.widget.RelativeLayout/android.widget.GridView/android.widget.RelativeLayout[3]/android.widget.LinearLayout[contains(@index, \"1\")]').click()\n\n                # a list fetched by class_name works fine\n                checkboxes = self.driver.find_elements_by_class_name('android.widget.CheckBox')\n                checkboxes[0].click()\n                checkboxes[1].click()\n\n                # tapping by raw coordinates also works, but it is not portable and very limited\n                # self.driver.swipe(630, 320, 630, 320, 500)\n                # self.driver.implicitly_wait(10)\n                # self.driver.swipe(1000, 320, 1000, 320, 500)\n                # self.driver.implicitly_wait(10)\n\n                el = self.driver.find_element_by_id('cn.highing.hichat:id/header_layout_rightview_container')\n                el.click()\n                # self.driver.implicitly_wait(10)\n\n                try:\n                    # dismiss the tutorial overlay\n                    el = self.driver.find_element_by_id('cn.highing.hichat:id/iv_gps_tips')\n                    el.click()\n                    sleep(1)\n                    el = self.driver.find_element_by_id('cn.highing.hichat:id/iv_comment_tips')\n                    el.click()\n                    sleep(1)\n                except:\n                    return\n\n                textfield = self.driver.find_element_by_class_name(\"android.widget.EditText\")\n                textfield.send_keys(\"image_test_0001\")\n\n                el = 
self.driver.find_element_by_id('cn.highing.hichat:id/header_btn_right')\n                el.click()\n                self.driver.implicitly_wait(10)\n\n            except:\n                # post a voice topic\n                action1 = TouchAction(self.driver)\n                el_3 = self.driver.find_element_by_id('cn.highing.hichat:id/topic_voice_send')\n                action1.long_press(el_3).wait(10000).perform()\n                self.driver.implicitly_wait(10)\n\n                try:\n                    # dismiss the tutorial overlay\n                    el = self.driver.find_element_by_id('cn.highing.hichat:id/iv_gps_tips')\n                    el.click()\n                    sleep(1)\n                    el = self.driver.find_element_by_id('cn.highing.hichat:id/iv_comment_tips')\n                    el.click()\n                    sleep(1)\n                except:\n                    return\n\n                textfield = self.driver.find_element_by_class_name(\"android.widget.EditText\")\n                textfield.send_keys(\"voice_test_0001\")\n\n                el = self.driver.find_element_by_id('cn.highing.hichat:id/header_btn_right')\n                el.click()\n                self.driver.implicitly_wait(10)\n                self.driver.tap()\n\n\nif __name__ == '__main__':\n    # suite = unittest.TestLoader().loadTestsFromTestCase(HighingAndroidTests)\n    # unittest.TextTestRunner(verbosity=2).run(suite)\n\n\n    suite = unittest.TestSuite()\n    suite.addTest(HighingAndroidTests(\"test_Swipe_Login\"))\n\n    timestr = time.strftime('%Y%m%d%H%M%S', time.localtime(time.time()))\n    filename = \"./result_\" + timestr + \".html\"  # report output path; relative paths are supported\n\n    fp = open(filename, 'wb')\n    runner = HTMLTestRunner.HTMLTestRunner(stream=fp, title='Test Results',\n                                           description='Test Reports')  # configure HTMLTestRunner: report path, title and description\n\n    runner.run(suite)  # run the tests\n    fp.close()  # close the report file\n","sub_path":"examples/python/Highing_Start_Login.py","file_name":"Highing_Start_Login.py","file_ext":"py","file_size_in_byte":7986,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"452974463","text":"\"\"\"This module offers facilities to initialize a WindowsInterpreter\nbased on libvirt automation.\n\n\"\"\"\n\nimport re\n\nfrom xml.etree import ElementTree\n\nimport libvirt\n\n\nfrom murphy.automation.control import LibvirtControl\nfrom murphy.automation.feedback import LibvirtFeedback\n# from murphy.model.scrapers.winapi import WinAPIScraper\nfrom murphy.model.scrapers.uiauto import WinUIAutomationScraper\nfrom murphy.model.interpreters.windows import WindowsInterpreter, Tolerance\n\n\ndef state_interpreter(\n        domain_id: (int, str), scraper_port: int = 8000) -> WindowsInterpreter:\n    \"\"\"Returns a WindowsInterpreter based on libvirt.\"\"\"\n    domain = libvirt_domain(domain_id)\n    address = domain_address(domain)\n    vnc_server = domain_vnc_server(domain)\n\n    control = LibvirtControl(vnc_server, domain_id)\n    feedback = LibvirtFeedback(vnc_server, domain_id)\n    scraper = WinUIAutomationScraper(address, scraper_port, full_scrape=True)\n    tolerance = Tolerance(1.6, (0.35, 0.2, 0.18))\n    # As the WinAPI scraper does not report toggled checkboxes,\n    # a lower image comparison tolerance is recommended\n    # scraper = WinAPIScraper(address, scraper_port)\n    # tolerance = Tolerance(1.0, (0.20, 0.1, 0.18))\n\n    interpreter = WindowsInterpreter(feedback, control, scraper)\n    interpreter.tolerance = tolerance\n\n    return interpreter\n\n\ndef domain_address(domain: libvirt.virDomain) -> str:\n    interfaces = domain.interfaceAddresses(\n        libvirt.VIR_DOMAIN_INTERFACE_ADDRESSES_SRC_LEASE)\n\n    for iface in interfaces.values():\n        if isinstance(iface, dict) and 'addrs' in iface:\n            for address in iface['addrs']:\n                if 'addr' in address:\n                    return address['addr']\n\n    raise RuntimeError(\"No IP address found for the given domain\")\n\n\ndef domain_vnc_server(domain: libvirt.virDomain) -> str:\n    etree = 
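# ---------------------------------------------------------------------------
# Hedged sketch: the Appium test above leans on implicitly_wait() plus bare
# try/except blocks, which swallows failures silently. Because the Appium
# Python client driver inherits from Selenium's Remote WebDriver, an explicit
# wait is one common alternative. The resource id below is taken from the
# test; the helper name wait_for_clickable is illustrative, not part of any
# existing API.
# ---------------------------------------------------------------------------
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC


def wait_for_clickable(driver, resource_id, timeout=10):
    # Poll until the element is clickable instead of sleeping blindly.
    return WebDriverWait(driver, timeout).until(
        EC.element_to_be_clickable((By.ID, resource_id)))

# Example usage inside a test method:
#     el = wait_for_clickable(self.driver, 'cn.highing.hichat:id/btn_login')
#     el.click()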
ElementTree.fromstring(domain.XMLDesc())\n\n vnc = etree.find('.//graphics[@type=\"vnc\"]')\n if vnc is None:\n raise RuntimeError(\"No VNC connection found for the given domain\")\n\n if 'socket' in vnc.keys():\n return vnc.get('socket')\n elif {'listen', 'port'} <= set(vnc.keys()):\n return '::'.join((vnc.get('listen'), vnc.get('port')))\n else:\n raise RuntimeError(\"No valid VNC connection found for the given domain\")\n\n\ndef libvirt_domain(domain_id: (int, str)) -> libvirt.virDomain:\n connection = libvirt.open(QEMU_URI)\n\n if isinstance(domain_id, int):\n return connection.lookupByID(domain_id)\n elif re.match(UUID_EXPR, domain_id):\n return connection.lookupByUUIDString(domain_id)\n else:\n return connection.lookupByName(domain_id)\n\n\nQEMU_URI = 'qemu:///system'\nUUID_EXPR = \"[a-fA-F0-9]{8}-[a-fA-F0-9]{4}-4[a-fA-F0-9]{3}-\" + \\\n \"[89aAbB][a-fA-F0-9]{3}-[a-fA-F0-9]{12}\"\n","sub_path":"murphy/win_libvirt.py","file_name":"win_libvirt.py","file_ext":"py","file_size_in_byte":2805,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"593855955","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\nhttp://peter-hoffmann.com/2010/extrinsic-visitor-pattern-python-inheritance.html\n\n*TL;DR80\nSeparates an algorithm from an object structure on which it operates.\n\"\"\"\n\n\nclass Node(object):\n def accept(self, visitor):\n visitor.visit(self)\n\nclass A(Node):\n pass\n\nclass B(Node):\n pass\n\n\nclass C(A, B):\n def accept(self, visitor):\n visitor.visit_C(self)\n\nclass Visitor(object):\n def visit(self, node, *args, **kwargs):\n meth = None\n for cls in node.__class__.__mro__:\n meth_name = 'visit_'+cls.__name__\n meth = getattr(self, meth_name, None)\n if meth:\n break\n else:\n return self.generic_visit(node, *args, **kwargs)\n return meth(node, *args, **kwargs)\n\n def generic_visit(self, node, *args, **kwargs):\n print('generic_visit ' + node.__class__.__name__)\n\nclass ConcreteVisitor1(Visitor):\n def visit_B(self, node, *args, **kwargs):\n print('ConcreteVisitor1 visit_B ' + node.__class__.__name__)\n \n def visit_C(self, node, *args, **kwargs):\n print('ConcreteVisitor1 visit_C ' + node.__class__.__name__)\n\nclass ConcreteVisitor2(Visitor):\n def visit_A(self, node, *args, **kwargs):\n print('ConcreteVisitor2 visit_A ' + node.__class__.__name__)\n \n def visit_C(self, node, *args, **kwargs):\n print('ConcreteVisitor2 visit_C ' + node.__class__.__name__)\n\na = A()\nb = B()\nc = C()\nvisitor1 = ConcreteVisitor1()\na.accept(visitor1)\nb.accept(visitor1)\nc.accept(visitor1)\n\nvisitor2 = ConcreteVisitor2()\na.accept(visitor2)\nb.accept(visitor2)\nc.accept(visitor2)\n\n### OUTPUT ###\n# generic_visit A\n# ConcreteVisitor1 visit_B B\n# ConcreteVisitor1 visit_C C\n# ConcreteVisitor2 visit_A A\n# generic_visit B\n# ConcreteVisitor2 visit_C C\n","sub_path":"behavioral/visitor_v2.py","file_name":"visitor_v2.py","file_ext":"py","file_size_in_byte":1838,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"317928924","text":"#!/usr/bin/env python\n\n\"\"\"\n\nATTENTION: CHECK testing operation BEFORE USE!!\n\n=============================================\nΕφαρμογή για αποστολή ιατρικών ΑΠΥ στο MYDATA\n=============================================\n\nΥποστηρίζει:\n1) αποστολή ΑΠΥ\n2) κατέβασμα όλων των ΑΠΥ από MyDATA\n2) αναζήτηση με βάση ΑΑ, ημερoμηνία, όνομα/αιτία\n4) ακύρωση ΑΠΥ βάση ΜΑΡΚ\n5) εκτύπωση ΑΠΥ βάση ΜΑΡΚ ή βάση των entries (offline) \n\nInvoice columns: 
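# ---------------------------------------------------------------------------
# Hedged usage sketch for the state_interpreter() factory above. The domain
# name 'win10-guest' is a placeholder; the call assumes a libvirt guest whose
# DHCP lease is visible (required by domain_address) and which exposes a VNC
# graphics device (required by domain_vnc_server).
# ---------------------------------------------------------------------------
# from murphy.win_libvirt import state_interpreter
#
# interpreter = state_interpreter('win10-guest', scraper_port=8000)
# # `interpreter` is a WindowsInterpreter wired to LibvirtControl and
# # LibvirtFeedback; how it is driven from here depends on the murphy.model
# # API, which this sketch deliberately does not assume.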
Υποκ, ΑΑ, Ημ/νία, Ποσό, Πληρωμή, Σχόλιο, ΜΑΡΚ\nComment = Patient Name - Address - Visit reason\n\nHardcoded User info:\n1) testing operation, CHECK IT BEFORE USE!!\n2) Username and Keys\n3) Branches (όπως φαίνονται στο Taxis)\n4) Paymethods\n\nIn Windows:\n1) compatible Python version = 3.4.3\n2) only Print offline works\n \n\"\"\"\n\nimport os\nfrom datetime import datetime\nimport xml.etree.ElementTree as ET\nimport webbrowser\nimport tkinter as tk\nimport http.client, urllib.request, urllib.parse, urllib.error, base64\n\n# =====================================================\n# Globals\n# =====================================================\n\n# ===== ATTENTION\nisTesting = 1\n# ===== ATTENTION\n\n# MYDATA credentials\nUSER = 'user'\nif isTesting:\n KEY = 'key'\n BASE_URL = 'mydata-dev.azure-api.net'\n BASE_EXT = ''\nelse:\n KEY = 'key'\n BASE_URL = 'mydatapi.aade.gr'\n BASE_EXT = '/myDATA'\nheaders = {\n 'aade-user-id': USER,\n 'Ocp-Apim-Subscription-Key': KEY,\n }\n# MYDATA XML namespace\nNS = '{http://www.aade.gr/myDATA/invoice/v1.0}'\n\n# Υποκαταστήματα με κωδικό από TAXIS (preffered key is City name)\nBRANCHES = {\n 'ΑΘΗΝΑ':\n ['1', 'ΔΙΕΥΘΥΝΣΗ 1, ΤΗΛΕΦΩΝΟ 1'],\n 'ΘΕΣΣΑΛΟΝΙΚΗ':\n ['2', 'ΔΙΕΥΘΥΝΣΗ 2, ΤΗΛΕΦΩΝΟ 2']\n }\n\n# Paymethods (Missing: Διεθνές POS)\nPAYMETHODS = {\n 'Μετρητά': '3',\n 'POS': '1'\n }\n\n# Globals\nINVOICES = []\nINVOICE_HEAD = 'ΥΠΟΚ; ΑΑ; ΗΜ/ΝΙΑ; ΠΟΣΟ; ΠΛΗΡ; ΟΝ/ΜΟ-ΑΙΤΙΑ; ΜΑΡΚ'\nMAIN_SERVICE = 'ΣΥΝΤΑΓΟΓΡΑΦΗΣΗ'\nD = ';' # line delimiter\n\n\ndef SendInvoice():\n ''' SENDS an invoice to MYDATA '''\n\n # Get needed entries\n branch = BRANCHES[branchOmVar.get()][0]\n aa = entry_aa.get()\n amount = entry_amount.get()\n date = entry_date.get()\n paymethod = PAYMETHODS[paymethodOmVar.get()]\n comment = (\n entry_patname.get() + '-' +\n entry_pataddr.get() + '-' +\n entry_patvisit.get()\n ).replace(';', '')\n\n payload_xml = \"\"\"\n\n\n\n \n 062725970\n GR\n %d\n \n \n A\n %d\n %s\n 11.2\n EUR\n \n \n \n %d\n %s\n %s\n \n \n \n 1\n %s\n 7\n 0\n 7\n \n E3_561_003\n category1_3\n %s\n 1\n \n \n \n %s\n 0\n 0.00\n 0.00\n 0.00\n 0.00\n 0.00\n %s\n \n E3_561_003\n category1_3\n %s\n \n \n\n\n \"\"\" % (\n int(branch),\n int(aa),\n date,\n int(paymethod),\n amount,\n comment,\n amount, amount, amount, amount, amount # ...a mountain in in the valley...\n )\n\n # urllib accepts only bytes\n payload_xml = payload_xml.encode('utf-8')\n \n conn = http.client.HTTPSConnection(BASE_URL)\n conn.request(\"POST\", BASE_EXT + \"/SendInvoices\", payload_xml, headers)\n response = conn.getresponse().read().decode('utf-8')\n if (response.startswith(' 0, ie. all)\n and calls: SearchInvoices() + AdjustForBranch() \n '''\n\n params = urllib.parse.urlencode({'mark': '0'})\n conn = http.client.HTTPSConnection(BASE_URL)\n conn.request(\"GET\", BASE_EXT + \"/RequestTransmittedDocs?%s\" % params, \"\", headers)\n response = conn.getresponse().read().decode('utf-8')\n\n if (response.startswith(' City works provided the BRANCHES are numbered 1,2... 
\n invoice_info[0] = list(BRANCHES.keys())[int(invoice_info[0])-1]\n patient_info = invoice_info[5].split('-')\n\n if not ('invoice_info' in locals()):\n ShowNotification(\"Ο ΜΑΡΚ δεν βρέθηκε\", [800, H_UPP+20])\n return\n \n entry_mark.delete(0, 'end')\n\n else: # 'offline':\n invoice_info = [\n branchOmVar.get(),\n entry_aa.get(),\n entry_date.get(),\n entry_amount.get()\n ]\n patient_info = [entry_patname.get(), entry_pataddr.get(), entry_patvisit.get()]\n\n # empty Name and Address entries\n if len(patient_info) == 2:\n patient_info[2] = MAIN_SERVICE\n # empty Visit entry\n if patient_info[2] == '':\n patient_info[2] = MAIN_SERVICE\n\n apy_html = \"\"\"\n\n\n \n \n ΑΠΥ Ιατρικών Υπηρεσιών\n \n \n\n \n
<table width=\"100%\" border=\"0\" cellpadding=\"6\">\n  <tr>\n   <td>\n    ΙΑΤΡΟΣ ΙΑΤΡΟΠΟΥΛΟΣ<br>\n    Ιατρικές Υπηρεσίες<br>\n    ΑΦΜ: ΧΧΧ, ΔΟΥ: ΥΥΥ\n   </td>
<td>\n    Απόδειξη Λιανικών Συναλλαγών (ΑΠΥ)<br>\n    Αριθμός: %s<br>\n    Ημερομηνία: %s\n   </td>\n  </tr>
<tr>\n   <td>\n    Διεύθυνση:<br>\n    %s\n   </td>\n   <td>\n    ΠΑΡΑΛΗΠΤΗΣ:<br>\n    %s<br>\n    %s\n   </td>\n  </tr>\n </table>
<table width=\"100%\" border=\"1\" cellpadding=\"6\">\n  <tr><th>Υπηρεσία</th><th>Αξία (Ευρώ)</th></tr>
<tr>\n   <td>\n    %s<br>\n    (χωρίς ΦΠΑ, άρθρο 22 Κώδικα)\n   </td>\n   <td>%s</td>\n  </tr>
<tr>\n   <td>ΠΑΡΑΛΑΒΗ</td>\n   <td>ΕΚΔΟΣΗ</td>\n  </tr>\n </table>
\n \n\n \"\"\" % (\n invoice_info[1],\n invoice_info[2], \n BRANCHES[invoice_info[0]][1],\n patient_info[0],\n patient_info[1],\n patient_info[2],\n invoice_info[3]\n )\n\n file_html = os.path.abspath('Zapy2print.html')\n with open(file_html, 'w', encoding='utf-8') as f: # enc... fixes Windows charmap bug \n f.write(apy_html)\n webbrowser.open('file://' + file_html)\n\n\n\n# SMALL METHODS\n\ndef setMark():\n ''' Adds mark to mark entry for the selected invoice '''\n\n entry_mark.delete(0,'end')\n entry_mark.insert(0, invoiceOmVar.get().split(D)[6].strip())\n\n\ndef setRange():\n ''' Sets default or validates given range '''\n \n range_until = entry_until.get()\n range_from = entry_from.get()\n\n # AA range\n if searchtermOmVar.get()[0] == '1':\n if not range_until.isnumeric():\n entry_until.delete(0, 'end')\n entry_until.insert(0, \"10\")\n if not range_from.isnumeric():\n entry_from.delete(0, 'end')\n entry_from.insert(0, \"0\")\n\n # Date range\n elif searchtermOmVar.get()[0] == '2':\n if '-' not in range_until:\n range_until = datetime.today().strftime('%Y-%m-%d')\n entry_until.delete(0, 'end')\n entry_until.insert(0, range_until)\n if range_from == '':\n range_from = range_until\n entry_from.delete(0, 'end')\n entry_from.insert(0, range_from)\n\n # Name/Visit range not changed\n else:\n pass\n\n\ndef AdjustForBranch():\n ''' Increments AA and updates city in Address entry based on selected Branch '''\n\n max_aa = 0\n for l in INVOICES:\n if ( l.split(D)[0] == BRANCHES[branchOmVar.get()][0] and\n int(l.split(D)[1]) > max_aa ):\n max_aa = int(l.split(D)[1])\n\n entry_aa.delete(0, 'end')\n entry_aa.insert(0, str(max_aa+1))\n entry_pataddr.delete(0, 'end')\n entry_pataddr.insert(0, branchOmVar.get())\n\n\ndef ShowNotification(content, place):\n ''' Present a temporary notification '''\n\n label_result = tk.Label(root, text=content, font=(SMALL_FONT))\n label_result.after(NOTIF_DUR, lambda: label_result.destroy())\n canvas.create_window(place[0], place[1], window=label_result)\n\n\n\n#===========================\n# GUI: Main Window\n#===========================\n\nNOTIF_DUR = 3000 # msec\nDEF_COLOUR = 'lavender'\nBIG_FONT = 'helvetica', 14\nMID_FONT = 'helvetica', 10\nSMALL_FONT = 'helvetica', 9, 'bold'\nH_TIT = 40\nH_UPP = 80\nH_MID = 120\nH_LOW = 160\nH_BUTT = 230 # no pun intented with 'tit' and 'butt'\n\nroot = tk.Tk()\nroot.title(\"ΕΦΑΡΜΟΓΗ MYDATA ΓΙΑ ΙΑΤΡΟΥΣ - TESTING\" if isTesting == 1\n else \"ΕΦΑΡΜΟΓΗ MYDATA ΓΙΑ ΙΑΤΡΟΥΣ - PRODUCTION\") \ncanvas = tk.Canvas(root, bg=DEF_COLOUR, width = 900, height = 300)\ncanvas.pack()\nroot.resizable(False, False)\nroot.option_add(\"*font\", MID_FONT)\n\n\n#================\n# GUI: Αποστολή ΑΠΥ\n#================\n\ncanvas.create_window(100, H_TIT, window=tk.Label(root,\n text=\"Αποστολή ΑΠΥ\", bg=DEF_COLOUR, font=BIG_FONT))\n \n# branches\nbranchOmVar = tk.StringVar()\nbranchOmVar.set(list(BRANCHES.keys())[0])\nom_branch = tk.OptionMenu(root, branchOmVar, *list(BRANCHES.keys()),\n command=lambda _: AdjustForBranch())\ncanvas.create_window(240, H_TIT, window=om_branch, width = 120)\n\n# AA\ncanvas.create_window(90, H_UPP, window=tk.Label(root,\n text=\"AA\", bg=DEF_COLOUR, font=MID_FONT))\nentry_aa = tk.Entry(root)\ncanvas.create_window(130, H_UPP, window=entry_aa, width=50)\n\n# date\ncanvas.create_window(190, H_UPP, window=tk.Label(root,\n text=\"ΗΜ\", bg=DEF_COLOUR, font=MID_FONT))\nentry_date = tk.Entry(root)\ncanvas.create_window(250, H_UPP, window=entry_date, width=90)\n\n# amount\ncanvas.create_window(90, H_MID, window=tk.Label(root,\n 
text=\"EΥ\", bg=DEF_COLOUR, font=MID_FONT))\nentry_amount = tk.Entry(root)\ncanvas.create_window(130, H_MID, window=entry_amount, width=50)\n\n# paymethods\ncanvas.create_window(190, H_MID, window=tk.Label(root,\n text=\"ΠΛ\", bg=DEF_COLOUR, font=MID_FONT))\npaymethodOmVar = tk.StringVar()\npaymethodOmVar.set(list(PAYMETHODS.keys())[0])\nom_paymethod = tk.OptionMenu(root, paymethodOmVar, *list(PAYMETHODS.keys()))\ncanvas.create_window(250, H_MID, window=om_paymethod, width = 90)\n\n# patient data (comments)\ncanvas.create_window(50, H_LOW, window=tk.Label(root,\n text=\"Ον/μο\", bg=DEF_COLOUR, font=MID_FONT))\nentry_patname = tk.Entry(root)\ncanvas.create_window(190, H_LOW, window=entry_patname, width=220)\n\ncanvas.create_window(50, H_LOW+30, window=tk.Label(root,\n text=\"Διευθ\", bg=DEF_COLOUR, font=MID_FONT))\nentry_pataddr = tk.Entry(root)\ncanvas.create_window(190, H_LOW+30, window=entry_pataddr, width=220)\n\ncanvas.create_window(50, H_LOW+60, window=tk.Label(root,\n text=\"Αιτία\", bg=DEF_COLOUR, font=MID_FONT))\nentry_patvisit = tk.Entry(root)\ncanvas.create_window(190, H_LOW+60, window=entry_patvisit, width=220)\n\n# Send button\nbutton_Send = tk.Button(text=\"Αποστολή\", command=SendInvoice,\n bg='brown', fg='white', font=(MID_FONT))\ncanvas.create_window(150, H_BUTT+30, window=button_Send)\n\n\n#====================\n# GUI: Αναζήτηση ΑΠΥ\n#====================\n\ncanvas.create_window(520, H_TIT, window=tk.Label(root,\n text=\"Αναζήτηση ΑΠΥ\", bg=DEF_COLOUR, font=BIG_FONT))\n\n# search terms (X = index in columns) \nsearchTerms = [\n \"1. Αριθμοί ΑΠΥ [Από] [Έως]\"\n , \"2. Ημερομηνία [Από] [Έως]\"\n , \"5. Στοιχεία [Όνομα ή Αιτία]\"\n ] \nsearchtermOmVar = tk.StringVar()\nsearchtermOmVar.set(searchTerms[1])\nom_searchterm = tk.OptionMenu(root, searchtermOmVar, *searchTerms,\n command=lambda _: setRange())\ncanvas.create_window(520, H_UPP, window=om_searchterm, width = 210)\n\n# range\nentry_from = tk.Entry(root)\ncanvas.create_window(460, H_MID, window=entry_from, width = 90)\nentry_until = tk.Entry(root)\ncanvas.create_window(580, H_MID, window=entry_until, width = 90)\n\n# invoices (the rest is implemented in SearchInvoices)\ninvoiceOmVar = tk.StringVar()\ninvoiceOmVar.set(INVOICE_HEAD)\n\n# Search button\nbutton_Request = tk.Button(text='Αναζήτηση',\n command=SearchInvoices,\n bg='brown', fg='white', font=(MID_FONT))\ncanvas.create_window(450, H_BUTT, window=button_Request)\n\n# Sync button\nbutton_RequestTransmittedDocs = tk.Button(text='Sync MyDATA',\n command=RequestTransmittedDocs)\ncanvas.create_window(580, H_BUTT, window=button_RequestTransmittedDocs)\n\n#=====================\n# GUI: Διαχείριση ΑΠΥ\n#=====================\n\ncanvas.create_window(800, H_TIT, window=tk.Label(root,\n text=\"Διαχείριση ΑΠΥ\", bg=DEF_COLOUR, font=BIG_FONT))\n\n# mark\nentry_mark = tk.Entry(root)\ncanvas.create_window(800, H_UPP, window=entry_mark, width = 120)\n\n# Cancel button\nbutton_Cancel = tk.Button(text='Ακύρωση',\n command=CancelInvoice,\n bg='brown', fg='white', font=(MID_FONT))\ncanvas.create_window(800, H_MID+10, window=button_Cancel)\n\n# Print button\nbutton_Print = tk.Button(text='Εκτύπωση',\n command=lambda: PrintInvoice('online'),\n bg='brown', fg='white', font=(MID_FONT))\ncanvas.create_window(800, H_LOW+20, window=button_Print)\n\n# Print offline button\nbutton_Print_offline = tk.Button(text='Εκτύπωση Offline',\n command=lambda: PrintInvoice('offline'),\n bg='brown', fg='white', font=(MID_FONT))\ncanvas.create_window(800, H_BUTT , 
window=button_Print_offline)\n\n#=====================\n# GUI: Initialise\n#=====================\n\nentry_amount.insert(0, '5.00')\nentry_date.insert(0, datetime.today().strftime('%Y-%m-%d'))\nentry_pataddr.insert(0, branchOmVar.get())\nentry_patvisit.insert(0, MAIN_SERVICE)\nif os.name != 'nt': # well, Windows ...\n RequestTransmittedDocs()\n\n# BAM!!!\nroot.mainloop()\n","sub_path":"myDATA.py","file_name":"myDATA.py","file_ext":"py","file_size_in_byte":23010,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"515345766","text":"import re\n\n\nclass Calculator(object):\n def add(self, numbers: str) -> int:\n \"\"\"\n Method add many number\n :str numbers: string with number, all number must be separated by a comma\n \"\"\"\n separatorString = self.__getSepartatorStr(numbers)\n spited_string = re.split(separatorString, numbers)\n instanceOfRegxLen = len(re.findall('\\n|,', numbers))\n numbers_array = [x for x in spited_string if x.isdigit()] # lambda expression\n\n if len(numbers_array) <= instanceOfRegxLen:\n raise ValueError\n\n self.__sum = 0\n for n in numbers_array:\n self.__sum += int(n)\n return self.__sum\n\n def __getSepartatorStr(self, numbers):\n separators = re.findall('//*[^\\w\\d]\\n', numbers)\n returnString = ''\n for separator in separators:\n returnString += separator.replace('//', '').replace('\\n', '') + '|'\n return returnString[:-1]\n\n def __init__(self):\n self.__sum = 0\n","sub_path":"Part1/Calculator.py","file_name":"Calculator.py","file_ext":"py","file_size_in_byte":1003,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"249512500","text":"import os\nimport numpy as np\nimport cv2\n\nfrom tqdm import tqdm\n\nimport matplotlib.pyplot as plt\nfrom scipy import ndimage\nfrom collections import deque\n\nclass MHIProcessor:\n '''\n Process MHI as inputs of Fall Detector model\n '''\n def __init__(self, dim=128, threshold=0.1, interval=2, duration=40, use_body_segmentation=True):\n # initialize MHI params\n self.index = 0\n self.dim = dim\n self.threshold = threshold\n self.interval = interval\n self.duration = duration\n self.decay = 1 / self.duration\n self.use_body_segmentation = use_body_segmentation\n self.masks = deque(maxlen=duration)\n #initialize frames\n \n def resize(self, img):\n if self.dim is not None:\n return cv2.resize(img,(self.dim, self.dim),\n interpolation=cv2.INTER_AREA)\n return img.copy()\n def process(self, frame_bgr):\n self.index += 1\n\n frame = cv2.cvtColor(frame_bgr, cv2.COLOR_BGR2GRAY)\n\n if self.index == 1:\n self.prev_frame = self.resize(frame)\n self.prev_mhi = np.zeros_like(self.prev_frame)\n \n if self.index % self.interval == 0:\n frame = self.resize(frame)\n diff = cv2.absdiff(self.prev_frame, frame)\n \n binary = (diff >= (self.threshold * 255)).astype(np.uint8)\n mask = True \n if self.use_body_segmentation:\n from human_mask import get_body_mask\n mask = get_body_mask(cv2.cvtColor(frame_bgr, cv2.COLOR_BGR2RGB))\n if mask is None:\n binary.fill(0)\n else:\n mask = self.resize(mask.astype('u1'))\n binary = mask & binary\n mhi = binary + (binary == 0) * np.maximum(np.zeros_like(self.prev_mhi),\n (self.prev_mhi - self.decay))\n # update frames\n self.prev_frame = frame\n self.prev_mhi = mhi\n \n if self.index >= (self.duration * self.interval) and (mask is not None):\n img = cv2.normalize(mhi, None, 0.0, 255.0, cv2.NORM_MINMAX)\n return cv2.cvtColor(img.astype('u1'), cv2.COLOR_GRAY2BGR)\n \n return None\n\ndef 
create_MHI(images, preprocess=None, use_body_segmentation=True, **k):\n mhi_processor = MHIProcessor(use_body_segmentation=use_body_segmentation, **k)\n\n preprocessed = []\n for frame in tqdm(images):\n if isinstance(frame, str):\n frame = cv2.imread(frame)\n if preprocess is not None:\n frame = preprocess(frame)\n img = mhi_processor.process(frame)\n # frame_id = mhi_processor.index\n \n if img is not None:\n yield (frame, img)\n return preprocessed\n\n\n","sub_path":"preprocess_datasets/mhi.py","file_name":"mhi.py","file_ext":"py","file_size_in_byte":2867,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"142873129","text":"from flask import request\nfrom flask_restful import Resource, abort, reqparse\n\nWISH_LIST = {\n \"wish1\": {\n \"wish\": \"Buy Macbook Pro\",\n \"done\": False\n },\n \"wish2\": {\n \"wish\": \"Buy Google Pixel 2\",\n \"done\": False\n }\n}\n\ndef abort_if_404(id):\n if id not in WISH_LIST:\n abort(404, message=\"Wish {} tidak ditemukan\".format(id))\n\nparser = reqparse.RequestParser()\nparser.add_argument('wish')\nparser.add_argument('done')\n\nclass Wish(Resource):\n def get(self, id):\n abort_if_404(id)\n return WISH_LIST[id]\n\n def put(self, id):\n abort_if_404(id)\n args = parser.parse_args()\n wish = {\n 'wish': args['wish'],\n 'done': args['done']\n }\n WISH_LIST[id] = wish\n return {id: WISH_LIST[id]}, 201\n\n def delete(self, id):\n abort_if_404(id)\n del WISH_LIST[id]\n return '', 204\n\nclass WishList(Resource):\n def get(self):\n return WISH_LIST\n\n def post(self):\n args = parser.parse_args()\n if WISH_LIST:\n id = int(max(WISH_LIST.keys()).lstrip('wish')) + 1\n else:\n id = int(1)\n id = 'wish{}'.format(id)\n WISH_LIST[id] = {\n 'wish': args['wish'],\n 'done': False\n }\n return {id: WISH_LIST[id]}, 201","sub_path":"api/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":1328,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"201786998","text":"import itertools\n\ndef yesOrNo(List):\n Index=[]\n for i in range(len(List)):\n Index.append(i)\n for i in range (len(List)):\n combination=itertools.combinations(Index,i+1)\n combin=list(combination)\n for j in range(len(combin)):\n lis=List\n for k in range(i+1):\n lis[combin[j][k]]=-lis[combin[j][k]]\n if sum(lis)==0:\n return 'YES'\n return 'YES'\n \n \nn=int(input())\nList=[]\nfor i in range(n):\n List.append(int(input()))\nprint(yesOrNo(List))","sub_path":"Code/CodeRecords/2785/60837/274116.py","file_name":"274116.py","file_ext":"py","file_size_in_byte":560,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"231143094","text":"\nimport torch\nprint(\"PyTorch Version: \",torch.__version__)\nimport torch.nn as nn\nimport torch.optim as optim\nimport numpy as np\nimport torchvision\nfrom torchvision import datasets, models, transforms\nimport matplotlib.pyplot as plt\nimport time\nimport os,glob,shutil\nimport copy\nprint(\"PyTorch Version: \",torch.__version__)\n# print(\"Torchvision Version: \",torchvision.__version__)\nfrom PIL import Image\n\n\ndef main():\n\n test(\n data_dir=\"/home/ars/sda5/data/chaoyuan/datasets/classify_datasets/申请表字段有无签名/val/无\",\n out_dir=\"/home/ars/sda5/data/chaoyuan/datasets/classify_datasets/申请表字段有无签名/val_test_results\",\n model_path='model.pkl',\n input_size=224,\n classes_path='trained_models/handwriting/classes.txt'\n )\n\ndef test(\n data_dir,\n out_dir=None,\n model_path = 'model.pkl',\n 
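        # -------------------------------------------------------------------
        # Hedged note on the MHIProcessor above (illustrative arithmetic, not
        # new behaviour): with duration=40 the per-interval decay step is
        # 1/40 = 0.025, so a pixel that last moved k intervals ago holds
        #     max(0, 1 - k * 0.025)
        # e.g. 1 - 10*0.025 = 0.75 after 10 intervals, and exactly 0 after
        # the full 40 -- which is why older motion appears darker in the MHI.
        # -------------------------------------------------------------------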
input_size = 224,\n classes = None,\n classes_path=None\n):\n if classes_path:\n classes=open(classes_path).read().strip().split('\\n')\n model, device, classes, transform=init(model_path,classes,input_size)\n test_dir(data_dir,model,out_dir,device,classes,transform)\n\ndef init(model_path,classes,input_size):\n\n\n # imgs = [data_dir + '/' + i for i in os.listdir(data_dir)]\n ###########################\n model_name = 'resnet'\n data_transforms = {\n 'train': transforms.Compose([\n transforms.RandomResizedCrop(input_size),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n ]),\n 'val': transforms.Compose([\n transforms.Resize(input_size),\n transforms.CenterCrop(input_size),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n ]),\n }\n transform = data_transforms['val']\n device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n\n ###########################\n model = torch.load(model_path)\n model.to(device)\n model.eval()\n return model,device,classes,transform\n###########################\nos.system('killall display')\ndef mark_img(img,text):\n from PIL import Image,ImageDraw,ImageFont\n draw=ImageDraw.ImageDraw(img)\n font=ImageFont.truetype(font='arsocr/utils/msyh.ttf',size=16)\n draw.text((0,0),text=text,fill='red',font=font)\n return img\n\n\ndef test_dir(dir,model,out_dir,device,classes,transform):\n if not out_dir:\n out_dir=os.path.dirname(dir)+'/'+os.path.basename(dir)+'_test_results'\n if os.path.exists(out_dir):shutil.rmtree(out_dir)\n os.makedirs(out_dir)\n for i,f in enumerate(glob.glob(dir+'/*.jpg')):\n f2=out_dir+'/'+os.path.basename(f)\n img=load_img(f,transform)\n img=img.to(device)\n y=model(img)\n y = torch.argmax(y).cpu().int()\n y = int(y)\n cls=classes[y]\n img=Image.open(f)\n img=mark_img(img,cls)\n img.save(f2)\n print(i,f)\n\n\ndef load_img(f,transform):\n im = Image.open(f)\n im = transform(im).float()\n im = torch.tensor(im, requires_grad=False)\n im = im.unsqueeze(0)\n return im\n\ndef val_dir(data_dir,model,device,classes,transform):\n def iter_data():\n for cls in os.listdir(data_dir):\n cls_dir = data_dir + '/' + cls\n for f in glob.glob(cls_dir + '/*.jpg'):\n yield f, cls\n\n correct = 0\n total = 0\n for i, (f, cls) in enumerate(iter_data()):\n im=load_img(f,transform)\n im = im.to(device)\n y = model(im)\n y = torch.argmax(y).cpu().int()\n y = int(y)\n # print(y,len(classes))\n pred = classes[y]\n if pred == cls:\n correct += 1\n total += 1\n # print(cls==pred)\n print(i, f, y, pred, cls)\n\n accuracy = correct / total\n print('total: %s , accuracy : %s' % (total, accuracy))\n\nif __name__ == '__main__':\n # test_dir(data_dir,model)\n\n main()","sub_path":"build/lib/wpcv/models/resnet/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":3981,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"285125033","text":"from django.contrib.auth.models import User\n# For merging user and profile forms\nfrom django.shortcuts import get_object_or_404\nfrom django.http import Http404\nfrom committees.models import Committee\nfrom .forms import ProfileSearchForm\nfrom .models import Profile, Skill, TermsOfService\nfrom django.views.generic.list import ListView\nfrom django.views.generic.detail import DetailView\nfrom django.views.generic.edit import UpdateView\nfrom django.contrib.messages.views import SuccessMessageMixin\nfrom django.contrib.auth.mixins 
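# ---------------------------------------------------------------------------
# Hedged sketch of the inference pipeline above reduced to a single image.
# The paths ('model.pkl', 'sample.jpg') and the classes list are
# placeholders; it assumes the checkpoint was written with torch.save(model),
# i.e. a whole pickled model, exactly as init()/test() above expect.
# ---------------------------------------------------------------------------
import torch
from PIL import Image
from torchvision import transforms


def classify_one(model_path, image_path, classes, input_size=224):
    device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
    model = torch.load(model_path, map_location=device)
    model.eval()
    tf = transforms.Compose([
        transforms.Resize(input_size),
        transforms.CenterCrop(input_size),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
    ])
    x = tf(Image.open(image_path).convert('RGB')).unsqueeze(0).to(device)
    with torch.no_grad():  # no autograd bookkeeping needed at inference time
        y = model(x)
    return classes[int(torch.argmax(y))]

# e.g. (placeholder paths/classes):
# print(classify_one('model.pkl', 'sample.jpg', ['no', 'yes']))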
import PermissionRequiredMixin\nfrom django.views.generic import CreateView, RedirectView\nfrom django.shortcuts import redirect\nfrom django.urls import reverse\n\nclass ProfileListView(ListView):\n # Lister opp alle brukerprofiler med pagination\n model = Profile\n form_class = ProfileSearchForm\n paginate_by = 9\n template_name = \"userprofile/profile_list.html\"\n\n # Søkefunksjonalitet som filtrerer queryset\n def get_queryset(self):\n filter_val = self.request.GET.get('filter', '')\n committee_array = Committee.objects.values_list('name', flat=True)\n profiles = Profile.objects.filter(user__groups__name__in=list(committee_array), user__first_name__icontains=filter_val).order_by('user__first_name')\n return profiles\n\n def get_context_data(self, **kwargs):\n context = super(ProfileListView, self).get_context_data(**kwargs)\n context['filter'] = self.request.GET.get('filter', '')\n return context\n\nclass SelfProfileDetailView(DetailView):\n # Vis egen profil.\n # Endpointet her er /profile/\n template_name = \"userprofile/profile.html\"\n model = Profile\n\n def get_object(self):\n try:\n userprofile = self.request.user.profile\n return userprofile\n except AttributeError:\n raise Http404(\"Profile not found\")\n\nclass ProfileDetailView(DetailView):\n # Vis en spesifikk profil.\n # Endpointet her er /profile/\n template_name = \"userprofile/profile.html\"\n\n def get_object(self):\n # Get the user for the pk, then return the user profile\n pk = self.kwargs['pk']\n user = get_object_or_404(User, pk=pk)\n return get_object_or_404(Profile, pk=user.profile.pk)\n\nclass ProfileUpdateView(SuccessMessageMixin, UpdateView):\n # Klasse for å oppdatere brukerprofilen sin\n model = Profile\n fields = ['image', 'access_card', 'study', 'show_email', 'skills', 'social_discord', 'social_steam', 'social_battlenet', 'social_git', 'allergi_gluten', 'allergi_vegetar', 'allergi_vegan', 'allergi_annet', 'limit_social', 'phone_number']\n template_name = \"userprofile/edit_profile.html\"\n success_url = \"/profile\"\n success_message = \"Profilen er oppdatert.\"\n\n def get_object(self):\n try:\n userprofile = self.request.user.profile\n return userprofile\n except AttributeError:\n raise Http404(\"Profile not found\")\n\n\nclass TermsOfServiceView(DetailView):\n model = TermsOfService\n template_name = \"userprofile/tos_detail.html\"\n\nclass MostRecentTermsOfServiceView(RedirectView):\n\n # Redirect url without primary key to detail view of the latest TOS\n def get_redirect_url(self, *args, **kwargs):\n termsofservice = TermsOfService.objects.order_by('-pub_date').first()\n return reverse('tos-details', kwargs={'pk': termsofservice.id})\n\n\nclass TermsOfServiceCreateView(PermissionRequiredMixin, SuccessMessageMixin, CreateView):\n\n model = TermsOfService\n fields = ['text', 'pub_date']\n template_name = \"userprofile/create_tos.html\"\n permission_required = \"userprofile.add_termsofservice\"\n success_message = \"TOS er opprettet\"\n\n def get_success_url(self):\n\n # Redirect to detail view of newly created TOS\n return reverse('tos-details', kwargs={'pk': self.object.id})\n\n def get_initial(self):\n initial = super().get_initial()\n\n # Check if new TOS should be based on an old TOS\n if('pk' in self.kwargs):\n # Prepopulate new TOS with text from old TOS given in URL\n initial['text'] = TermsOfService.objects.get(id=self.kwargs['pk']).text\n\n return 
initial\n","sub_path":"userprofile/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4182,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"367124084","text":"import random\nimport numpy as np\n\nclass Functions:\n\n\t@staticmethod\n\tdef random_chromosome_numpy(L_INDIVIDUAL):\t\t\t\t\n\t\tchromosome = [np.float] * L_INDIVIDUAL\n\t\tfor i in xrange(0,L_INDIVIDUAL):\n\t\t\tchromosome[i] = np.around(np.random.uniform(0,360),decimals=3)\n\t\treturn chromosome\n\n\t@staticmethod\n\tdef random_float_value():\n\t\treturn np.random.uniform(0,360)\n\n\t@staticmethod\t\n\tdef create_wheel(layer,previouslayer):\n\t\tif(previouslayer != None):\n\t\t\tpopulation_aux = layer.population + previouslayer.population\n\t\t\tn_individuals_aux = layer.n_individuals + previouslayer.n_individuals\n\t\telse:\n\t\t\tn_individuals_aux = layer.n_individuals\n\t\t\tpopulation_aux = layer.population\n\t\tpopulation_aux.sort()\n\t\tLwheel=n_individuals_aux*10\n\t\tmaxValue=max(population_aux)\n\t\tacc=0\n\t\tfor p in xrange(n_individuals_aux):\n\t\t\tacc+=maxValue.score - population_aux[p].score\n\t\tfraction=[]\n\t\tfor p in xrange(n_individuals_aux):\n\t\t\tfraction.append( float(maxValue.score - population_aux[p].score)/acc)\n\t\t\tif fraction[-1]<=1.0/Lwheel:\n\t\t\t\tfraction[-1]=1.0/Lwheel\t\n\t\tfraction[0]-=(sum(fraction)-1.0)/2\n\t\tfraction[1]-=(sum(fraction)-1.0)/2\n\t\twheel=[]\n\t\tpc=0\n\n\t\tfor f in fraction:\n\t\t\tNp=int(f*Lwheel)\n\t\t\tfor i in xrange(Np):\n\t\t\t\twheel.append(pc)\n\t\t\tpc+=1\n\t\treturn wheel\t","sub_path":"src/Functions/Functions.py","file_name":"Functions.py","file_ext":"py","file_size_in_byte":1234,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"582479561","text":"# Copyright (c) 2016-2019 Renata Hodovan, Akos Kiss.\n#\n# Licensed under the BSD 3-Clause License\n# .\n# This file may not be copied, modified, or distributed except\n# according to those terms.\n\nfrom setuptools import setup, find_packages\n\n\ndef fuzzinator_version():\n def _version_scheme(version):\n return version.format_with('{tag}')\n\n def _local_scheme(version):\n if version.exact and not version.dirty:\n return ''\n parts = ['{distance}'.format(distance=version.distance)]\n if version.node:\n parts.append('{node}'.format(node=version.node))\n if version.dirty:\n parts.append('d{time:%Y%m%d}'.format(time=version.time))\n return '+{parts}'.format(parts='.'.join(parts))\n\n return { 'version_scheme': _version_scheme, 'local_scheme': _local_scheme }\n\n\nsetup(\n name='fuzzinator',\n packages=find_packages(),\n url='https://github.com/renatahodovan/fuzzinator',\n license='BSD',\n author='Renata Hodovan, Akos Kiss',\n author_email='hodovan@inf.u-szeged.hu, akiss@inf.u-szeged.hu',\n description='Fuzzinator Random Testing Framework',\n long_description=open('README.rst').read(),\n zip_safe=False,\n include_package_data=True,\n setup_requires=['setuptools_scm'],\n use_scm_version=fuzzinator_version,\n install_requires=[\n 'chardet',\n 'chevron',\n 'google-api-python-client',\n 'jinja2',\n 'keyring',\n 'keyrings.alt',\n 'markdown',\n 'oauth2client',\n 'pexpect',\n 'picire==19.3',\n 'picireny==19.3',\n 'psutil',\n 'PyGithub',\n 'pymongo',\n 'pyperclip',\n 'python-bugzilla',\n 'python-gitlab',\n 'rainbow_logging_handler',\n 'setuptools',\n 'tornado<6.0', # no Tornado 6 for Python < 3.5\n 'urwid',\n 'xson',\n ],\n extras_require={\n 'docs': [\n 'sphinx',\n 'sphinx_rtd_theme',\n ]\n },\n 
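    # -----------------------------------------------------------------------
    # Hedged note on fuzzinator_version() above: with use_scm_version,
    # setuptools_scm names an exact, clean tagged build just '{tag}' (e.g.
    # '19.3'), while _local_scheme turns a build 2 commits past the tag into
    # something like '19.3+2.gabc1234' (plus '.d<date>' when dirty). The
    # example strings are illustrative, not taken from a real build.
    # -----------------------------------------------------------------------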
entry_points={\n 'console_scripts': ['fuzzinator = fuzzinator.executor:execute']\n }\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":2129,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"69784148","text":"# -*- coding: utf-8 -*-\r\n\r\n##################################\r\n# StabilityMap_n3.py\r\n##################################\r\n\r\n\r\n# analysis of three unidirectionally coupled tipping elements \r\n# for manuscript: \r\n# \"Emergence of cascading dynamics in interacting tipping elements of ecology and climate\"\r\n\r\n# three undirectionally coupled tipping elements given by \r\n# subsystem 0\r\n# dx0/dt = a_0*x0 - b_0*x0^3 + c_0\r\n# subsystem 1\r\n# dx1/dt = a_1*x1 - b_1*x1^3 + c_1 + d_1*x0\r\n# subsystem 2\r\n# dx2/dt = a_2*x2 - b_2*x2^3 + c_2 + d_2*x1\r\n\r\n# computing a (2d) matrix of stability maps showing the number of stable fixed points\r\n# depending on the control parameters of subsystem 1 and 2 \r\n# given a fixed value for the control patameter of subsystem 0\r\n# the position of the stability map withing the matrix is determined by the coupling strength of the tipping elements\r\n\r\n# script generates elements of Figure 7 in manuscript and Figure 2/3 in Supplementary Material \r\n# for parameter settings indicated below \r\n\r\n\r\n###############################################################################\r\n\r\n\r\n# Import packages \r\nimport numpy as np \r\nimport matplotlib.pyplot as plt\r\nfrom numpy import linalg as LA\r\nfrom matplotlib.colors import ListedColormap\r\nfrom numpy import roots\r\n\r\n\r\n# definition of coefficients \r\na_0 = 1.0\r\na_1 = 1.0\r\na_2 =1.0 \r\nb_0 =1.0\r\nb_1 = 1.0\r\nb_2 = 1.0\r\n\r\n# calulcation of intrinsic tipping points \r\nc_1_crit = 2*np.sqrt((a_1/3)**3/(b_1))\r\nc_2_crit = 2*np.sqrt((a_2/3)**3/(b_2))\r\n\r\n# control parameter arrays \r\nanz = 500 # 500 chosen for single stability map, 100 chosen for matrix of stability maps \r\nvalue_c1 = np.linspace(0.0, 0.8,anz) \r\nvalue_c2 = np.linspace(0.8,0.0,anz) \r\n\r\n# coupling stength arrays for subsystem 1 and 2 \r\nvalue_d1 = np.array([0.2]) # used: \r\n # for matrix of stability maps (Figure 7 in manuscript, Figure 2 in Supplementary Material): 0.0,0.2,0.3,0.5,0.7,0.9\r\n # for Figure 3 in Supplementary Material: 0.2\r\nvalue_d2 = np.array([0.2]) # used: \r\n # for matrix of stability maps (Figure 7 in manuscript, Figure 2 in Supplementary Material): 0.9,0.7,0.5,0.3,0.2,0.0\r\n # For Figure 3 in Supplementary Material: 0.2\r\n\r\n# fixed control parameter of subsystem 0\r\nc_0 = 0.4 # used: \r\n # for matrix of stability maps (Figure 7 in manuscript): 0.4\r\n # for matrix of stability maps (Figure 2 in Supplementary Material): 0.2\r\n # for Figure 3 in Supplementary material: 0.4\r\n\r\n\r\n###############################################################################\r\n# definition of functions \r\n###############################################################################\r\n\r\n \r\ndef roots_(*params):\r\n return roots(list(params)) \r\n\r\nroots3 = np.vectorize(roots_, signature = \"(),(),(),()->(n)\",otypes=[complex])\r\nroots9 = np.vectorize(roots_, signature = \"(),(),(),(),(),(),(),(),(),()->(n)\", otypes=[complex])\r\n \r\n# find equilibria via roots\r\ndef find_roots(a0 = 1,b0 = 1 , c0 = 0 , a1 = 1, b1 = 1,c1 = 0,d1 = 0, a2 = 1, b2 = 1, c2 = 0, d2 = 0):\r\n \r\n x0_roots_ = roots([-b0,0,a0,c0])\r\n x0 = []\r\n x1 = []\r\n x2_f = []\r\n x1_f = []\r\n x0_f = 
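# ---------------------------------------------------------------------------
# Hedged numerical check of the intrinsic tipping point used above: for
# dx/dt = a*x - b*x**3 + c with a = b = 1, the fold bifurcation sits at
# c_crit = 2*sqrt((a/3)**3 / b) ~ 0.3849. Just below c_crit the cubic has
# three real equilibria, just above it only one. Values are illustrative.
# ---------------------------------------------------------------------------
import numpy as np

a, b = 1.0, 1.0
c_crit = 2 * np.sqrt((a / 3) ** 3 / b)
for c in (0.9 * c_crit, 1.1 * c_crit):
    equilibria = np.roots([-b, 0, a, c])  # roots of -b*x^3 + a*x + c
    n_real = int(np.sum(np.abs(equilibria.imag) < 1e-9))
    print('c = %.4f -> %d real equilibria' % (c, n_real))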
[]\r\n for x0_root in x0_roots_:\r\n x1 += [roots([-b1,0,a1,c1 + d1 * x0_root])]\r\n x0 += [x0_root]*3\r\n \r\n x0e = np.round(np.array(x0).flatten(),decimals=5)\r\n x1e = np.round(np.array(x1).flatten(),decimals=5)\r\n for i in range(0,len(x1e)): \r\n x2_f += [roots([-b2,0,a2,c2 + d2 * x1e[i]])]\r\n x1_f += [x1e[i]]*3\r\n x0_f += [x0e[i]]*3\r\n \r\n return (np.round(np.array(x0_f).flatten(),decimals=5),np.round(np.array(x1_f).flatten(),decimals=5),np.round(np.array(x2_f).flatten(),decimals=5))\r\n\r\n# determine stability via eigenvalues of Jacobian \r\ndef eigenvalues(a,b,c):\r\n \r\n a00 = a_0 - 3.0*b_0*(a**2) \r\n a01 = 0 \r\n a02 = 0 \r\n a10 = d_1 \r\n a11 = a_1 - 3.0*b_1*(b**2) \r\n a12 = 0 \r\n a20 = 0 \r\n a21 = d_2 \r\n a22 = a_2 - 3.0*b_2*(c**2) \r\n\r\n A = np.array([[a00,a01,a02],[a10,a11,a12], [a20,a21,a22]]) \r\n eig = LA.eigvals(A) \r\n \r\n lambda1 = eig[0] \r\n lambda2 = eig[1] \r\n lambda3 = eig[2] \r\n \r\n return lambda1, lambda2, lambda3 \r\n\r\n \r\n \r\n################################################################################\r\n################################################################################\r\n\r\n\r\n# definitions for axis ticks \r\nmajor_xticks = np.linspace(value_c1[0], value_c1[anz-1],5) \r\nmajor_yticks = np.linspace(value_c2[anz-1], value_c2[0],5) \r\nya = 0\r\n\r\n# initialization of array nr_stable_all for number of stable fixed points\r\nnr_stable_all = np.zeros((value_c2.shape[0],value_c1.shape[0])) \r\n \r\n# initialization of counters \r\ncount_c1 = -1 \r\ncount_c2= -1 \r\ncount_loop = 0 \r\n\r\n# open figure\r\nfig = plt.figure(figsize = (10,10)) \r\n\r\n\r\n###############################################################################\r\n# determine number of stable equilibria \r\n###############################################################################\r\n\r\n\r\n# loop over coupling strength d2\r\nfor d_2 in value_d2: \r\n \r\n # loop over coupling strength d1\r\n for d_1 in value_d1: \r\n \r\n count_loop = count_loop+ 1 \r\n count_c1 = -1\r\n \r\n # loop over control parameter c1\r\n for c_1 in value_c1: \r\n \r\n count_c1 = count_c1+1\r\n count_c2 = -1 \r\n \r\n # loop over control parameter c2\r\n for c_2 in value_c2: \r\n \r\n count_c2 = count_c2+1 \r\n \r\n params = {\"c0\" : c_0, \"c1\" : c_1, \"c2\": c_2,\"d1\" : d_1,\"d2\" : d_2}\r\n\r\n # find equilibira for given combination of control parameters \r\n x0,x1,x2 = find_roots(**params) \r\n # determine stability \r\n l0 = np.zeros(len(x0),dtype = np.complex) +0j\r\n l1 = np.zeros(len(x0),dtype = np.complex) +0j\r\n l2 = np.zeros(len(x0),dtype = np.complex) +0j\r\n for i in range(0,len(x0)): \r\n l0[i],l1[i],l2[i] = eigenvalues(x0[i],x1[i],x2[i])\r\n \r\n # find real FP\r\n such_real = np.logical_and(np.logical_and(np.isreal(x0),np.isreal(x1)),np.isreal(x2))\r\n # find stable FP \r\n such_l = np.logical_and(np.logical_and(np.real(l0[such_real])<0,np.real(l1[such_real])<0),np.real(l2[such_real])<0)\r\n x0_stab = np.array(x0[such_real][such_l])\r\n x1_stab = np.array(x1[such_real][such_l]) \r\n x2_stab = np.array(x2[such_real][such_l]) \r\n \r\n # count number of stable FP and save them in result array \r\n nr_stable_all[count_c2,count_c1] = len(x0_stab)\r\n\r\n \r\n \r\n\r\n#################################################################\r\n# Plotten \r\n################################################################# \r\n\r\n # subplots \r\n ax = fig.add_subplot(value_d2.shape[0],value_d1.shape[0],count_loop) \r\n \r\n \r\n # definition of colormap \r\n cmap_s 
= np.array(['lightslategrey','lightgray','silver', 'darkgray', 'dimgray','lightgreen','skyblue','thistle','firebrick'])\r\n # choose color range depending on range of number of stable equilibria \r\n crange = np.arange(np.min(nr_stable_all),np.max(nr_stable_all)+1,1)\r\n cMap = ListedColormap(cmap_s[crange.astype(int)])\r\n \r\n # plot result array nr_stable_all \r\n plt.imshow(nr_stable_all, interpolation='nearest', cmap = cMap, extent = [value_c1[0],value_c1[anz-1],value_c2[anz-1],value_c2[0]], aspect='auto') # Plotten von nr_stable_all \r\n \r\n # add intrinsic tipping points\r\n plt.plot(np.zeros(len(value_c2))+c_2_crit,value_c2,'--', color = 'black',linewidth = 1) \r\n plt.plot(value_c2,np.zeros(len(value_c2))+c_2_crit,'--', color = 'black',linewidth = 1) \r\n \r\n\r\n # axis labels \r\n plt.xticks(())\r\n plt.yticks(())\r\n \r\n if count_loop > ((value_d2.shape[0] * value_d1.shape[0])-value_d1.shape[0]):\r\n plt.xticks(major_xticks, fontsize = 15) # 15\r\n plt.xlabel(r\"$c_2$\" \"\\n\" \"\\n\" r\"$d_{12} = %s$\"%d_1 , fontsize = 15 ) \r\n if count_loop == (1+(ya*value_d1.shape[0])):\r\n plt.yticks(major_yticks, fontsize = 15) \r\n plt.ylabel(r\"$d_{23} = %s$\" \"\\n\" r\"$c_3$\" %d_2, fontsize =15) \r\n ya = ya + 1 \r\n\r\n \r\n plt.gca().set_aspect('equal', adjustable='box')\r\n for spine in plt.gca().spines.values():\r\n spine.set_visible(False)\r\n \r\n\r\n\r\n\r\n # add arrows (for some specific plots)\r\n # ax.annotate(\"\", xy = (0.25, 0.25), xytext = (0.11, 0.11), arrowprops=dict(color = 'deeppink', width = 10, headwidth = 20, headlength = 20) )\r\n # ax.annotate(\"\", xy = (0.30, 0.11), xytext = (0.11, 0.11), arrowprops=dict(color = 'yellow', width = 10, headwidth = 20, headlength = 20) )\r\n # ax.annotate(\"\", xy = (0.11, 0.301), xytext = (0.11, 0.11), arrowprops=dict(color = 'yellow', width = 10, headwidth = 20, headlength = 20) )\r\n \r\n \r\n","sub_path":"StabilityMap_n3.py","file_name":"StabilityMap_n3.py","file_ext":"py","file_size_in_byte":10510,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"567447579","text":"import numpy as np\nimport pandas as pd\nimport csv\nfrom matplotlib import pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\nfrom scipy import stats\nimport seaborn\nimport statsmodels.formula.api as sm\nfrom statsmodels.api import add_constant\nimport sys, os\nfrom subprocess import Popen, PIPE\nimport pickle\nfrom scipy.interpolate import InterpolatedUnivariateSpline\nimport subprocess\n\n# 1. AIME: the sum of 35 highest years of earnings\n# 2. 3 bend point calculations: given by each year. \n# 3. Maximum earnings to be considered for SS calculation\n# 4. Must have at least 10 years of earnings to qualify\n# 5. (I will fix the lifetime earnings vector, and incorporate the new regression model and maybe try random forests if that's okay with you)\n# 6. I should index past and future earnings by SS index vector\n'''This script calculates the Social Security Marginal Tax Rates for \nindividuals in the 2014 CPS. 
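# ---------------------------------------------------------------------------
# Hedged mini-example of the stability test implemented in eigenvalues()
# above. For the unidirectional coupling the Jacobian is lower triangular,
# so its eigenvalues are just the diagonal entries a_i - 3*b_i*x_i**2; a
# fixed point counts as stable only if all real parts are negative. The
# state x0 = x1 = x2 = 1 with d = 0.2 below is an illustrative choice.
# ---------------------------------------------------------------------------
import numpy as np
from numpy import linalg as LA

J = np.array([[-2.0, 0.0, 0.0],
              [ 0.2, -2.0, 0.0],
              [ 0.0, 0.2, -2.0]])  # a_i - 3*b_i*x_i^2 = 1 - 3 = -2 on the diagonal
eig = LA.eigvals(J)
print(eig)                  # all -2.0
print(np.all(eig.real < 0)) # True -> stable fixed point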
We use our regression to calculate future earnings \nhere for SS anypiab calculator to calculate future earnings after the year\n2014.\n\nRefer to SS_MTR_nofuture.py for a more detailed step-by-step documentation\n\nThe differences between the three SS_MTR files are found in the functions\nget_LE, and get_txt '''\n\n\n\ndef get_SS_MTR(YrsPstHS, Reg_YrsPstHS, age, wages, adjustment, bendpoints, max_earnings, CPI, boost_futurereg, earning,\\\n\t\tin1, in2, in3, in4, in5, in6, in7, in8, in9, in10, in12, in13, race_1, race_4,race_5 ,race_6 ,race_8,race_14, \\\n\t\trace_21 ,race_22, a_sex, age_child, child):\n\t'''\n\tCreates the Lifetime Earnings vector with and without adjustment\n\n\tinputs: \n\t\tx: \t scalar, the number of post-secondary years of education.\n\t\tage: \t scalar, age of individual.\n\t\twages: vector, wage inflation rates since 1950. Used to adjust\n\t\t\t\t for wage inflation.\n\t\tadjustment: scalar, the amount that we adjust the year 2014 earnings\n\t\t\t\t\tto calculate MTRs.\n\toutputs:\n\n\t'''\n\tyears_worked = age - (17 + YrsPstHS)\n\n\tif years_worked < 0:\n\t\tyears_worked = 0\n\tyears_to_work = 65 - (17 + YrsPstHS) #maybe 64\n\n\tstart_yr = sim_year - years_worked\n\tend_yr = start_yr + years_to_work\n\n\t# ---- Creating index for pre-retirment earnings. We don't increase index before 2014 because we use 2014 earnings\n\tindex = wages.loc[wages['Year'] == end_yr - 6].index.values\n\twages = wages.loc[wages['Year'] == end_yr - 6, 'Avg_Wage'].values[0] / wages['Avg_Wage'].values\n\twages = wages[: index+1]\n\twages = np.append(wages, np.ones(6) * wages[-1])\n\n\t# -----Creating regression variables\n\texperience = np.arange(0, years_to_work + 1)\n\texperienceSquared = experience*experience\n\tones = np.ones(len(experience))\n\teduc_level = ones * Reg_YrsPstHS\n\tgender = ones * a_sex\n\tindustry1 = ones * in1\n\tindustry2 = ones * in2\n\tindustry3 = ones * in3\n\tindustry4 = ones * in4\n\tindustry5 = ones * in5\n\tindustry6 = ones * in6\n\tindustry7 = ones * in7\n\tindustry8 = ones * in8\n\tindustry9 = ones * in9\n\tindustry10 = ones * in10\n\tindustry12 = ones * in12\n\tindustry13 = ones * in13\n\trace1 = ones * race_1\n\trace4 = ones * race_4\n\trace5 = ones * race_5\n\trace6 = ones * race_6\n\trace8 = ones * race_8\n\trace14 = ones * race_14\n\trace21 = ones * race_21\n\trace22 = ones * race_22\n\n\tchild = np.ones(len(experience))\n\n\tif age_child < len(experience):\n\t child = np.concatenate((np.zeros(len(experience) - int(age_child)), (np.ones(int(age_child)))))\n\n\tif age_child == 99:\n\t child = np.zeros(len(experience))\n\n\tgender_child = a_sex * child\n\n\tLE = np.exp(\n\t ones * params[0] + educ_level * params[1] + experience * params[2] + experienceSquared * params[3]\n\t + gender * params[4] + child * params[5] + gender_child * params[6] + industry1 * params[7]\n\t + industry2 * params[8] + industry3 * params[9] + industry4 * params[10] + industry5 * params[11]\n\t + industry6 * params[12] + industry7 * params[13] + industry8 * params[14] + industry9 * params[15]\n\t + industry10 * params[16] + industry12 * params[17] + industry13 * params[18] + race1 * params[19]\n\t + race4 * params[20] + race5 * params[21] + race6 * params[22] + race8 * params[23] + race14 * params[24]\n\t + race21 * params[25] + race22 * params[26]).astype(int)\n\n\tif len(LE) == 0:\n\t\tpass\n\t\n\telse:\n\t\tLE = (LE * boost_futurereg[63-years_worked:64+(years_to_work - years_worked)]).astype(int)\n\t\tscale = earning / LE[years_worked]\n\t\tLE = LE * scale\n\t\tLE = (LE * 
wages[63-years_worked:64+(years_to_work - years_worked)]).astype(int)\n\t\n\tLE_adjusted = LE.copy()\n\t\n\tmax_earnings_use = max_earnings.loc[(max_earnings['Year'] >= start_yr) & (max_earnings['Year'] <= end_yr), 'Max_Earnings']\n\n\twithin_threshold = False\n\t# ---------Max earnings check--------------\n\tif LE[years_worked] > max_earnings.loc[max_earnings['Year'] == sim_year, 'Max_Earnings'].values - adjustment:\n\t\twithin_threshold = True\n\tif within_threshold == True: #If within max earnings threshold, make current earnings equal to max earnings\n\t\tLE_adjusted[years_worked] = max_earnings.loc[max_earnings['Year'] == sim_year, 'Max_Earnings'].values\n\telse:\n\t\tLE_adjusted[years_worked] += adjustment # Else, add the adjustment\n\t\n\tLE = np.where(LE > max_earnings_use, max_earnings_use, LE) #Correcting for max earnings threshold for all years\n\tLE_adjusted = np.where(LE_adjusted > max_earnings_use, max_earnings_use, LE_adjusted)\n\n\t# 10 year minimum contribution eligibility rule\n\t# if len(LE) < 10:\n\t# \treturn 0\n\n\t# Taking top 35 earnings years\n\ttop35 = np.argpartition(-LE, 35)\n\tresult_args = top35[:35]\n\ttop35 = np.partition(-LE, 35)\n\tLE = -top35[:35]\n\n\tif (np.sum(LE) / (35.* 12) - int(np.sum(LE) / (35.* 12))) >= .9999: # Correcting round-down errors from int(.)\n\t\tAIME_before = np.sum(LE) / (35.* 12)\n\telse: # Correcting round-down errors from int(.)\n\t\tAIME_before = int(np.sum(LE) / (35.* 12))\n\n\tPIA = 0\n\n\t# Bend points for year of retirement:\n\tbend_pt1 = bendpoints.loc[bendpoints['Year'] == end_yr - 4, 'Bend_pt1'].values[0]\n\tbend_pt2 = bendpoints.loc[bendpoints['Year'] == end_yr - 4, 'Bend_pt2'].values[0]\n\teffective_bendpt2 = bend_pt2 - bend_pt1\n\n\t#First bend-point\n\tif AIME_before <= 0:\n\t\tpass\n\telif (AIME_before > 0) & (AIME_before < bend_pt1):\n\t\tPIA += AIME_before * .9\n\t\tAIME_before = 0\n\telse :\n\t\tPIA += bend_pt1 * .9\n\t\tAIME_before -= bend_pt1\n\n\t# Second bend-point\n\tif AIME_before <= 0:\n\t\tpass\n\telif (AIME_before > 0) & (AIME_before < effective_bendpt2):\n\t\tPIA += AIME_before * .32\n\t\tAIME_before = 0\n\telse :\n\t\tPIA += effective_bendpt2 * .32\n\t\tAIME_before -= effective_bendpt2\n\n\t# Rest\n\tif AIME_before <= 0:\n\t\tpass\n\telse: \n\t\tPIA += AIME_before * .15\n\n\ttop35 = np.argpartition(-LE_adjusted, 35)\n\tresult_args = top35[:35]\n\ttop35 = np.partition(-LE_adjusted, 35)\n\tLE_adjusted = -top35[:35]\n\n\tif (np.sum(LE_adjusted) / (35.* 12) - int(np.sum(LE_adjusted) / (35.* 12))) >= .9999: # Correcting round-down errors from int(.)\n\t\tAIME_after = np.sum(LE_adjusted) / (35.* 12)\n\telse: # Correcting round-down errors from int(.)\n\t\tAIME_after = int(np.sum(LE_adjusted) / (35.* 12))\n\tPIA_after = 0\n\n\t#First bend-point\n\tif AIME_after <= 0:\n\t\tpass\n\telif (AIME_after > 0) & (AIME_after < bend_pt1):\n\t\tPIA_after += AIME_after * .9\n\t\tAIME_after = 0\n\telse :\n\t\tPIA_after += bend_pt1 * .9\n\t\tAIME_after -= bend_pt1\n\n\t# Second bend-point\n\tif AIME_after <= 0:\n\t\tpass\n\telif (AIME_after > 0) & (AIME_after < effective_bendpt2):\n\t\tPIA_after += AIME_after * .32\n\t\tAIME_after = 0\n\telse :\n\t\tPIA_after += effective_bendpt2 * .32\n\t\tAIME_after -= effective_bendpt2\n\n\t# Rest\n\tif AIME_after <= 0:\n\t\tpass\n\telse :\n\t\tPIA_after += AIME_after * .15\n\n\t# ------CPI adjustment for benefit--------\n\n\t# Adjusting for years after 62 years old (default retirement)\n\tadjust_from = end_yr - 4\n\tCPI_adjust = CPI.loc[(CPI['Year'] >= adjust_from) & (CPI['Year'] < 
adjust_from + 4), \"CPI\"].as_matrix()\n\tCPI_adjust_scale = np.prod(CPI_adjust)\n\n\t# Adjusting the benefit amount upon retirement (65) \n\n\tPIA *= CPI_adjust_scale\n\tPIA_after *= CPI_adjust_scale\n\n\t# Adjusting all benefit amounts after retirment year, up to death (78) for pre-adjustment PIA\n\tPIA_vec = np.ones(13) * CPI.loc[(CPI['Year'] >= end_yr) & (CPI['Year'] < end_yr + 13), 'CPI'].values\n\tPIA_vec[0] = PIA\n\tPIA_vec = np.cumprod(PIA_vec) * 12.\n\t#Rounding like in the calculator\n\t# PIA_vec = np.floor(PIA_vec * 10) / 10.\n\tPIA = np.sum(PIA_vec)\n\n\t# Adjusting all benefit amounts after retirment year, up to death (78) for post-adjustment PIA\n\tPIA_vec_after = np.ones(13) * CPI.loc[(CPI['Year'] >= end_yr) & (CPI['Year'] < end_yr + 13), 'CPI'].values\n\tPIA_vec_after[0] = PIA_after\n\tPIA_vec_after = np.cumprod(PIA_vec_after) * 12.\n\t#Rounding like in the calculator\n\t# PIA_vec_after = np.floor(PIA_vec_after * 10) / 10.\n\tPIA_after = np.sum(PIA_vec_after)\n\n\t# Taking different between pre- and post-adjustment PIA and dividing by adjustment for MTR\n\tSS_MTR = ((PIA_after - PIA) / adjustment)\n\tSS_MTR = np.floor(SS_MTR * 100) / 100.\n\tif SS_MTR < 0:\n\t\tSS_MTR = 0\n\n\treturn SS_MTR\n\n\ndef LE_reg(CPS, plot = False):\n\t'''\n\tUses a linear regression to approximate coefficient to Mincer's earnings equation \n\twhich approximates Lifetime Earnings \n\n\tMincers: ln(earnings) = beta_0 + beta_1 * education + beta_2 * work_experience + beta_3 * work_experience^2 \n\n\treturns: array, the fitted parameters of the regression.\n\t'''\n\t\n\tsample = CPS.copy()[\n\n\t (CPS['a_age'] > 16) & (CPS['a_age'] < 66) & (CPS['a_ftpt'] == 0) & (CPS['earned_income'] > 0)]\n\n\tearned_income = sample['earned_income']\n\tsample['const'] = 1.\n\tindep_vars = ['const', 'Reg_YrsPstHS', 'experience', 'experienceSquared', 'a_sex', 'child', 'a_sex_child',\\\n\t '1.0', '2.0', '3.0', '4.0', '5.0','6.0', '7.0', '8.0', '9.0', '10.0','12.0', '13.0',\n\t 'race_1.0' , 'race_4.0','race_5.0' ,'race_6.0' ,'race_8.0','race_14.0', 'race_21.0' ,'race_22.0']\n\n\tX = sample[indep_vars]\n\tmodel = sm.OLS(np.log(sample['earned_income']), X)\n\tresults = model.fit()\n\tparams = results.params\n\tif plot == True:\n\t\tx = np.linspace(0,np.max(CPS['earned_income']),15000)\n\t\ty = 0\n\t\tfor i in xrange(len(params)):\n\t\t\ty += sample[indep_vars[i]] * params[i]\n\t\t# Cross Validation:\n\t\tfig, ax = plt.subplots()\n\t\tplt.scatter( np.exp(y) , sample['earned_income'], label = 'earned_income vs. predicted earned_income')\n\t\tplt.plot(x, x, label = 'perfect fit', c = 'black', linewidth = 5)\n\t\tlegend = ax.legend(loc = \"upper right\", shadow = True, title = 'earned_income vs. 
predicted earned_income')\n\t\tplt.xlabel('Predicted earned_income')\n\t\tplt.ylabel('Actual earned_income Amount')\n\t\t# plt.ylim(0,7000)\n\t\tplt.title('Accuracy of Linear Regresssion When Predicting earned_income')\n\t\tplt.show()\n\treturn params\n\nsim_year = 2014\nadjustment = 500\n\nbendpoints = pd.read_csv('Bendpoints.csv', dtype = {\"Year\": np.int32, \"Bend_pt1\": np.int32, \"Bend_pt2\": np.int32})\nmax_earnings = pd.read_csv('Max_Earnings.csv', dtype = {\"Year\": np.int32, \"Max_Earnings\": np.float64})\nwages = pd.read_csv('averagewages.csv', dtype = {\"Year\": np.int32, \"Avg_Wage\": np.float64})\nCPI = pd.read_csv('CPI_Intermediate.csv', dtype = {\"Year\": np.int32, \"Max_Earnings\": np.float64})\n # Below makes it so the earnings calculated via the regression (in 2014 terms) for previous years \n\t# are indexed in 2014 terms later\nwages.loc[wages['Year'] < sim_year, 'Avg_Wage'] = wages.loc[wages['Year'] == sim_year, 'Avg_Wage'].values[0]\nboost_futurereg = wages['Avg_Wage'].values / wages['Avg_Wage'][wages['Year'] == sim_year].values[0] \n\nCPS = pd.read_csv('CPSRETS.csv')\nCPS = CPS[[ 'AGEH' ,'AGES' ,'WAS', 'WASS','BIL_HEAD' ,'BIL_SPOUSE', 'FIL_HEAD', 'FIL_SPOUSE',\\\n\t \t'HGA_HEAD', 'HGA_SPOUSE', 'FTPT_HEAD', 'FTPT_SPOUSE', 'FAMREL_HEAD','FAMREL_SPOUSE', \\\n\t \t'MJIND_SPOUSE', 'MJIND_HEAD','CPSSEQ', 'WT','h_seq', 'GENDER_HEAD', 'GENDER_SPOUSE', \\\n\t \t'RACE_HEAD', 'RACE_SPOUSE']].fillna(0)\n\nCPS['earned_income_spouse'] = CPS[['WASS','BIL_SPOUSE','FIL_SPOUSE']].sum(axis = 1)\nCPS['earned_income_head'] = CPS[['WAS','BIL_HEAD','FIL_HEAD']].sum(axis = 1)\nCPS_before = CPS.copy()\nCPS_spouse = CPS[['earned_income_spouse','AGES', 'HGA_SPOUSE', 'FTPT_SPOUSE', 'FAMREL_SPOUSE',\\\n\t 'MJIND_SPOUSE', 'CPSSEQ','WT', 'h_seq', 'GENDER_SPOUSE', 'RACE_SPOUSE']][CPS.AGES != 0].copy()\nCPS_spouse.columns = ['earned_income','a_age', 'a_hga', 'a_ftpt', 'a_famrel', 'a_mjind','CPSSEQ',\\\n\t'wt', 'fh_seq', 'a_sex', 'prdtrace']\nCPS_head = CPS[['earned_income_head','AGEH', 'HGA_HEAD', 'FTPT_HEAD', 'FAMREL_HEAD', 'MJIND_HEAD',\\\n\t 'CPSSEQ','WT', 'h_seq', 'GENDER_HEAD', \"RACE_HEAD\"]].copy()\nCPS_head.columns = ['earned_income','a_age', 'a_hga', 'a_ftpt', 'a_famrel', 'a_mjind','CPSSEQ','wt',\\\n\t 'fh_seq', 'a_sex', 'prdtrace']\nCPS = pd.concat([CPS_head, CPS_spouse], axis = 0).reset_index()\nCPS['a_age'] = CPS['a_age'].astype(int)\nCPS['a_mjind'] = CPS['a_mjind'].astype(str)\ndummies = pd.get_dummies(CPS['a_mjind'], drop_first=True)\nCPS = pd.concat([CPS, dummies], axis=1)\n\ndf = pd.Series([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 3, 5, 7, 11, 11], \\\nindex=[0,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46])\nCPS['Reg_YrsPstHS'] = CPS['a_hga'].map(df)\n\n# 0: Children\n# 31: Less than 1st grade\n# 32: 1st,2nd,3rd,or 4th grade\n# 33: 5th or 6th grade\n# 34: 7th and 8th grade\n# 35: 9th grade\n# 36: 10th grade \n# 37: 11th grade \n# 38: 12th grade no diploma\n# 39: High school graduate - high \n# 40: Some college but no degree \n# 41: associates degree - occupational/vocational training\n# 42: associates degree - academic program\n# 43: bachelor's degree\n# 44: master's degree\n# 45: professional degree\n# 46: doctorate\n\ndf = pd.Series([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 4, 7, 10, 10], \\\nindex=[0,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46])\nCPS['YrsPstHS'] = CPS['a_hga'].map(df)\nCPS_child = CPS[CPS['a_famrel'] == 1]\nCPS = CPS.join(CPS_child.groupby('fh_seq')['a_age'].max(), on='fh_seq', rsuffix='_child')\nCPS['a_age_child'] = CPS['a_age_child'].fillna(99)\nCPS['child'] = 
np.where(CPS['a_age_child'] == 99, 0, 1)\n\nCPS['experience'] = CPS['a_age'] - CPS['YrsPstHS'] - 17 \nCPS.loc[CPS['experience'] < 0, 'experience'] = 1\nCPS['experienceSquared'] = CPS['experience'] * CPS['experience']\nCPS.loc[:, 'prdtrace'] = 'race_' + CPS.loc[:, 'prdtrace'].astype(str)\ndummies = pd.get_dummies(CPS['prdtrace'], drop_first=False)\nCPS = pd.concat([CPS, dummies], axis=1)\n\nCPS['a_sex_child'] = CPS['a_sex'] * CPS['child']\n\nparams = LE_reg(CPS)\n\nCPS['SS_MTR'] = 0\nCPS_laborforce = CPS[(CPS['a_age'] >17) & (CPS['a_age'] < 66) & (CPS['a_ftpt'] == 0) & (CPS['earned_income'] > 0)]\n\nCPS_laborforce['SS_MTR'] = CPS_laborforce.apply(lambda x: get_SS_MTR(x['YrsPstHS'], x['Reg_YrsPstHS'], x['a_age'],\\\n\t wages, adjustment, bendpoints, max_earnings, CPI, boost_futurereg, x['earned_income'],\\\n\t x['1.0'] , x['2.0'],x['3.0'],x['4.0'],x['5.0'],x['6.0'],x['7.0'],x['8.0'],x['9.0'],x['10.0'],x['12.0'], \\\n\t x['13.0'], x['race_1.0'] , x['race_4.0'],x['race_5.0'] ,x['race_6.0'] ,x['race_8.0'],x['race_14.0'], \\\n\t x['race_21.0'] ,x['race_22.0'], x['a_sex'], x['a_age_child'], x['child']), axis=1)\n\n\nboth = pd.concat([CPS, CPS_laborforce[[\"SS_MTR\"]]], axis = 1).fillna(0)\nheads = both.iloc[:len(CPS_before['AGEH'])]\nspouses = both.iloc[len(CPS_before['AGEH']):]\nfinal = heads.merge(spouses,how = 'left', suffixes = ('_head', '_spouse'), on = 'CPSSEQ').fillna(0)\nfinal[['SS_MTR_head', 'SS_MTR_spouse', 'earned_income_head' , 'earned_income_spouse', 'CPSSEQ']].to_csv('SS_MTR_FutureReg_RETS_age_fixed.csv', index = None)\n\n","sub_path":"SS/MTR/CPS_RETS/SS_MTR_FutureReg.py","file_name":"SS_MTR_FutureReg.py","file_ext":"py","file_size_in_byte":15187,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"496648851","text":"\"\"\" String replace for column values. 
\"\"\"\nimport csv\nimport getopt\nimport sys\nimport re\n\nimport rill\n\n\n@rill.component\n@rill.inport('INFILE')\n@rill.inport('OUTFILE_IN')\n@rill.inport('TO_REPLACE')\n@rill.inport('WITH_REPLACE')\n@rill.outport('OUTFILE_OUT')\ndef replace_text(INFILE, OUTFILE_IN, TO_REPLACE, WITH_REPLACE, OUTFILE_OUT):\n \"\"\"\n Replace text within field with new text.\n :param ggd361_csv: input CSV file\n :param out_file: output CSV file\n :param to_replace: substring within field to be replaced\n :param with_replace: substring to replace to_replace substring\n \"\"\"\n for infile, outfile, to_replace, with_replace in \\\n zip(INFILE.iter_contents(), OUTFILE_IN.iter_contents(),\n THRESHOLD.iter_contents(), VALUE.iter_contents()):\n with open(infile) as _in, open(outfile, 'w') as _out:\n data = csv.reader(_in)\n output = csv.writer(_out)\n for row in data:\n for i, item in enumerate(row):\n field = item.replace(\"'\", \"\")\n new_field = re.sub(to_replace, with_replace, field)\n row[i] = \"'{0}'\".format(new_field)\n output.writerow(row)\n\n # ofile = open(out_file, 'w')\n # writer = csv.writer(ofile, delimiter=',', quoting=csv.QUOTE_NONE, lineterminator='\\n')\n #\n # csv_data = []\n # field_names = None\n # with open(ggd361_csv) as csv_values:\n # reader = csv.reader(csv_values, delimiter=',', quoting=csv.QUOTE_NONE)\n # for row in reader:\n # field = row[0].replace('\\'', '')\n # new_field = field.replace(to_replace, with_replace)\n # row[0] = '\\'' + new_field + '\\''\n #\n # writer.writerow(row)\n # ofile.close()\n","sub_path":"dit_flow/dit_widget/replace_text.py","file_name":"replace_text.py","file_ext":"py","file_size_in_byte":1755,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"507064899","text":"from math import sin, pi\nfrom numba.decorators import vectorize\n\n@vectorize\ndef sinc(x):\n if x==0.0:\n return 1.0\n else:\n return sin(x*pi)/(pi*x)\n \nfrom numpy import linspace\nx = linspace(-5,5,1001)\ny = sinc(x)\nfrom pylab import plot, show\nplot(x,y)\nshow()\n","sub_path":"examples/example.py","file_name":"example.py","file_ext":"py","file_size_in_byte":279,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"18968727","text":"import logging\nimport numpy as np\nimport os\nimport time\nimport torch\nimport torch.nn as nn\nimport cv2\nfrom utils.meter import AverageMeter\nfrom utils.metrics import R1_mAP, R1_mAP_Pseudo\nfrom utils.reranking import re_ranking, euclidean_distance\nfrom utils.db_qe import retrieve, calculate_sim_matrix\nimport json\nimport datetime\nfrom torch.cuda.amp import autocast as autocast, GradScaler\n#os.environ[\"CUDA_VISIBLE_DEVICES\"] = \"0, 1\"\n\ndef do_train(cfg,\n model,\n center_criterion,\n train_loader,\n val_loader,\n optimizer,\n optimizer_center,\n scheduler,\n loss_fn,\n num_query):\n log_period = cfg.SOLVER.LOG_PERIOD\n checkpoint_period = cfg.SOLVER.CHECKPOINT_PERIOD\n\n device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')\n epochs = cfg.SOLVER.MAX_EPOCHS\n\n logger = logging.getLogger(\"reid_baseline.train\")\n logger.info('start training')\n\n if device:\n model.to(device)\n #print(\"cuda个数\", torch.cuda.device_count())\n if torch.cuda.device_count() > 1:\n print('Using {} GPUs for training'.format(torch.cuda.device_count()))\n model = nn.DataParallel(model)\n loss_meter = AverageMeter()\n acc_meter = AverageMeter()\n ce_loss_meter = AverageMeter()\n triplet_loss_meter = AverageMeter()\n # train\n scaler = GradScaler()\n 
for epoch in range(1, epochs + 1):\n        start_time = time.time()\n        loss_meter.reset()\n        acc_meter.reset()\n        ce_loss_meter.reset()\n        triplet_loss_meter.reset()\n\n        model.train()\n        for n_iter, (img, vid) in enumerate(train_loader):\n            optimizer.zero_grad()\n            img = img.cuda(non_blocking=True)\n            target = vid.cuda(non_blocking=True)\n            with autocast():\n                score, feat = model(img, target)\n                loss, ce_loss, triplet_loss = loss_fn(score, feat, target)\n            scaler.scale(loss).backward()\n            # optimizer.module.step()\n            scaler.step(optimizer)\n            scaler.update()\n            acc = (score.max(1)[1] == target).float().mean()\n            loss_meter.update(loss.item(), img.shape[0])\n            ce_loss_meter.update(ce_loss.item(), img.shape[0])\n            triplet_loss_meter.update(triplet_loss.item(), img.shape[0])\n\n            acc_meter.update(acc, 1)\n\n            if (n_iter + 1) % log_period == 0:\n                logger.info(\"Epoch[{}] Iteration[{}/{}] Loss: {:.3f}, Ce Loss: {:.3f}, Triplet Loss: {:.3f}, Acc: {:.3f}, Base Lr: {:.2e}\"\n                            .format(epoch, (n_iter + 1), len(train_loader),\n                                    loss_meter.avg, ce_loss_meter.avg, triplet_loss_meter.avg, acc_meter.avg, scheduler.get_lr()[0]))\n        scheduler.step()\n        end_time = time.time()\n        time_per_batch = (end_time - start_time) / (n_iter + 1)\n        logger.info(\"Epoch {} done. Time per batch: {:.3f}[s] Speed: {:.1f}[samples/s]\"\n                    .format(epoch, time_per_batch, train_loader.batch_size / time_per_batch))\n\n        if epoch % checkpoint_period == 0:\n            torch.save(model.state_dict(), os.path.join(cfg.OUTPUT_DIR, cfg.MODEL.NAME + '_{}.pth'.format(epoch)))\n\ndef cosine_dist(x, y):\n    bs1, bs2 = x.size(0), y.size(0)\n    frac_up = torch.matmul(x, y.transpose(0, 1))\n    frac_down = (torch.sqrt(torch.sum(torch.pow(x, 2), 1))).view(bs1, 1).repeat(1, bs2) * \\\n                (torch.sqrt(torch.sum(torch.pow(y, 2), 1))).view(1, bs2).repeat(bs1, 1)\n    cosine = frac_up / frac_down\n    return 1 - cosine\n    \ndef do_inference(cfg, model, val_loader, num_query, query_name, gallery_name):\n    time_start = time.time()\n    model.eval()\n    model.cuda()\n    model = nn.DataParallel(model)\n    feature = torch.FloatTensor().cuda()\n    with torch.no_grad():\n        for (img, pid) in val_loader:\n            input_img = img.cuda()\n            input_img_mirror = input_img.flip(dims=[3])\n            outputs = model(input_img)\n            outputs_mirror = model(input_img_mirror)\n            f = outputs + outputs_mirror\n            feature = torch.cat((feature, f), 0) \n    #feats = torch.nn.functional.normalize(features, dim=1, p=2)\n    #query_vecs = feature[-num_query:, :]\n    #reference_vecs = feature[:-num_query, :]\n    del model\n    feature_ = feature.cpu().numpy()\n    np.save('ibn_a_600.npy', feature_)\n    if cfg.TEST.DB_QE:\n        print(\"running the db_qe step\")\n        query_vecs = feature[-num_query:, :].cpu().numpy()\n        reference_vecs = feature[:-num_query, :].cpu().numpy()\n        query_vecs, reference_vecs, distmat = retrieve(query_vecs, reference_vecs)\n        if cfg.TEST.RE_RANKING:\n            print(\"running the re-ranking step\")\n            feature = torch.cat((torch.tensor(reference_vecs, dtype=torch.float32), torch.tensor(query_vecs, dtype=torch.float32)), 0)\n            feature = torch.nn.functional.normalize(feature, dim=1, p=2)\n            query_vecs = feature[-num_query:, :]\n            reference_vecs = feature[:-num_query, :]\n            ranking_parameter = cfg.TEST.RE_RANKING_PARAMETER\n            k1 = ranking_parameter[0]\n            k2 = ranking_parameter[1]\n            lambda_value = ranking_parameter[2]\n            distmat = re_ranking(query_vecs, reference_vecs, k1=k1, k2=k2, lambda_value=lambda_value)\n    \n    elif cfg.TEST.RE_RANKING:\n        print(\"running the re-ranking step\")\n        feature = torch.nn.functional.normalize(feature, dim=1, p=2)\n        query_vecs = feature[-num_query:, :]\n        reference_vecs = feature[:-num_query, :]\n        
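# Note (added): re_ranking here appears to follow the k-reciprocal\n        # encoding scheme (Zhong et al., 2017): k1 and k2 set the sizes of\n        # the reciprocal/expanded neighbourhoods, while lambda_value weights\n        # the original distance against the Jaccard distance, roughly:\n        #   final_dist = lambda_value * original_dist + (1 - lambda_value) * jaccard_dist\n        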
ranking_parameter = cfg.TEST.RE_RANKING_PARAMETER\n        k1 = ranking_parameter[0]\n        k2 = ranking_parameter[1]\n        lambda_value = ranking_parameter[2]\n        distmat = re_ranking(query_vecs, reference_vecs, k1=k1, k2=k2, lambda_value=lambda_value)\n    else :\n        print(\"running the plain distance computation\")\n        query_vecs = feature[-num_query:, :].cpu().numpy()\n        reference_vecs = feature[:-num_query, :].cpu().numpy()\n        print(query_vecs.shape)\n        distmat = calculate_sim_matrix(query_vecs, reference_vecs)\n        #query_vecs = feature[-num_query:, :]\n        #reference_vecs = feature[:-num_query, :]\n        #distmat = cosine_dist(query_vecs, reference_vecs)\n        #distmat = distmat.cpu().numpy()\n    np.save('distmat_a_600.npy', distmat)\n    num_q, num_g = distmat.shape\n    indices = np.argsort(distmat, axis=1)\n    max_10_indices = indices[:, :10]\n    res_dict = dict()\n    for q_idx in range(num_q):\n        filename = query_name[q_idx][0].split(\"\\\\\")[-1]\n        max_10_files = [gallery_name[i][0].split(\"\\\\\")[-1] for i in max_10_indices[q_idx]]\n        res_dict[filename] = max_10_files \n    with open('submission.csv', 'w') as file:\n        for k, v in res_dict.items():\n            writer_string = \"%s,{%s,%s,%s,%s,%s,%s,%s,%s,%s,%s}\\n\"%(k, v[0],v[1],v[2],v[3],v[4],v[5],v[6],v[7],v[8],v[9])\n            file.write(writer_string)\n        file.close()\n    time_final = time.time()\n    print(\"total elapsed time:\", time_final-time_start)\n    \n    \n\ndef do_inference_Pseudo(cfg,\n                 model,\n                 val_loader,\n                 num_query\n                 ):\n    device = \"cuda\"\n\n    evaluator = R1_mAP_Pseudo(num_query, max_rank=200, feat_norm=cfg.TEST.FEAT_NORM)\n    evaluator.reset()\n    if device:\n        if torch.cuda.device_count() > 1:\n            print('Using {} GPUs for inference'.format(torch.cuda.device_count()))\n            model = nn.DataParallel(model)\n        model.to(device)\n\n    reranking_parameter = [14, 4, 0.4]\n\n    model.eval()\n    for n_iter, (img, pid, camid, imgpath) in enumerate(val_loader):\n        with torch.no_grad():\n            img = img.to(device)\n            if cfg.TEST.FLIP_FEATS == 'on':\n                feat = torch.FloatTensor(img.size(0), 2048).zero_().cuda()\n                for i in range(2):\n                    if i == 1:\n                        inv_idx = torch.arange(img.size(3) - 1, -1, -1).long().cuda()\n                        img = img.index_select(3, inv_idx)\n                    f = model(img)\n                    feat = feat + f\n            else:\n                feat = model(img)\n\n            evaluator.update((feat, imgpath))\n\n    distmat, img_name_q, img_name_g = evaluator.compute(reranking_parameter)\n\n    return distmat, img_name_q, img_name_g","sub_path":"processor/processor.py","file_name":"processor.py","file_ext":"py","file_size_in_byte":8351,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"210943843","text":"from __future__ import print_function\nimport sys\nimport os\nimport numpy as np\nimport logging as log\nfrom openvino.inference_engine import IECore\n\nimport tensorflow as tf\nfrom tensorflow import keras\nfrom tensorflow.keras import layers\nfrom tensorflow.keras.models import Sequential\n\nimport ngraph as ng\n\nimport time\nimport threading\nimport cv2\n\nfrom PIL import Image\n\nclass InferenceHandler():\n    def __init__(self, person_model, classification_model, device):\n        self.model = person_model\n        self.classification_model = classification_model\n        self.device = device\n        self.classification = None\n        self.ie = None\n        self.net = None\n        self.exec_net = None\n        self.out_blob = None\n        self.n, self.c, self.h, self.w = None, None, None, None\n\n\n        # KERAS\n        self.class_net = None\n        self.class_exec_net = None\n\n\n        self.init_models()\n\n\n\n    def init_models(self):\n        self.ie = IECore()\n        self.net = self.ie.read_network(model=self.model)\n\n        self.class_net = 
self.ie.read_network(model=self.classification_model)\n        self.class_exec_net = self.ie.load_network(network=self.class_net, device_name=self.device)\n\n        self.exec_net = self.ie.load_network(network=self.net, device_name=self.device)\n\n        for input_key in self.net.input_info:\n            if len(self.net.input_info[input_key].input_data.layout) == 4:\n                self.n, self.c, self.h, self.w = self.net.input_info[input_key].input_data.shape\n        \n        print(\"[info] Preparing input blobs\")\n        self.out_blob = next(iter(self.net.outputs))\n\n        print('[info] Preparing output blobs')\n        output_name, output_info = \"\", None\n        func = ng.function_from_cnn(self.net)\n        if func:\n            ops = func.get_ordered_ops()\n            for op in ops:\n                if op.friendly_name in self.net.outputs and op.get_type_name() == \"DetectionOutput\":\n                    output_name = op.friendly_name\n                    output_info = self.net.outputs[output_name]\n                    break\n        else:\n            output_name = list(self.net.outputs.keys())[0]\n            output_info = self.net.outputs[output_name]\n\n        if output_name == \"\":\n            log.error(\"Can't find a DetectionOutput layer in the topology\")\n        output_dims = output_info.shape\n        if len(output_dims) != 4:\n            log.error(\"Incorrect output dimensions for SSD model\")\n        max_proposal_count, object_size = output_dims[2], output_dims[3]\n        if object_size != 7:\n            log.error(\"Output item should have 7 as a last dimension\")\n        output_info.precision = \"FP32\"\n\n    def prediction(self, net, model, img, preds, box):\n        start_time = time.time()\n        rs_img = cv2.resize(img, (224, 224), interpolation=cv2.INTER_LINEAR)\n        rs_img = rs_img[:,:,::-1].transpose(2,0,1)\n        rs_img = np.ascontiguousarray(rs_img)\n\n        input_blob = next(iter(net.inputs))\n\n        res = model.infer(inputs={input_blob: [rs_img]})\n        # print(res)\n\n        preds.append((\"jaune\" if res['reid_embedding'][0][0] < res['reid_embedding'][0][1] else \"autre\", box))\n        end_time = time.time()\n        # print(\"Prediction in \" + str(end_time - start_time) + \" seconds\")\n\n    def process_frame(self, frame):\n        start_time = time.time()\n        image = frame\n        tmp_image = image\n        images = np.ndarray(shape=(self.n, self.c, self.h, self.w))\n        images_hw = []\n        if image is None:\n            return\n        ih, iw = image.shape[:-1]\n        images_hw.append((ih, iw))\n        if (ih, iw) != (self.h, self.w):\n            image = cv2.resize(image, (self.w, self.h))\n        image = image.transpose((2, 0, 1))  # Change data layout from HWC to CHW\n        images[0] = image\n        data = {}\n        data['data'] = images\n        res = self.exec_net.infer(inputs=data)\n        res = res[self.out_blob]\n        boxes, classes = {}, {}\n        data = res[0][0]\n        for number, proposal in enumerate(data):\n            if proposal[2] > 0:\n                imid = int(proposal[0])\n                ih, iw = images_hw[imid]\n                label = int(proposal[1])\n                confidence = proposal[2]\n                xmin = int(iw * proposal[3])\n                ymin = int(ih * proposal[4])\n                xmax = int(iw * proposal[5])\n                ymax = int(ih * proposal[6])\n                if proposal[2] > 0.75:\n                    if not imid in boxes.keys():\n                        boxes[imid] = []\n                    boxes[imid].append([xmin, ymin, xmax, ymax])\n                    if not imid in classes.keys():\n                        classes[imid] = []\n                    classes[imid].append(label)\n        persons = []\n        for imid in classes:\n            for box in boxes[imid]:\n                persons.append((tmp_image[box[1]:box[3], box[0]:box[2]], box))\n        predictions = []\n        threads = []\n        for p in persons:\n            person = p[0]\n            box = p[1]\n            if person.size == 0:\n                continue\n            threads.append(threading.Thread(target=self.prediction, args=(self.class_net, self.class_exec_net, person, predictions, box)))\n            threads[-1].start()\n        for t in threads:\n            t.join()\n        for pred, box in predictions:\n            if (pred == 'jaune'):\n                tmp_image = 
cv2.rectangle(tmp_image, (box[0], box[1]), (box[2], box[3]), (0,255,0), 3)\n else:\n tmp_image = cv2.rectangle(tmp_image, (box[0], box[1]), (box[2], box[3]), (0,0,255), 3)\n end_time = time.time()\n print(\"FPS: \" + str(1 / (end_time - start_time)) + \"FPS\")\n return tmp_image\n ","sub_path":"InferenceHandler.py","file_name":"InferenceHandler.py","file_ext":"py","file_size_in_byte":5741,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"73586214","text":"from typing import Tuple\nfrom skorch import callbacks\nimport os\nfrom pathlib import Path\nfrom torch.utils.tensorboard.writer import SummaryWriter\nfrom datetime import datetime\n\n\nclass LoadEndState(callbacks.Callback):\n \"\"\"\n Loads weights from a checkpoint when training ends. \n This is useful, for example, to load and use the best weights of all epochs.\n \"\"\"\n\n def __init__(self, checkpoint: callbacks.Checkpoint, delete_checkpoint=False):\n \"\"\"\n Args:\n delele_checkpoints: Deletes checkpoint after loading it.\n \"\"\"\n self.checkpoint = checkpoint\n self.delete_checkpoint = delete_checkpoint\n\n def on_train_end(self, net,\n X=None, y=None, **kwargs):\n net.load_params(checkpoint=self.checkpoint)\n if(self.delete_checkpoint):\n os.remove(Path(self.checkpoint.dirname) / self.checkpoint.f_params)\n\n\nclass TensorBoardCallbackBase(callbacks.Callback):\n UUIDs = {}\n SUMMARY_WRITERS = {}\n\n def __init__(self, writer, close_after_train=True):\n self.writer = writer\n self.close_after_train = close_after_train\n\n def on_train_begin(self, net, X, y, **kwargs):\n import hashlib\n if(isinstance(self.writer, dict)):\n w = str(self.writer)\n if(w in TensorBoardCallbackBase.SUMMARY_WRITERS):\n self.writer = TensorBoardCallbackBase.SUMMARY_WRITERS[w]\n else:\n self.writer = SummaryWriter(**self.writer)\n TensorBoardCallbackBase.SUMMARY_WRITERS[w] = self.writer\n m = hashlib.md5()\n m.update(str(X[:len(X)//2].mean()).encode('utf-8'))\n m.update(str(X.max()).encode('utf-8'))\n m.update(str(y[:len(y)//2].sum()).encode('utf-8'))\n uuid = m.hexdigest()\n if(uuid not in TensorBoardCallbackBase.UUIDs):\n TensorBoardCallbackBase.UUIDs[uuid] = len(TensorBoardCallbackBase.UUIDs)+1\n self.foldtag = \"fold-%d\" % TensorBoardCallbackBase.UUIDs[uuid]\n\n return super().on_train_begin(net, X=X, y=y, **kwargs)\n\n def get_params(self, deep):\n params = super().get_params(deep=deep)\n if(isinstance(self.writer, dict)):\n return params\n\n w = self.writer\n\n writer_params = {\n 'log_dir': w.log_dir,\n 'purge_step': w.purge_step,\n 'max_queue': w.max_queue,\n 'flush_secs': w.flush_secs,\n 'filename_suffix': w.filename_suffix\n }\n params['writer'] = writer_params\n return params\n\n def on_train_end(self, net, X, y, **kwargs):\n if self.close_after_train:\n self.writer.close()\n\n @staticmethod\n def create_SummaryWriter(root_directory, name=\"\") -> SummaryWriter:\n current_time = datetime.now().strftime('%b%d_%H-%M-%S')\n log_dir = os.path.join(root_directory, current_time, name)\n return SummaryWriter(log_dir=log_dir)\n\n\nclass TensorBoardEmbeddingCallback(TensorBoardCallbackBase):\n \"\"\"\n Callback that saves images of embeddings of a net. \n The neural net must implement transform(.) 
method.\n \"\"\"\n\n def __init__(self, writer: SummaryWriter, close_after_train=False, labels_name=None) -> None:\n super().__init__(writer, close_after_train)\n self.labels_name = labels_name\n\n def on_train_end(self, net, X=None, y=None, **kwargs):\n if(self.labels_name is not None):\n y = [self.labels_name[int(a)] for a in y]\n\n # D_train, D_valid = net.get_split_datasets(X, y, **kwargs)\n\n X_emb = net.transform(X)\n self.writer.add_embedding(tag=self.foldtag+\"/metric_space/train\", mat=X_emb, metadata=y)\n\n # if(net.validation_dataset is not None): # FIXME: use net.get_split_datasets\n # y_val = net.validation_dataset.y\n # if(self.labels_name is not None):\n # y_val = [self.labels_name[a] for a in y_val]\n # X_emb = net.transform(net.validation_dataset.X)\n # self.writer.add_embedding(tag=self.foldtag+\"/metric_space/valid\", mat=X_emb, metadata=y_val)\n super().on_train_end(net, X=X, y=y, **kwargs)\n\n\nclass TensorBoardCallback(TensorBoardCallbackBase, callbacks.TensorBoard):\n def __init__(self, writer, close_after_train, keys_ignored=None, key_mapper=lambda x: x):\n callbacks.TensorBoard.__init__(self, writer, close_after_train=close_after_train,\n keys_ignored=keys_ignored, key_mapper=key_mapper)\n\n def add_scalar_maybe(self, history, key, tag, global_step):\n return super().add_scalar_maybe(history, key, self.foldtag+'/'+tag, global_step=global_step)\n\n\nclass ExtendedEpochScoring(callbacks.EpochScoring):\n \"\"\"\n Enables EpochScoring to be run at training data and validation data simultaneously.\n \"\"\"\n\n def get_test_data(self, dataset_train, dataset_valid):\n assert(self.use_caching == False), \"Caching not available for ExtendedEpochScoring\"\n on_train = self.on_train\n self.on_train = True\n Xtrain, ytrain, _ = super().get_test_data(dataset_train, dataset_valid)\n self.on_train = False\n Xvalid, yvalid, _ = super().get_test_data(dataset_train, dataset_valid)\n self.on_train = on_train\n return (Xtrain, Xvalid), (ytrain, yvalid), []\n\n def _record_score(self, history, current_score):\n # if(current_score is not tuple):\n # super()._record_score(history, current_score)\n trainname = \"train_\"+self.name_\n validname = \"valid_\"+self.name_\n train_score, valid_score = current_score\n history.record(trainname, train_score)\n if(valid_score is not None):\n history.record(validname, valid_score)\n score = train_score if self.on_train else valid_score\n else:\n score = train_score\n\n is_best = self._is_best_score(score)\n if is_best is None:\n return\n # name = trainname if self.on_train else validname\n history.record(self.name_ + '_best', bool(is_best))\n if is_best:\n self.best_score_ = score\n\n\nclass EstimatorEpochScoring(ExtendedEpochScoring):\n class EstimatorCallback:\n def __init__(self, estimator, metric):\n from sklearn.metrics import get_scorer\n self.estimator = estimator\n self.metric = get_scorer(metric)\n\n def __call__(self, net, X, y) -> Tuple[float, float]:\n Xtrain, Xvalid = X\n ytrain, yvalid = y\n\n X_emb = net.transform(Xtrain)\n self.estimator.fit(X_emb, ytrain)\n score_train = self.metric(self.estimator, X_emb, ytrain)\n\n if(Xvalid is not None):\n X_emb = net.transform(Xvalid)\n score_valid = self.metric(self.estimator, X_emb, yvalid)\n else:\n score_valid = None\n return score_train, score_valid\n\n def __init__(self, estimator, metric='f1_macro', name='score', lower_is_better=False,\n use_caching=False, on_train=False,\n **kwargs):\n self.estimator = estimator\n self.metric = metric\n est_cb = 
EstimatorEpochScoring.EstimatorCallback(estimator, metric)\n super().__init__(est_cb, lower_is_better=lower_is_better, use_caching=use_caching, name=name, on_train=on_train,\n **kwargs)\n\n def get_params(self, deep=True):\n params = super().get_params(deep=deep)\n del params['scoring']\n return params\n","sub_path":"skorch_extra/callbacks.py","file_name":"callbacks.py","file_ext":"py","file_size_in_byte":7499,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"382424829","text":"import numpy as np\nfrom matplotlib import pyplot as plt\nfrom matplotlib.colors import ListedColormap\n\ndef return_layer_weights_notation(inp, out, l):\n capa = []\n biases = []\n for i in range(inp):\n capa.append([])\n for j in range(out):\n capa[i].append(f\"$w_{{{i+1},{j+1}}}^{l}$\")\n for j in range(out):\n biases.append(f\"$w_{{{0},{j+1}}}^{l}$\")\n return capa, biases\n\ndef return_weights_notation(shape):\n weights=[]\n biases = []\n for i in range(len(shape)-1):\n inp = shape[i]\n out = shape[i+1]\n capa, bias = return_layer_weights_notation(inp, out, i+1)\n weights.append(np.array(capa))\n biases.append(bias)\n return weights, biases\n\ndef draw_neural_net(ax, left, right, bottom, top, layer_sizes, coefs_, intercepts_):\n '''\n Draw a neural network cartoon using matplotilb.\n \n :usage:\n >>> fig = plt.figure(figsize=(12, 12))\n >>> draw_neural_net(fig.gca(), .1, .9, .1, .9, [4, 7, 2])\n \n :parameters:\n - ax : matplotlib.axes.AxesSubplot\n The axes on which to plot the cartoon (get e.g. by plt.gca())\n - left : float\n The center of the leftmost node(s) will be placed here\n - right : float\n The center of the rightmost node(s) will be placed here\n - bottom : float\n The center of the bottommost node(s) will be placed here\n - top : float\n The center of the topmost node(s) will be placed here\n - layer_sizes : list of int\n List of layer sizes, including input and output dimensionality\n '''\n ax.axis('off')\n n_layers = len(layer_sizes)\n v_spacing = (top - bottom)/float(max(layer_sizes))\n h_spacing = (right - left)/float(len(layer_sizes) - 1)\n \n # Input-Arrows\n layer_top_0 = v_spacing*(layer_sizes[0] - 1)/2. + (top + bottom)/2.\n for m in range(layer_sizes[0]):\n plt.arrow(left-0.18, layer_top_0 - m*v_spacing, 0.12, 0, lw =0.1, head_width=0.01, head_length=0.02)\n \n # Nodes\n for n, layer_size in enumerate(layer_sizes):\n layer_top = v_spacing*(layer_size - 1)/2. 
+ (top + bottom)/2.\n for m in range(layer_size):\n circle = plt.Circle((n*h_spacing + left, layer_top - m*v_spacing), v_spacing/8.,\n color='w', ec='k', zorder=4)\n if n == 0:\n plt.text(left-0.125, layer_top - m*v_spacing, r'$X_{'+str(m+1)+'}$', fontsize=15)\n elif (n_layers == 3) & (n == 1):\n plt.text(n*h_spacing + left+0.00, layer_top - m*v_spacing+ (v_spacing/8.+0.01*v_spacing), r'$a_{'+str(m+1)+'}$', fontsize=15)\n elif n == n_layers -1:\n # plt.text(n*h_spacing + left+0.10, layer_top - m*v_spacing, r' $\\hat{p}_{'+str(m+1)+'}$=sigmoid($h_{'+str(m+1)+'}$)', fontsize=15)\n plt.text(n*h_spacing + left+0.10, layer_top - m*v_spacing, r' $h_{'+str(m+1)+'}$', fontsize=15)\n ax.add_artist(circle)\n # Bias-Nodes\n for n, layer_size in enumerate(layer_sizes):\n if n < n_layers -1:\n x_bias = (n+0.5)*h_spacing + left\n y_bias = top + 0.005\n circle = plt.Circle((x_bias, y_bias), v_spacing/8., color='w', ec='k', zorder=4)\n plt.text(x_bias-(v_spacing/8.+0.10*v_spacing+0.01), y_bias, r'$1$', fontsize=15)\n ax.add_artist(circle) \n # Edges\n # Edges between nodes\n for n, (layer_size_a, layer_size_b) in enumerate(zip(layer_sizes[:-1], layer_sizes[1:])):\n layer_top_a = v_spacing*(layer_size_a - 1)/2. + (top + bottom)/2.\n layer_top_b = v_spacing*(layer_size_b - 1)/2. + (top + bottom)/2.\n for m in range(layer_size_a):\n for o in range(layer_size_b):\n line = plt.Line2D([n*h_spacing + left, (n + 1)*h_spacing + left],\n [layer_top_a - m*v_spacing, layer_top_b - o*v_spacing], c='k', lw=0.1)\n ax.add_artist(line)\n xm = (n*h_spacing + left)\n xo = ((n + 1)*h_spacing + left)\n ym = (layer_top_a - m*v_spacing)\n yo = (layer_top_b - o*v_spacing)\n rot_mo_rad = np.arctan((yo-ym)/(xo-xm))\n rot_mo_deg = rot_mo_rad*180./np.pi\n xm1 = xm + (v_spacing/8.+0.05)*np.cos(rot_mo_rad)\n if n == 0:\n if yo > ym:\n ym1 = ym + (v_spacing/8.+0.12)*np.sin(rot_mo_rad)\n else:\n ym1 = ym + (v_spacing/8.+0.05)*np.sin(rot_mo_rad)\n else:\n if yo > ym:\n ym1 = ym + (v_spacing/8.+0.12)*np.sin(rot_mo_rad)\n else:\n ym1 = ym + (v_spacing/8.+0.04)*np.sin(rot_mo_rad)\n # print(n, m, o, str(coefs_[n][m, o]))\n plt.text( xm1, ym1,\\\n str(coefs_[n][m, o]),\\\n rotation = rot_mo_deg, \\\n fontsize = 12)\n # Edges between bias and nodes\n for n, (layer_size_a, layer_size_b) in enumerate(zip(layer_sizes[:-1], layer_sizes[1:])):\n if n < n_layers-1:\n layer_top_a = v_spacing*(layer_size_a - 1)/2. + (top + bottom)/2.\n layer_top_b = v_spacing*(layer_size_b - 1)/2. + (top + bottom)/2.\n x_bias = (n+0.5)*h_spacing + left\n y_bias = top + 0.005 \n for o in range(layer_size_b):\n line = plt.Line2D([x_bias, (n + 1)*h_spacing + left],\n [y_bias, layer_top_b - o*v_spacing], c='k', lw=0.1)\n ax.add_artist(line)\n xo = ((n + 1)*h_spacing + left)\n yo = (layer_top_b - o*v_spacing)\n rot_bo_rad = np.arctan((yo-y_bias)/(xo-x_bias))\n rot_bo_deg = rot_bo_rad*180./np.pi\n xo2 = xo - (v_spacing/8.+0.01)*np.cos(rot_bo_rad)\n yo2 = yo - (v_spacing/8.+0.01)*np.sin(rot_bo_rad)\n xo1 = xo2 -0.05 *np.cos(rot_bo_rad)\n yo1 = yo2 -0.05 *np.sin(rot_bo_rad)\n plt.text( xo1, yo1,\\\n str(intercepts_[n][o]),\\\n rotation = rot_bo_deg, \\\n fontsize = 12) \n \n # Output-Arrows\n layer_top_0 = v_spacing*(layer_sizes[-1] - 1)/2. 
+ (top + bottom)/2.\n for m in range(layer_sizes[-1]):\n plt.arrow(right+0.015, layer_top_0 - m*v_spacing, 0.16*h_spacing, 0, lw =1, head_width=0.01, head_length=0.02)\n \ndef get_dataset(random_seed=42, N=200):\n N = N//4\n np.random.seed(random_seed)\n X1 = np.random.multivariate_normal(np.array([0, 0]), [[1,0],[0,1]], 2*N)\n X2 = np.random.multivariate_normal(np.array([0, 6]), [[6,0],[0,1]], N)\n X3 = np.random.multivariate_normal(np.array([6, 0]), [[1,0],[0,6]], N)\n X = np.vstack([X1, X2, X3])\n y = np.vstack([np.ones((2*N, 1)), np.zeros((2*N, 1))]).reshape(-1)\n return X, y\n\ndef get_dataset_2(random_seed=42, N=200):\n N = N//4\n np.random.seed(random_seed)\n X1 = np.random.multivariate_normal(np.array([0, 0]), [[1,0],[0,1]], 2*N)\n X2 = np.random.multivariate_normal(np.array([0, 6]), [[6,0],[0,1]], N)\n X3 = np.random.multivariate_normal(np.array([6, 0]), [[1,0],[0,6]], N)\n X4 = np.random.multivariate_normal(np.array([-6, 0]), [[1,0],[0,6]], N)\n X5 = np.random.multivariate_normal(np.array([0, -6]), [[6,0],[0,1]], N)\n X = np.vstack([X1, X2, X3, X4, X5])\n y = np.vstack([np.ones((2*N, 1)), np.zeros((4*N, 1))]).reshape(-1)\n return X, y\n\ndef generate_gaussians_distributions(sep=1, N=500, random_state=42, normalize=True):\n np.random.seed(random_state)\n # Zeros\n X1 = np.random.multivariate_normal(sep*np.array([0.5, 0.5]), [[0.1,-0.085],[-0.085,0.1]], N//2)\n # Ones\n X2 = np.random.multivariate_normal([-0.25, -0.25], [[0.1,0],[0,0.1]], N//2)\n X = np.append(X1, X2, axis=0)\n y = np.append(np.zeros(N//2), np.ones(N//2))\n indexes = np.arange(len(y))\n np.random.shuffle(indexes)\n if normalize:\n X = (X - X.mean(axis=0))/X.std(axis=0)\n else:\n X[:, 0] = X[:, 0]\n X[:, 1] = X[:, 1]\n return X[indexes], y[indexes]\n\ndef plot_boundaries_keras(X_train, y_train, score, probability_func, degree=None, bias=False, h = .02, ax = None, margin=0.5):\n X = X_train\n x_min, x_max = X[:, 0].min() - margin, X[:, 0].max() + margin\n y_min, y_max = X[:, 1].min() - margin, X[:, 1].max() + margin\n xx, yy = np.meshgrid(np.arange(x_min, x_max, h),\n np.arange(y_min, y_max, h))\n\n if ax is None:\n ax = plt.subplot(1, 1, 1)\n \n # Plot the decision boundary. 
For that, we will assign a color to each\n # point in the mesh [x_min, x_max]x[y_min, y_max].\n \n if degree is not None:\n polynomial_set = get_polynimial_set(np.c_[xx.ravel(), yy.ravel()], degree = degree, bias=bias)\n Zaux = probability_func(polynomial_set)\n else:\n Zaux = probability_func(np.c_[xx.ravel(), yy.ravel()])\n # Z = Z_aux[:, 1]\n \n if Zaux.shape[1] == 2:\n Z = Zaux[:, 1]\n else:\n Z = Zaux[:, 0]\n\n # Put the result into a color plot\n Z = Z.reshape(xx.shape)\n \n cm = plt.cm.RdBu\n cm_bright = ListedColormap(['#FF0000', '#0000FF'])\n \n cf = ax.contourf(xx, yy, Z, 50, cmap=cm, alpha=.8)\n plt.colorbar(cf, ax=ax)\n #plt.colorbar(Z,ax=ax)\n\n # Plot also the training points\n ax.scatter(X_train[:, 0], X_train[:, 1], c=y_train, cmap=cm_bright,\n edgecolors='k', s=100)\n\n ax.set_xlim(xx.min(), xx.max())\n ax.set_ylim(yy.min(), yy.max())\n ax.set_xticks(())\n ax.set_yticks(())\n if score is not None:\n ax.text(xx.max() - .3, yy.min() + .3, ('%.2f' % score).lstrip('0'),\n size=40, horizontalalignment='right')\n\ndef plot_boundaries(X_train, y_train, score=None, probability_func=None, degree = None, n_colors = 100, mesh_res = 1000, ax = None):\n X = X_train #np.vstack((X_test, X_train))\n if len(y_train.shape) == 2 and y_train.shape[1] == 1:\n y_train = y_train.reshape(-1)\n margin_x = (X[:, 0].max() - X[:, 0].min())*0.05\n margin_y = (X[:, 1].max() - X[:, 1].min())*0.05\n x_min, x_max = X[:, 0].min() - margin_x, X[:, 0].max() + margin_x\n y_min, y_max = X[:, 1].min() - margin_y, X[:, 1].max() + margin_y\n hx = (x_max-x_min)/mesh_res\n hy = (y_max-y_min)/mesh_res\n x_domain = np.arange(x_min, x_max, hx)\n y_domain = np.arange(y_min, y_max, hy)\n xx, yy = np.meshgrid(x_domain, y_domain)\n\n if ax is None:\n ax = plt.subplot(1, 1, 1)\n \n cm = plt.cm.RdBu\n cm_bright = ListedColormap(['#FF0000', '#0000FF'])\n # Plot the decision boundary. For that, we will assign a color to each\n # point in the mesh [x_min, x_max]x[y_min, y_max].\n if probability_func is not None:\n if degree is not None:\n polynomial_set = get_polynimial_set(np.c_[xx.ravel(), yy.ravel()], degree = degree)\n Z = probability_func(polynomial_set)[:, 1]\n else:\n Z_aux = probability_func(np.c_[xx.ravel(), yy.ravel()])\n Z = Z_aux[:, 1]\n\n # Put the result into a color plot\n Z = Z.reshape(xx.shape)\n \n cf = ax.contourf(xx, yy, Z, n_colors, vmin=0., vmax=1., cmap=cm, alpha=.8)\n plt.colorbar(cf, ax=ax)\n #plt.colorbar(Z,ax=ax)\n\n boundary_line = np.where(np.abs(Z-0.5)<0.001)\n\n ax.scatter(x_domain[boundary_line[1]], y_domain[boundary_line[0]], color='k', alpha=0.5, s=1)\n ax.set_xlim(xx.min(), xx.max())\n ax.set_ylim(yy.min(), yy.max())\n ax.text(xx.max() - .3, yy.min() + .3, score,\n size=20, horizontalalignment='right')\n\n # Plot also the training points\n ax.scatter(X_train[:, 0], X_train[:, 1], c=y_train, cmap=cm_bright,\n edgecolors='k', s=40, marker='o')","sub_path":"mlp_helper.py","file_name":"mlp_helper.py","file_ext":"py","file_size_in_byte":11779,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"192911562","text":"print(\"Celsius/Fahrenheit converter\")\n\nwhile True:\n print(\"If you want to convert C to F, then enter 'C'\")\n print(\"If you want to convert F to C, then enter 'F'\")\n \n op=input()\n if op=='C' or op=='F':\n break\n print(\"Wrong input! 
Please enter again.\")\n\nval=float(input(\"Enter your value : \"))\n\nif op=='C':\n    res=9/5*val+32\n    print(f\"{val}C = {res}F\")\nelse:\n    res=(val-32)*9/5\n    print(f\"{val}F = {res}C\")\n","sub_path":"hw2/prob1.py","file_name":"prob1.py","file_ext":"py","file_size_in_byte":437,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"249634583","text":"from django.core.exceptions import ObjectDoesNotExist\nfrom api.customer.infrastructure.models import Customer\nfrom api.product.infrastructure.models import Product\nfrom api.purchase.infrastructure.models import Purchase\n\ndef get_anonymous_customer(anonymous_customer_id):\n    ''' Fetch a customer by id. '''\n    anonymous_customer = Customer.objects.get(pk=anonymous_customer_id)\n    return anonymous_customer\n\ndef create_anonymous_customer():\n    ''' Create a customer. '''\n    anonymous_customer = Customer.objects.create()\n    return anonymous_customer\n\ndef get_purchase(cart, purchase_id):\n    try:\n        return cart.purchase_set.get(pk=purchase_id)\n    except ObjectDoesNotExist:\n        raise ObjectDoesNotExist(f'Purchase {purchase_id} does not exist')\n\ndef get_product(product_id):\n    try:\n        return Product.objects.get(pk=product_id)\n    except ObjectDoesNotExist:\n        raise ObjectDoesNotExist(f'Product {product_id} does not exist')\n\ndef delete_purchase_from_cart(customer, purchase_id):\n    cart = customer.get_dont_checkout_cart()\n    purchase = get_purchase(cart, purchase_id)\n    purchase.delete()\n\ndef change_count_product_in_purchase(customer, purchase_id, new_count):\n    if new_count:\n        cart = customer.get_dont_checkout_cart()\n        purchase = get_purchase(cart, purchase_id)\n        purchase.count = int(new_count)\n        purchase.save()\n    else:\n        raise ValueError('Count does not exist or not integer')\n\ndef add_product_to_cart(customer, product_id, count):\n    cart = customer.get_dont_checkout_cart()\n    wont_to_add_product = get_product(product_id)\n    for purchase in cart.purchase_set.all():\n        if purchase.product == wont_to_add_product:\n            purchase.count += int(count)\n            purchase.save()\n            return None \n    purchase = Purchase.objects.create(product=wont_to_add_product, cart=cart, count=count)\n","sub_path":"pavlyuchenko/ozon/api/purchase/domain/logic.py","file_name":"logic.py","file_ext":"py","file_size_in_byte":1921,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"624883928","text":"from tensorflow.keras import layers\nfrom tensorflow.keras import regularizers as reg\nimport numpy as np\nimport pandas as pd\nimport tensorflow as tf\nimport cust_datagen as cd\nimport attention_models as am\nimport utils\nimport h5py\nimport os, shutil\nimport deprecated as dep\n\nlogdir_name = \"logs_attention_reduced\"\nif os.path.isdir(logdir_name):\n    shutil.rmtree(logdir_name)\n\n# CHANGE THESE PATHS\npaths = open(\"pathconfig.cfg\",\"r\").read().split(\"\\n\")\nCSV_TRAIN = paths[0]\nCSV_VALIDATION = paths[1]\n\n# HDF5 link: https://drive.google.com/file/d/1GeCiqkdh3aqY8MUJztrvhHy-H0Ghd5Jo/view?usp=sharing\nH5_COMBINED = paths[2]\ncombined_h5 = h5py.File(H5_COMBINED, 'r')\n\nattention_window = 8\ndstport_embedding = 8\nprotocol_embedding = 3\ncolumns = list(range(3,79))\n# useless = [i for i,n in enumerate(combined_h5[\"minmaxes\"]) if n[1] - n[0] == 0]\n# columns = [k for k in columns if k not in useless]\ncolumns = dep.indices_to_use\nprint(columns)\n\ndims = (attention_window, len(columns))\n\nin_dstport = layers.Input(shape=(attention_window))\ndstport = layers.Embedding(65535+1, dstport_embedding, 
input_length=attention_window)(in_dstport)\n\nin_protocol = layers.Input(shape=(attention_window))\nprotocol = layers.Embedding(17+1, protocol_embedding, input_length=attention_window)(in_protocol)\n\ninputs = layers.Input(shape=dims)\n\nall_in = layers.Concatenate()([protocol, dstport, inputs])\nat = layers.TimeDistributed(layers.Dense(32), input_shape=(attention_window, len(columns)+dstport_embedding+protocol_embedding))(all_in)\nat = layers.TimeDistributed(layers.BatchNormalization())(at)\nat = layers.Activation(\"relu\")(at)\n\nat = layers.TimeDistributed(layers.Dense(32))(at)\nat = layers.TimeDistributed(layers.BatchNormalization())(at)\nat = layers.Activation(\"relu\")(at)\n\nat = layers.TimeDistributed(layers.Dense(32))(at)\nat = layers.TimeDistributed(layers.BatchNormalization())(at)\nat = layers.Activation(\"relu\")(at)\n\n\nat = layers.TimeDistributed(layers.Dense(1))(at)\nat = layers.Activation(\"relu\")(at)\n\n# at = layers.Flatten()(at)\n# at = layers.Dense(1)(at)\n# at = am.LuongAttention(2, 0.5, attention_window)([at, all_in])\n# at = am.WeightedAverageAttention()([at, all_in])\nat = am.WeightedAttention(scale_fac=5)([at, all_in])\nat = layers.Flatten()(at)\nat = layers.Activation(\"relu\")(at)\n\n\nx = layers.Dense(64)(at)\nx = layers.Activation(\"relu\")(x)\nx = layers.Dropout(0.4)(x)\n\nx = layers.Dense(64)(x)\nx = layers.Activation(\"relu\")(x)\nx = layers.Dropout(0.4)(x)\n\nx = layers.Dense(64)(x)\nx = layers.Activation(\"relu\")(x)\nx = layers.Dropout(0.4)(x)\n\n\noutputs = layers.Dense(1, activation='sigmoid')(x)\nmodel = tf.keras.Model(inputs=[in_protocol, in_dstport, inputs], outputs=outputs, name='ids_model')\nprint(model.summary())\n\nbatch_size = 1024\n\n# steps_per_epoch = 100\n# max_val_steps = (utils.rawcount(CSV_VALIDATION) - 1)//(batch_size*dims[0])\n# max_train_steps = (utils.rawcount(CSV_TRAIN) - 1)//(batch_size*dims[0])\n\n# gen = cd.IDSDataGeneratorAttention({\"Benign\": 0, \"Malicious\": 1}, CSV_TRAIN, dims, max_train_steps, batch_size=batch_size)\n# vgen = cd.IDSDataGeneratorAttention({\"Benign\": 0, \"Malicious\": 1}, CSV_VALIDATION, dims, max_val_steps, batch_size=batch_size)\n\ndata = combined_h5[\"combined\"][:]\ngen, vgen = cd.IDSDataGeneratorAttentionH5.create_data_generators(data, combined_h5, attention_window, columns, 0.2, batch_size=batch_size)\n\nsgd = tf.keras.optimizers.SGD(lr=0.001, decay=1e-6, momentum=0.9, nesterov=True)\nrmsp = tf.keras.optimizers.RMSprop(lr=0.001)\nadam = tf.keras.optimizers.Adam(learning_rate=0.001)\nmodel.compile(loss='binary_crossentropy',\n optimizer=sgd,\n metrics=['binary_accuracy', utils.true_positive_rate, utils.false_positive_rate])\n\nhistory = model.fit_generator(\n gen, epochs=3000, validation_data=vgen, shuffle=False,\n callbacks=[tf.keras.callbacks.TensorBoard(log_dir=logdir_name)], workers=8, use_multiprocessing=True\n)\n\n# with open(\"OUT\", \"a+\") as f:\n# with np.printoptions(threshold=np.inf):\n# f.write(str(vgen[0]))\n# f.write(\"\\n\\n\\n\\n\\n\\n\")\n# f.write(str(model.predict(vgen, steps=1)))\n# print(model.predict(vgen, steps=1))","sub_path":"attention_reduced.py","file_name":"attention_reduced.py","file_ext":"py","file_size_in_byte":4116,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"435845807","text":"'''\n Time and Space Efficient!\n v1 runtime : 90.90ms (sample size = 40)\n v2 runtime : 04.96ms (sample size = 40)\n'''\n\nn = 10\nN = chi = interval = 0\nstep = (0.0+1.0)/n\n\ncount = {}\nfor i in range(n):\n interval += step\n 
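# (added note) interval now holds the upper edge of the i-th of n\n    # equal-width bins on [0, 1); count maps each upper edge to the bin's\n    # observed frequency, so the expected count per bin is Ei = N/n below.\n    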
count[round(interval,2)] = 0\n\nfor x in sorted(input('Enter the random sequence: ').split(',')):\n N += 1\n for k in sorted(count.keys()):\n if float(x) < k:\n count[k] = count[k]+1\n break\n\nalpha = float(input(\"Enter the critical value: \"))\n\nEi = N/n\nfor k in count:\n chi+=(count[k]-Ei)**2/Ei\n\nif round(chi,2) < alpha:\n print(\"\\nThe random sequence is uniformly distributed\")\nelse:\n print(\"\\nThe random sequence is not uniformly distributed\")\n","sub_path":"random/tests/uniformity/Chi Square v2.py","file_name":"Chi Square v2.py","file_ext":"py","file_size_in_byte":722,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"418270854","text":"\"\"\" This module contains classes representing the state of an object in the\nsimulation. These states contain information describing the object and may be\npassed to agents to enable decision making. \"\"\"\n\n__author__ = 'lrbenke, mikepsn'\n\nclass SensorTrack(object):\n \"\"\" Data generated by the sensor model for each entity it is tracking. \"\"\"\n def __init__(self):\n self.callsign = ''\n self.id = None\n self.side = None\n self.x = 0.0\n self.y = 0.0\n self.z = 0.0\n self.v = 0.0\n self.track_range = 0.0\n self.track_theta = 0.0\n self.track_psi = 0.0\n self.start_time = 0.0\n self.current_time = 0.0\n self.total_time = 0.0\n\nclass SensorState(object):\n \"\"\" Defines the current state of the sensor model. \"\"\"\n def __init__(self):\n self.name = \"\"\n self.max_range = 10000.0\n self.fov = 60.0\n self.tracks = {}\n\n\nclass FighterState(object):\n \"\"\" Defines the current state of the Fighter. \"\"\"\n def __init__(self, x, y, z, z_c, psi, psi_c, theta, theta_c,\n phi, phi_c, v, v_c, gload, v_min, v_max):\n self.timestep = 0.0\n self.callsign = \"callsign\" # aircraft's unique callsign\n self.id = 0 # unique aircraft id (starting at 0)\n self.side = 1 # side aircraft belongs to (either 1 or 2)\n self.actype = 0 # aircraft type\n self.x = x # x position or lat\n self.y = y # y position or lon\n self.z = z # z position or altitude\n self.z_c = z_c # desired altitude\n self.psi = psi # heading\n self.psi_c = psi_c # desired heading\n self.theta = theta # pitch angle\n self.theta_c = theta_c # desired pitch angle\n self.phi = phi # roll angle\n self.phi_c = phi_c # desired roll angle\n self.v = v # velocity\n self.v_c = v_c # desired velocity\n self.gload = gload # gload factor\n self.v_min = v_min # minimum velocity\n self.v_max = v_max # maximum velocity\n self.sensor_state = None\n self.tracking = False\n self.tracked = False\n self.contact_range = None\n self.contact_aa = None\n self.contact_ata = None\n self.los_angle = None\n self.mcgrew_angle = None\n self.mcgrew_range = None\n self.mcgrew_score = None\n self.lethal_range = False\n self.lethal_cone = False\n self.viable_lethal_shot = False\n self.opponent_viable_lethal_shot = False\n self.reward = 0\n\n @property\n def heading(self):\n \"\"\" Returns the current heading angle psi. \"\"\"\n return self.psi\n\n @property\n def desired_heading(self):\n \"\"\" Returns the commanded heading angle psi_c. \"\"\"\n return self.psi_c\n\n @property\n def pitch(self):\n \"\"\" Returns the current pitch angle theta. \"\"\"\n return self.theta\n\n @property\n def desired_pitch(self):\n \"\"\" Return the commanded pitch angle theta_c. \"\"\"\n return self.theta_c\n\n @property\n def desired_v(self):\n \"\"\" Returns the commanded/desired speed v_c. 
\"\"\"\n return self.v_c\n\n @property\n def roll(self):\n \"\"\" Return the current roll angle phi. \"\"\"\n return self.phi\n\n @property\n def desired_roll(self):\n \"\"\" Returns the commanded roll angle phi_c. \"\"\"\n return self.phi_c\n\n @property\n def gload_factor(self):\n \"\"\" Returns the aircrafts gload. \"\"\"\n return self.gload\n\n def pos_2d(self):\n \"\"\" Returns an (x,y) tuple representing the aircraft position. \"\"\"\n return self.x, self.y\n\n def pos_3d(self):\n \"\"\" Returns an (x,y,z) tuple representing the aircraft position. \"\"\"\n return self.x, self.y, self.z\n\n def __str__(self):\n \"\"\" String representation of the fighter state. \"\"\"\n tokens = []\n tokens += ['x({})={}'.format(self.callsign, self.x)]\n tokens += ['y({})={}'.format(self.callsign, self.y)]\n tokens += ['z({})={}'.format(self.callsign, self.z)]\n tokens += ['z_c({})={}'.format(self.callsign, self.z_c)]\n tokens += ['phi({})={}'.format(self.callsign, self.phi)]\n tokens += ['phi_c({})={}'.format(self.callsign, self.phi_c)]\n tokens += ['psi({})={}'.format(self.callsign, self.psi)]\n tokens += ['psi_c({})={}'.format(self.callsign, self.psi_c)]\n tokens += ['theta({})={}'.format(self.callsign, self.theta)]\n tokens += ['theta_c({})={}'.format(self.callsign, self.theta_c)]\n tokens += ['v({})={}'.format(self.callsign, self.v)]\n tokens += ['v_c({})={}'.format(self.callsign, self.v_c)]\n tokens += ['gload({})={}'.format(self.callsign, self.gload)]\n return ', '.join(tokens)\n","sub_path":"utils/states.py","file_name":"states.py","file_ext":"py","file_size_in_byte":4881,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"340475462","text":"\nimport feather\nimport gc\nimport json\nimport numpy as np\nimport pandas as pd\nimport sys\nimport warnings\n\nfrom utils import loadpkl, to_feature, line_notify, to_json, read_pickles\nfrom utils import removeCorrelatedVariables, removeMissingVariables, reduce_mem_usage\nfrom utils_lag import make_lags\n\n#===============================================================================\n# aggregation (hobbies)\n#===============================================================================\n\nwarnings.simplefilter(action='ignore')\n\ndef main():\n # load pkls\n df = read_pickles('../feats/sales')\n df_calendar = loadpkl('../feats/calendar.pkl')\n df_sell_prices = loadpkl('../feats/sell_prices.pkl')\n\n # extract foods\n df = df[df['cat_id'] == 'HOBBIES']\n\n # merge\n df = df.merge(df_calendar, on='d',how='left')\n df = df.merge(df_sell_prices, on=['store_id','item_id','wm_yr_wk'],how='left')\n\n del df_calendar, df_sell_prices\n gc.collect()\n\n # drop pre-release rows\n df = df[df['wm_yr_wk']>=df['release']]\n\n # make lag features\n df = make_lags(df,28)\n\n # add categorical features\n df['item_id_store_id'] = df['item_id']+'_'+df['store_id']\n df['item_id_state_id'] = df['item_id']+'_'+df['state_id']\n df['dept_id_store_id'] = df['dept_id']+'_'+df['store_id']\n df['dept_id_state_id'] = df['dept_id']+'_'+df['state_id']\n\n # label encoding\n cols_string = ['item_id','dept_id','cat_id','store_id','state_id',\n 'item_id_store_id','item_id_state_id','dept_id_store_id',\n 'dept_id_state_id']\n for c in cols_string:\n df[c], _ = pd.factorize(df[c])\n df[c].replace(-1,np.nan,inplace=True)\n\n # add price features\n df_grouped = df[['id','sell_price']].groupby('id')['sell_price']\n df['shift_price_t1'] = df_grouped.transform(lambda x: x.shift(1))\n df['price_change_t1'] = (df['shift_price_t1'] - 
df['sell_price']) / (df['shift_price_t1'])\n    df['rolling_price_max_t365'] = df_grouped.transform(lambda x: x.shift(1).rolling(365).max())\n    df['price_change_t365'] = (df['rolling_price_max_t365'] - df['sell_price']) / (df['rolling_price_max_t365'])\n    df['rolling_price_std_t7'] = df_grouped.transform(lambda x: x.rolling(7).std())\n    df['rolling_price_std_t30'] = df_grouped.transform(lambda x: x.rolling(30).std())\n\n    # feature: relative release date\n    df['release'] = df['release'] - df['release'].min()\n\n    # price momentum by month & year\n    df['price_momentum_m'] = df['sell_price']/df.groupby(['store_id','item_id','month'])['sell_price'].transform('mean')\n    df['price_momentum_y'] = df['sell_price']/df.groupby(['store_id','item_id','year'])['sell_price'].transform('mean')\n\n    # days for CustomTimeSeriesSplitter\n    df['d_numeric'] = df['d'].apply(lambda x: str(x)[2:]).astype(int)\n\n    # reduce memory usage\n    df = reduce_mem_usage(df)\n\n    # save as feather\n    to_feature(df, '../feats/f108')\n\n    # save feature name list\n    features_json = {'features':df.columns.tolist()}\n    to_json(features_json,'../configs/108_all_features_hobbies.json')\n\n    # LINE notify\n    line_notify('{} done.'.format(sys.argv[0]))\n\nif __name__ == '__main__':\n    main()\n","sub_path":"src/108_aggregation_hobbies.py","file_name":"108_aggregation_hobbies.py","file_ext":"py","file_size_in_byte":3174,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"625347022","text":"# -*- coding: utf-8 -*-\r\n\r\nimport Game_Donnees\r\nimport time\r\nfrom tkinter import Tk, PhotoImage, Toplevel, Canvas, ALL, NW, TclError\r\nfrom PIL import Image\r\nimport random\r\n# Imports: Game_Donnees, time, tkinter, PIL\r\n\r\nclass Battle(object):\r\n    \"Defines the battle\"\r\n    def __init__(self):\r\n        \"\"\"\r\n        Function\r\n        --------\r\n        Initialisation function of the Battle\r\n        class; fetching the locations and the\r\n        sprites requires no \r\n        argument\r\n        \r\n        Parameters\r\n        ----------\r\n        None\r\n        \r\n        Returned value\r\n        ----------------\r\n        None\r\n        \"\"\"\r\n        \r\n        get = Game_Donnees.Get_info\r\n        sec = 'Locations'\r\n        # Shorthand for the lookup function\r\n        \r\n        gd_x, gd_y = get('Graphics', 'bgame_dim_x'),\\\r\n                     get('Graphics', 'bgame_dim_y')\r\n        # Fetch the game dimensions\r\n        \r\n        self.localisations = {'action_atk' : eval(get(sec, 'action_atk')),\\\r\n                              'action_fuir' : eval(get(sec, 'action_fuir')),\\\r\n                              'action_objet' : eval(get(sec, 'action_objet')),\\\r\n                              'foeLifeBarBorder' : eval(get(sec, 'foeLifeBarBorder')),\\\r\n                              'foeLifeBar' : eval(get(sec, 'foeLifeBar')),\\\r\n                              'foeName' : eval(get(sec, 'foeName')),\\\r\n                              'foeLvl' : eval(get(sec, 'foeLvl')),\\\r\n                              'foeLvlNumber' : eval(get(sec, 'foeLvlNumber')),\\\r\n                              'foeSprite' : eval(get(sec, 'foeSprite')),\\\r\n                              'heroLifeBarBorder' : eval(get(sec, 'heroLifeBarBorder')),\\\r\n                              'heroLifeBar' : eval(get(sec, 'heroLifeBar')),\\\r\n                              'heroName' : eval(get(sec, 'heroName')),\\\r\n                              'heroLvl' : eval(get(sec, 'heroLvl')),\\\r\n                              'heroLvlNumber' : eval(get(sec, 'heroLvlNumber')),\\\r\n                              'field' : eval(get(sec, 'field')),\\\r\n                              'markerFoe' : eval(get(sec, 'markerFoe'))}\r\n        # Fetch the locations\r\n        #\r\n        # eval() turns each 'x_expr, y_expr' string read from the ini\r\n        # into a coordinate tuple computed from the game\r\n        # dimensions (gd_x and gd_y)\r\n        #
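\r\n        # Illustrative example (assumed ini format, added for clarity):\r\n        # an entry such as\r\n        #     action_atk = gd_x*0.10, gd_y*0.85\r\n        # comes back from get() as the string 'gd_x*0.10, gd_y*0.85', and\r\n        # eval() computes it against the gd_x/gd_y values fetched above,\r\n        # e.g. (64.0, 510.0) for a 640x600 battle screen.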
\r\n        \r\n        sec = 'Sprites'\r\n        \r\n        self.sprites = {'action_atk' : get(sec, 'action_atk'),\\\r\n                        'action_fuir' : get(sec, 'action_fuir'),\\\r\n                        'action_objet' : get(sec, 'action_objet'),\\\r\n                        'foeLifeBarBorder' : get(sec, 'foeLifeBarBorder'),\\\r\n                        'foeLifeBar' : get(sec, 'foeLifeBar'),\\\r\n                        'foeLvl' : get(sec, 'foeLvl'),\\\r\n                        'heroLifeBarBorder' : get(sec, 'heroLifeBarBorder'),\\\r\n                        'heroLifeBar' : get(sec, 'heroLifeBar'),\\\r\n                        'heroLvl' : get(sec, 'heroLvl'),\\\r\n                        'forest_field' : get(sec, 'forest_field'),\\\r\n                        'forest_markerFoe' : get(sec, 'forest_markerFoe')}\r\n        # Fetch the sprites\r\n        \r\n        \r\n    def initiate(self, player, foe):\r\n        \"\"\"\r\n        Function\r\n        --------\r\n        Fetches the participants of the battle\r\n        \r\n        Parameters\r\n        ----------\r\n        self : Battle.object :: Battle\r\n        player : Character.object :: Player\r\n        foe : Character.object :: Enemy\r\n        \r\n        Returned value\r\n        ----------------\r\n        None\r\n        \"\"\" \r\n        \r\n        self.player = player\r\n        self.foe = foe\r\n        \r\n        self.player.Defense_count = -1\r\n        self.foe.Defense_count = -1\r\n        # Counter for the defense boost\r\n        # 1 -> 0\r\n        # -1 : No boost\r\n        \r\n        self.field = self.sprites['forest_field']\r\n\r\n    def run_battle(self):\r\n        \"\"\"\r\n        Function\r\n        --------\r\n        Starts the battle and triggers the\r\n        different phases of play\r\n        \r\n        Parameters\r\n        ----------\r\n        self : Battle.object :: Battle\r\n        \r\n        Returned value\r\n        ----------------\r\n        None\r\n        \"\"\" \r\n        \r\n        print('Starting the battle!\\n\\n')\r\n        \r\n        gd_x, gd_y = Game_Donnees.Get_info('Graphics', 'bgame_dim_x'),\\\r\n                     Game_Donnees.Get_info('Graphics', 'bgame_dim_y')\r\n        # Fetch the game dimensions\r\n        \r\n        self.master.canvas.delete(ALL)\r\n        # Clear everything currently drawn on the canvas\r\n\r\n        self.master.canvas.create_image(0,0,anchor = NW,image = self.field)\r\n        # Draw the battle background \r\n        \r\n        x, y = self.localisations['foeSprite']\r\n        # Fetch the sprite location\r\n        self.master.canvas.create_image(x,\r\n                                        y,\r\n                                        anchor = NW,\r\n                                        image = self.foe.sprite) \r\n        # Draw the enemy\r\n        \r\n        \r\n        x, y = self.localisations['markerFoe'] \r\n        # Fetch the marker location\r\n        self.master.canvas.create_image(x,\r\n                                        y,\r\n                                        anchor = NW,\r\n                                        image = self.marker)\r\n        # Draw the enemy marker\r\n        # The battle is now displayed\r\n        \r\n        while self.player.PDV != 0 and self.foe.PDV != 0:\r\n            print('Command turn\\n-----------\\n')\r\n            # -------- Player and enemy commands ----------------\r\n            self.player.action = player_timeplay(self.player, self.foe)\r\n            # Player's choice\r\n            print(\"The enemy is choosing..\\n\")\r\n            self.foe.action = random.choice(['poing', 'soin', 'bouclier'])\r\n            time.sleep(2)\r\n            # Enemy's choice\r\n\r\n            round(self)\r\n            # Combat phase\r\n        \r\n    def end(self, winner):\r\n        \"\"\"\r\n        Function\r\n        --------\r\n        Ends the battle.\r\n        \r\n        Parameters\r\n        ----------\r\n        self : Battle.object :: Battle\r\n        winner : Character.object :: winner\r\n        \r\n        Returned value\r\n        ----------------\r\n        None\r\n        \"\"\" \r\n        \r\n        if winner == self.player:\r\n            winner = self.player.name\r\n            loser = self.foe.name\r\n        else:\r\n            loser = self.player.name\r\n            winner = self.foe.name\r\n        \r\n        print('{} is defeated, {} wins the battle!'.format(loser\\\r\n              , winner))\r\n# Battle class\r\n\r\ndef round(combat):\r\n\r\n    # --------- Working out who is fastest -------------\r\n    \r\n    if combat.player.stats['Vitesse'] > combat.foe.stats['Vitesse']:\r\n        # Player is faster
faster\r\n        joueur1 = combat.player\r\n        joueur2 = combat.foe\r\n    elif combat.player.stats['Vitesse'] < combat.foe.stats['Vitesse']:\r\n        # Enemy is faster\r\n        joueur2 = combat.player\r\n        joueur1 = combat.foe\r\n    else:\r\n        # Player and enemy have the same speed\r\n        rand = random.randint(0,1)\r\n        if rand == 0:\r\n            joueur1 = combat.player\r\n            joueur2 = combat.foe\r\n        else:\r\n            joueur2 = combat.player\r\n            joueur1 = combat.foe\r\n        # Random pick\r\n    \r\n    # ------------ Attack turn ---------------------\r\n    \r\n    tour = action_battle(combat = combat,\r\n                         personnage = joueur1,\r\n                         action_joueur = joueur1.action)\r\n    # Action of player 1\r\n    \r\n    if tour == 'mort':\r\n        print('mort')\r\n    \r\n    else:\r\n        action_battle(combat = combat,\r\n                      personnage = joueur2,\r\n                      action_joueur = joueur2.action)\r\n        # Action of player 2\r\n    \r\n    \r\n    # ------------ Death detection ---------------------\r\n    \r\n    if combat.player.PDV == 0:\r\n        # If the player is dead\r\n        combat.end(winner = combat.foe)\r\n        # Stop the battle\r\n    elif combat.foe.PDV == 0:\r\n        # If the enemy is dead\r\n        combat.end(winner = combat.player)\r\n        # Stop the battle\r\n    else:\r\n        # If nobody is dead\r\n        pass\r\n# Game turn function\r\n\r\ndef player_timeplay(player, foe):\r\n    print('{} : {} PDV\\n{} : {} PDV\\n'.format(player.name, player.PDV,\\\r\n          foe.name, foe.PDV))\r\n    print('Available actions : {}'.format(player.attacks))\r\n    \r\n    commande = ''\r\n    while commande not in player.attacks:\r\n        commande = input('Make your choice : ')\r\n    print()\r\n    \r\n    return commande\r\n# Command prompt function\r\n\r\ndef action_battle(combat, personnage, action_joueur):\r\n    \"\"\"\r\n    Function\r\n    --------\r\n    Carries out the command of the given player\r\n    in the given battle\r\n    \r\n    Parameters\r\n    ----------\r\n    combat : Battle.object\r\n    personnage : Character.object :: Player performing the action\r\n    action_joueur : str :: Name of the action\r\n    \r\n    Return value\r\n    ------------\r\n    None\r\n    \"\"\"\r\n\r\n    global liste_commandes\r\n    \r\n    action = liste_commandes['atk-{}'.format(action_joueur.lower())].split('/')\r\n    if personnage == combat.player:\r\n        # If the character is the player\r\n        attaquant = combat.player\r\n        defenseur = combat.foe\r\n    else:\r\n        # Otherwise the character is the enemy\r\n        attaquant = combat.foe\r\n        defenseur = combat.player\r\n    \r\n    action[1] = int(action[1])\r\n    \r\n    print('{} prepares an action.'.format(attaquant.name))\r\n    time.sleep(1)\r\n    \r\n    if action[0] == 'atk':\r\n        # If the action is an attack\r\n\r\n        print('He attacks!\\n')\r\n        time.sleep(1)\r\n        \r\n        power = attaquant.Attaque + action[1]\r\n        # Compute the power\r\n        degats = power - defenseur.Defense\r\n        # Compute the damage\r\n        \r\n        if degats <= 0:\r\n            degats = 1\r\n        \r\n        print('{} takes {} damage points!'.format(defenseur.name, degats))\r\n        time.sleep(1)\r\n        \r\n        defenseur.PDV -= degats\r\n        if defenseur.PDV < 0:\r\n            defenseur.PDV = 0\r\n    \r\n    elif action[0] == 'def':\r\n        # If the action is a defense\r\n        \r\n        print('He braces to parry a blow!\\n')\r\n        time.sleep(1)\r\n        \r\n        power = attaquant.Special + action[1]\r\n        # Compute the power\r\n        \r\n        print('His defense increases!')\r\n        time.sleep(1)\r\n        \r\n        attaquant.Defense += power\r\n        attaquant.Defense_count = 2\r\n    \r\n    elif action[0] == 'soin':\r\n        # If the action is a heal\r\n        \r\n        print('He heals himself!\\n')\r\n        time.sleep(1)\r\n        \r\n        power = attaquant.Special + action[1]\r\n        # Compute the power\r\n        \r\n        attaquant.PDV += power\r\n        if attaquant.PDV >
attaquant.stats['PDV']:\r\n            power = power - (attaquant.PDV - attaquant.stats['PDV'])\r\n            attaquant.PDV = attaquant.stats['PDV']\r\n        \r\n        # Check that the attacker does not exceed its max health\r\n        \r\n        print('He regains {} health points.'.format(power))\r\n        time.sleep(1)\r\n        \r\n    else:\r\n        # If the action is of an unknown type\r\n        raise Exception(\"ERROR : action[0] does not have an expected value\\\r\n                        \\naction[0] = {} is not in [atk, def\\\r\n                        , soin]\".format(action[0]))\r\n    \r\n    attaquant.Defense_count -= 1\r\n    \r\n    if defenseur.Defense_count == 0:\r\n        defenseur.Defense_count = -1\r\n        defenseur.Defense = defenseur.stats['Defense']\r\n        # Reset the defender's defense to its base value\r\n\r\ndef create_combat(master, player, foe, zone):\r\n    \"\"\"\r\n    Function\r\n    --------\r\n    Creates a Battle object and assigns the\r\n    player and the enemy to it.\r\n    \r\n    Parameters\r\n    ----------\r\n    master : Window.object :: Game window\r\n    player : Hero.object :: Player performing the action\r\n    foe : Character.object :: Enemy of the player\r\n    zone : str :: Type of the zone (forest)\r\n    \r\n    Return value\r\n    ------------\r\n    Battle.object\r\n    \"\"\"\r\n    \r\n    combat = Battle()\r\n    combat.initiate(player, foe)\r\n    # Build the battle and register the fighters\r\n    \r\n    combat.master = master\r\n    # Game window\r\n    \r\n    try:\r\n        combat.field = PhotoImage(file = combat.sprites[zone + '_field'])\r\n        combat.marker = PhotoImage(file = combat.sprites[zone + '_markerFoe'])\r\n        # Build pyimages from the terrain sprites\r\n    except KeyError:\r\n        # If zone + _field does not exist in the sprites dictionary\r\n        raise Exception(\"ERROR : zone has an invalid value.\")\r\n    except TclError:\r\n        # If PhotoImage raises an error\r\n        raise Exception(\"ERROR : the image paths of this zone are invalid\")\r\n    # Fetch the sprite paths\r\n    \r\n    return combat\r\n    \r\n# --- Main flow - Initialisation ---\r\n\r\nkeys = Game_Donnees.Get_keys(section = 'BActions')\r\nliste_commandes = {}\r\nfor key in keys:\r\n    liste_commandes[key] = Game_Donnees.Get_info(section = 'BActions',\r\n                                                 key = key)\r\n# Fetch the attack data\r\n\r\n# ---------------------------------------\r\n\r\n\r\n# ---- Test section ----\r\n\r\ndef program_test():\r\n\r\n    class Window():\r\n        \"This class defines the main window of the game\"\r\n        \r\n        def __init__(self, dimensions = (400,300), wintitle = 'GameTitle'):\r\n            \"\"\"\r\n            Initial function\r\n            -----------------\r\n            This function creates the main window as well as the canvas\r\n            whose dimensions are passed as an argument. The title is\r\n            also set.
The canvas is automatically given focus\r\n            to allow keyboard interaction through the\r\n            bind functions.\r\n            \r\n            Parameters\r\n            ----------\r\n            dimensions : tuple :: dimensions of the game canvas\r\n            wintitle : str :: Title of the game shown at the top of the window\r\n            \r\n            Return value\r\n            ------------\r\n            None\r\n            \"\"\"\r\n            \r\n            self.root = Tk()\r\n            self.root.title(wintitle)\r\n            # Window\r\n            \r\n            self.canvas = Canvas(self.root,\r\n                                 width = dimensions[0],\r\n                                 height = dimensions[1])\r\n            self.canvas.pack()\r\n            self.canvas.focus_set()\r\n            # Game canvas\r\n            \r\n            self.controls = []\r\n            # Controls\r\n\r\n    master = Window()\r\n    \r\n    player = Game_Donnees.Hero(name = 'Alex', lvl = 3)\r\n    foe = Game_Donnees.Foe(name = 'Damien', lvl = 3)\r\n    \r\n    combat = create_combat(master = master,\r\n                           player = player,\r\n                           foe = foe,\r\n                           zone = 'forest')\r\n    \r\n    combat.run_battle()\r\n    \r\nif '__main__' == __name__:\r\n    program_test()\r\n# ----------------------\r\n","sub_path":"Game_Battle.py","file_name":"Game_Battle.py","file_ext":"py","file_size_in_byte":15500,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"328027349","text":"import sys\r\nsys.path.append(\"..\") # change environment to see tools\r\nfrom topo_grid import topogrid\r\n\r\nworkspace = r\"\"%(huc) # path to geodatabase type workspace\r\nhuc8 = \"huc8\" # outerwall feature class name\r\nbuffdist = \"50\" # buffer distance\r\ndendrite = \"NHDFlowline\" # dendrite feature class name\r\ndem = r\"\"%(huc) # path to projected and buffered DEM to re-process\r\ncellSize = \"10\" # output cell size\r\nvipPer = \"5\" # threshold of points to keep based on VIP score.\r\nsnapgrid = r\"\" # path to snap grid\r\n\r\ntopogrid(workspace,huc8,buffdist,dendrite,dem,cellSize,vipPer, snapgrid = snapgrid)","sub_path":"examples/topogrid.py","file_name":"topogrid.py","file_ext":"py","file_size_in_byte":590,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"310151139","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport os\nimport sys\nimport argparse\nimport datetime\n\n\n# noinspection PyInterpreter\ndef main():\n\tconsole_prefix = \"$ \"\n\tchannel_name = \"whatsapp\"\n\tdescription = \"Transform exported whatsapp discussions into ready-for-import slack.com threads.\"\n\t\n\tparser = argparse.ArgumentParser(description=description)\n\tparser.add_argument(\"input\", type=argparse.FileType('r'), help=\"Input filename\")\n\tparser.add_argument(\"-c\", \"--channel\", default=channel_name, help=\"Slack.com channel name, default: \"+channel_name)\n\tparser.add_argument(\"-o\", \"--output\", type=argparse.FileType('w'), help=\"Output filename\")\n\t# parser.print_help()\n\t\n\targs = parser.parse_args()\n\tchannel_name = args.channel\n\t\n\t# Print description in case of parse success\n\tprint(\"\\n 🚀 {0}: {1}\\n\".format(os.path.basename(sys.argv[0]), description))\n\n\tinput_file = args.input\n\toutput_file = open(\"Slack Import \"+args.input.name, 'w') if args.output is None else args.output\n\t\n\tprint(\"{0}input filename: '{1}'\".format(console_prefix, input_file.name))\n\tprint(\"{0}output filename: '{1}'\".format(console_prefix, output_file.name))\n\tprint(\"{0}slack channel name: '{1}'\".format(console_prefix, channel_name))\n\t\n\tprint(\"{0}Reading input file...\".format(console_prefix))\n\tinput_lines = input_file.readlines()\n\tusernames_mapping = {}\n\t\n\t# Loop through raw lines to combine multi-line messages\n\toutput_line =
None\n\toutput_elements = {}\n\t\n\twith open(output_file.name, 'w') as outfile:\n\t\n\t\tfor line in input_lines:\n\t\t\ttry:\n\t\t\t\tdt = datetime.datetime.strptime(line.split('-')[0].strip(), \"%m/%d/%y, %I:%M %p\")\n\t\t\texcept ValueError:\n\t\t\t\t# We cannot find a date, it's a continuation of a line, most probably...\n\t\t\t\tif (\"content\" in output_elements.keys()):\n\t\t\t\t\toutput_elements[\"content\"] += \"\\n\"+line.strip()\n\t\t\t\telse:\n\t\t\t\t\tprint(\"bad line\")\n\t\t\t\t\tprint(line)\n\t\t\telse:\n\t\t\t\tif output_elements.get(\"content\", None) is not None:\n\t\t\t\t\tnew_line = '\"{0}\",\"{1}\",\"{2}\",\"{3}\"'.format(int(output_elements[\"date\"].timestamp()), channel_name,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\toutput_elements[\"username\"],\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\toutput_elements[\"content\"].replace('\"', \"'\"))\n\t\t\t\t\t# print(new_line)\n\t\t\t\t\toutfile.write(new_line+\"\\n\")\n\t\t\t\t\toutput_elements = {}\n\t\n\t\t\t\t# We can find a date at start of line, it's a new line\n\t\t\t\toutput_line = line.strip()\n\t\t\t\toutput_elements[\"date\"] = dt\n\t\t\t\t\n\t\t\t\t# Make sure to change all double quotes to standard ones\n\t\t\t\tfor quote in ['\"', '‟', '″', '˝', '“']:\n\t\t\t\t\toutput_line = output_line.replace(quote, '\\\"')\n\t\n\t\t\t\t# Oh, by the way, look for a username. The presence of a username followed by a colon is the only flag we can use.\n\t\t\t\tinput_username = line.strip().split('-')[1].strip().split(':')[0].strip()\n\t\t\t\tif input_username not in usernames_mapping.keys():\n\t\t\t\t\toutput_username = input(\"\\n{0}Unknown username '{1}'. Enter corresponding Slack.com username (=identical): \".format(console_prefix, input_username))\n\t\t\t\t\tif len(output_username.strip()) > 0:\n\t\t\t\t\t\tusernames_mapping[input_username] = output_username.strip()\n\t\t\t\t\t\n\t\t\t\toutput_username = usernames_mapping.get(input_username, None)\n\t\t\t\tif output_username is not None:\n\t\t\t\t\toutput_elements[\"username\"] = output_username\n\t\t\t\t\toutput_elements[\"content\"] = ':'.join('-'.join(line.strip().split('-')[1:]).strip().split(':')[1:]).strip()\n\n\n\t\t# We need this to get the last line...\n\t\tif output_elements.get(\"content\", None) is not None:\n\t\t\tnew_line = '\"{0}\",\"{1}\",\"{2}\",\"{3}\"'.format(int(output_elements[\"date\"].timestamp()), channel_name,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\toutput_elements[\"username\"], output_elements[\"content\"].replace('\"', \"'\"))\n\t\t\t# print(new_line)\n\t\t\toutfile.write(new_line+\"\\n\")\n\t\t\toutput_elements = {}\n\n\t\t\t\n\tprint(\"\\n 🌖 {0}Done.
Enjoy!\\n\".format(console_prefix))\n\nif __name__ == \"__main__\":\n\tmain()\n\n\n","sub_path":"parse.py","file_name":"parse.py","file_ext":"py","file_size_in_byte":3749,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"388755931","text":"import pathlib\nimport re\nimport subprocess\nfrom typing import Optional, Tuple, Union\n\nimport numpy as np\nfrom numpy.random import RandomState\nfrom scipy.stats import dirichlet\nfrom skopt.space import Categorical, Integer, Real\n\nfrom tune.utils import TimeControl\n\n__all__ = [\n \"counts_to_penta\",\n \"run_match\",\n \"parse_experiment_result\",\n \"reduce_ranges\",\n \"elo_to_prob\",\n \"prob_to_elo\",\n]\n\n\ndef elo_to_prob(elo, k=4.0):\n \"\"\"Convert an Elo score (logit space) to a probability.\n\n Parameters\n ----------\n elo : float\n A real-valued Elo score.\n k : float, optional (default=4.0)\n Scale of the logistic distribution.\n\n Returns\n -------\n float\n Win probability\n\n Raises\n ------\n ValueError\n if k <= 0\n\n \"\"\"\n if k <= 0:\n raise ValueError(\"k must be positive\")\n return 1 / (1 + np.power(10, -elo / k))\n\n\ndef prob_to_elo(p, k=4.0):\n \"\"\"Convert a win probability to an Elo score (logit space).\n\n Parameters\n ----------\n p : float\n The win probability of the player.\n k : float, optional (default=4.0)\n Scale of the logistic distribution.\n\n Returns\n -------\n float\n Elo score of the player\n\n Raises\n ------\n ValueError\n if k <= 0\n\n \"\"\"\n if k <= 0:\n raise ValueError(\"k must be positive\")\n return k * np.log10(-p / (p - 1))\n\n\ndef counts_to_penta(\n counts: np.ndarray,\n prior_counts: Optional[np.ndarray] = None,\n n_dirichlet_samples: int = 1000000,\n score_scale: float = 4.0,\n random_state: Union[int, RandomState, None] = None,\n **kwargs,\n) -> Tuple[float, float]:\n \"\"\"Compute mean Elo score and variance of the pentanomial model for a count array.\n\n Parameters\n ----------\n counts : np.ndarray\n Array of counts for WW, WD, WL/DD, LD and LL\n prior_counts : np.ndarray or None, default=None\n Pseudo counts to use for WW, WD, WL/DD, LD and LL in the\n pentanomial model.\n n_dirichlet_samples : int, default = 1 000 000\n Number of samples to draw from the Dirichlet distribution in order to\n estimate the standard error of the score.\n score_scale : float, optional (default=4.0)\n Scale of the logistic distribution used to calculate the score. Has to be a\n positive real number\n random_state : int, RandomState instance or None, optional (default: None)\n The generator used to initialize the centers. 
If int, random_state is\n the seed used by the random number generator; If RandomState instance,\n random_state is the random number generator; If None, the random number\n generator is the RandomState instance used by `np.random`.\n kwargs : dict\n Additional keyword arguments\n Returns\n -------\n tuple (float, float)\n Mean Elo score and corresponding variance\n \"\"\"\n if prior_counts is None:\n prior_counts = np.array([0.14, 0.19, 0.34, 0.19, 0.14]) * 2.5\n elif len(prior_counts) != 5:\n raise ValueError(\"Argument prior_counts should contain 5 elements.\")\n dist = dirichlet(alpha=counts + prior_counts)\n scores = [0.0, 0.25, 0.5, 0.75, 1.0]\n score = prob_to_elo(dist.mean().dot(scores), k=score_scale)\n error = prob_to_elo(\n dist.rvs(n_dirichlet_samples, random_state=random_state).dot(scores),\n k=score_scale,\n ).var()\n return score, error\n\n\ndef parse_experiment_result(\n outstr,\n prior_counts=None,\n n_dirichlet_samples=1000000,\n score_scale=4.0,\n random_state=None,\n **kwargs,\n):\n \"\"\"Parse cutechess-cli result output to extract mean score and error.\n\n Here we use a simple pentanomial model to exploit paired openings.\n We distinguish the outcomes WW, WD, WL/DD, LD and LL and apply the\n following scoring (note, that the optimizer always minimizes the score):\n\n +------+------+-------+-----+-----+\n | WW | WD | WL/DD | LD | LL |\n +======+======+=======+=====+=====+\n | -1.0 | -0.5 | 0.0 | 0.5 | 1.0 |\n +------+------+-------+-----+-----+\n\n Note: It is important that the match output was produced using\n cutechess-cli using paired openings, otherwise the returned score is\n useless.\n\n Parameters\n ----------\n output : string (utf-8)\n Match output of cutechess-cli. It assumes the output was coming from\n a head-to-head match with paired openings.\n prior_counts : list-like float or int, default=None\n Pseudo counts to use for WW, WD, WL/DD, LD and LL in the\n pentanomial model.\n n_dirichlet_samples : int, default = 1 000 000\n Number of samples to draw from the Dirichlet distribution in order to\n estimate the standard error of the score.\n score_scale : float, optional (default=4.0)\n Scale of the logistic distribution used to calculate the score. Has to be a\n positive real number\n random_state : int, RandomState instance or None, optional (default: None)\n The generator used to initialize the centers. If int, random_state is\n the seed used by the random number generator; If RandomState instance,\n random_state is the random number generator; If None, the random number\n generator is the RandomState instance used by `np.random`.\n Returns\n -------\n score : float (in [-1, 1])\n Expected (negative) score of the first player (the lower the stronger)\n error : float\n Estimated standard error of the score. 
Estimated by repeated draws\n from a Dirichlet distribution.\n \"\"\"\n wdl_strings = re.findall(r\"Score of.*:\\s*([0-9]+\\s-\\s[0-9]+\\s-\\s[0-9]+)\", outstr)\n array = np.array(\n [np.array([int(y) for y in re.findall(r\"[0-9]+\", x)]) for x in wdl_strings]\n )\n diffs = np.diff(array, axis=0, prepend=np.array([[0, 0, 0]]))\n\n # Parse order of finished games to be able to compute the correct pentanomial scores\n finished = np.array(\n [int(x) - 1 for x in re.findall(r\"Finished game ([0-9]+)\", outstr)]\n )\n diffs = diffs[np.argsort(finished)]\n\n counts = {\"WW\": 0, \"WD\": 0, \"WL/DD\": 0, \"LD\": 0, \"LL\": 0}\n for i in range(0, len(diffs) - 1, 2):\n match = diffs[i] + diffs[i + 1]\n if match[0] == 2:\n counts[\"WW\"] += 1\n elif match[0] == 1:\n if match[1] == 1:\n counts[\"WL/DD\"] += 1\n else:\n counts[\"WD\"] += 1\n elif match[1] == 1:\n counts[\"LD\"] += 1\n elif match[2] == 2:\n counts[\"WL/DD\"] += 1\n else:\n counts[\"LL\"] += 1\n counts_array = np.array(list(counts.values()))\n return counts_to_penta(\n counts=counts_array,\n prior_counts=prior_counts,\n n_dirichlet_samples=n_dirichlet_samples,\n score_scale=score_scale,\n random_state=random_state,\n **kwargs,\n )\n\n\ndef _construct_engine_conf(\n id,\n engine_npm=None,\n engine_tc=None,\n engine_st=None,\n engine_ponder=False,\n timemargin=None,\n):\n result = [\"-engine\", f\"conf=engine{id}\"]\n if engine_npm is not None:\n result.extend((\"tc=inf\", f\"nodes={engine_npm}\"))\n return result\n if engine_st is not None:\n result.append(f\"st={str(engine_st)}\")\n if timemargin is not None:\n result.append(f\"timemargin={str(timemargin)}\")\n if engine_ponder:\n result.append(\"ponder\")\n return result\n if isinstance(engine_tc, str):\n engine_tc = TimeControl.from_string(engine_tc)\n result.append(f\"tc={str(engine_tc)}\")\n if timemargin is not None:\n result.append(f\"timemargin={str(timemargin)}\")\n if engine_ponder:\n result.append(\"ponder\")\n return result\n\n\ndef run_match(\n rounds=10,\n engine1_tc=None,\n engine2_tc=None,\n engine1_st=None,\n engine2_st=None,\n engine1_npm=None,\n engine2_npm=None,\n engine1_ponder=False,\n engine2_ponder=False,\n timemargin=None,\n opening_file=None,\n adjudicate_draws=False,\n draw_movenumber=1,\n draw_movecount=10,\n draw_score=8,\n adjudicate_resign=False,\n resign_movecount=3,\n resign_score=550,\n adjudicate_tb=False,\n tb_path=None,\n concurrency=1,\n debug_mode=False,\n **kwargs,\n):\n \"\"\"Run a cutechess-cli match of two engines with paired random openings.\n\n Parameters\n ----------\n rounds : int, default=10\n Number of rounds to play in the match (each round consists of 2 games).\n engine1_tc : str or TimeControl object, default=None\n Time control to use for the first engine. 
If str, it can be a\n non-increment time control like \"10\" (10 seconds) or an increment\n time control like \"5+1.5\" (5 seconds total with 1.5 seconds increment).\n If None, it is assumed that engine1_npm or engine1_st is provided.\n engine2_tc : str or TimeControl object, default=None\n See engine1_tc.\n engine1_st : str or int, default=None\n Time limit in seconds for each move.\n If None, it is assumed that engine1_tc or engine1_npm is provided.\n engine2_st : str or TimeControl object, default=None\n See engine1_tc.\n engine1_npm : str or int, default=None\n Number of nodes per move the engine is allowed to search.\n If None, it is assumed that engine1_tc or engine1_st is provided.\n engine2_npm : str or int, default=None\n See engine1_npm.\n engine1_ponder : bool, default=False\n If True, allow engine1 to ponder.\n engine2_ponder : bool, default=False\n See engine1_ponder.\n timemargin : str or int, default=None\n Allowed number of milliseconds the engines are allowed to go over the time\n limit. If None, the margin is 0.\n opening_file : str, default=None\n Path to the file containing the openings. Can be .epd or .pgn.\n Make sure that the file explicitly has the .epd or .pgn suffix, as it\n is used to detect the format.\n adjudicate_draws : bool, default=False\n Specify, if cutechess-cli is allowed to adjudicate draws, if the\n scores of both engines drop below draw_score for draw_movecount number\n of moves. Only kicks in after draw_movenumber moves have been played.\n draw_movenumber : int, default=1\n Number of moves to play after the opening, before draw adjudication is\n allowed.\n draw_movecount : int, default=10\n Number of moves below the threshold draw_score, without captures and\n pawn moves, before the game is adjudicated as draw.\n draw_score : int, default=8\n Score threshold of the engines in centipawns. If the score of both\n engines drops below this value for draw_movecount consecutive moves,\n and there are no captures and pawn moves, the game is adjudicated as\n draw.\n adjudicate_resign : bool, default=False\n Specify, if cutechess-cli is allowed to adjudicate wins/losses based on\n the engine scores. If one engine’s score drops below -resign_score for\n resign_movecount many moves, the game is considered a loss for this\n engine.\n resign_movecount : int, default=3\n Number of consecutive moves one engine has to output a score below\n the resign_score threshold for the game to be considered a loss for this\n engine.\n resign_score : int, default=550\n Resign score threshold in centipawns. The score of the engine has to\n stay below -resign_score for at least resign_movecount moves for it to\n be adjudicated as a loss.\n adjudicate_tb : bool, default=False\n Allow cutechess-cli to adjudicate games based on Syzygy tablebases.\n If true, tb_path has to be set.\n tb_path : str, default=None\n Path to the folder containing the Syzygy tablebases.\n concurrency : int, default=1\n Number of games to run in parallel. 
Be careful when running time control\n games, since the engines can negatively impact each other when running\n in parallel.\n debug_mode : bool, default=False\n If True, pass ``-debug`` to cutechess-cli.\n\n Yields\n -------\n out : str\n Results of the cutechess-cli match streamed as str.\n \"\"\"\n string_array = [\"cutechess-cli\"]\n string_array.extend((\"-concurrency\", str(concurrency)))\n\n if (engine1_npm is None and engine1_tc is None and engine1_st is None) or (\n engine2_npm is None and engine2_tc is None and engine2_st is None\n ):\n raise ValueError(\"A valid time control or nodes configuration is required.\")\n string_array.extend(\n _construct_engine_conf(\n id=1,\n engine_npm=engine1_npm,\n engine_tc=engine1_tc,\n engine_st=engine1_st,\n engine_ponder=engine1_ponder,\n timemargin=timemargin,\n )\n )\n string_array.extend(\n _construct_engine_conf(\n id=2,\n engine_npm=engine2_npm,\n engine_tc=engine2_tc,\n engine_st=engine2_st,\n engine_ponder=engine2_ponder,\n timemargin=timemargin,\n )\n )\n\n if opening_file is None:\n raise ValueError(\"Providing an opening file is required.\")\n opening_path = pathlib.Path(opening_file)\n if not opening_path.exists():\n raise FileNotFoundError(\n f\"Opening file the following path was not found: {opening_path}\"\n )\n opening_format = opening_path.suffix\n if opening_format not in {\".epd\", \".pgn\"}:\n raise ValueError(\n \"Unable to determine opening format. \"\n \"Make sure to add .epd or .pgn to your filename.\"\n )\n string_array.extend(\n (\n \"-openings\",\n f\"file={str(opening_path)}\",\n f\"format={opening_format[1:]}\",\n \"order=random\",\n )\n )\n\n if adjudicate_draws:\n string_array.extend(\n (\n \"-draw\",\n f\"movenumber={draw_movenumber}\",\n f\"movecount={draw_movecount}\",\n f\"score={draw_score}\",\n )\n )\n if adjudicate_resign:\n string_array.extend(\n (\"-resign\", f\"movecount={resign_movecount}\", f\"score={resign_score}\")\n )\n if adjudicate_tb:\n if tb_path is None:\n raise ValueError(\"No path to tablebases provided.\")\n tb_path_object = pathlib.Path(tb_path)\n if not tb_path_object.exists():\n raise FileNotFoundError(\n f\"No folder found at the following path: {str(tb_path_object)}\"\n )\n string_array.extend((\"-tb\", str(tb_path_object)))\n\n string_array.extend((\"-rounds\", f\"{rounds}\"))\n string_array.extend((\"-games\", \"2\"))\n string_array.append(\"-repeat\")\n string_array.append(\"-recover\")\n if debug_mode:\n string_array.append(\"-debug\")\n string_array.extend((\"-pgnout\", \"out.pgn\"))\n\n with subprocess.Popen(\n string_array, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, text=True\n ) as popen:\n for line in iter(popen.stdout.readline, \"\"):\n yield line\n\n\ndef reduce_ranges(X, y, noise, space):\n X_new = []\n y_new = []\n noise_new = []\n reduction_needed = False\n for row, yval, nval in zip(X, y, noise):\n include_row = True\n for dim, value in zip(space.dimensions, row):\n if isinstance(dim, Integer) or isinstance(dim, Real):\n lb, ub = dim.bounds\n if value < lb or value > ub:\n include_row = False\n elif isinstance(dim, Categorical):\n if value not in dim.bounds:\n include_row = False\n else:\n raise ValueError(f\"Parameter type {type(dim)} unknown.\")\n if include_row:\n X_new.append(row)\n y_new.append(yval)\n noise_new.append(nval)\n else:\n reduction_needed = True\n return reduction_needed, X_new, y_new, 
noise_new\n","sub_path":"tune/local.py","file_name":"local.py","file_ext":"py","file_size_in_byte":15887,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"47250615","text":"# Licensed under a 3-clause BSD style license - see LICENSE.rst\n\"\"\"Fermi catalog and source classes.\n\"\"\"\nfrom __future__ import absolute_import, division, print_function, unicode_literals\nimport tarfile\nimport numpy as np\nfrom astropy.io import fits\nfrom astropy.table import Table\nfrom astropy.utils.data import download_file\nfrom astropy.units import Quantity\nfrom ..utils.energy import EnergyBounds\nfrom ..datasets import gammapy_extra\nfrom .core import SourceCatalog, SourceCatalogObject\n\n__all__ = [\n 'fetch_fermi_catalog',\n 'fetch_fermi_extended_sources',\n 'SourceCatalog2FHL',\n 'SourceCatalog3FGL',\n 'SourceCatalogObject2FHL',\n 'SourceCatalogObject3FGL',\n]\n\n\ndef _is_galactic(source_class):\n \"\"\"Re-group sources into rough categories.\n\n Categories:\n - 'galactic'\n - 'extra-galactic'\n - 'unknown'\n - 'other'\n\n Source identifications and associations are treated identically,\n i.e. lower-case and upper-case source classes are not distinguished.\n\n References:\n - Table 3 in 3FGL paper: http://adsabs.harvard.edu/abs/2015arXiv150102003T\n - Table 4 in the 1FHL paper: http://adsabs.harvard.edu/abs/2013ApJS..209...34A\n \"\"\"\n source_class = source_class.lower().strip()\n\n gal_classes = ['psr', 'pwn', 'snr', 'spp', 'lbv', 'hmb',\n 'hpsr', 'sfr', 'glc', 'bin', 'nov']\n egal_classes = ['agn', 'agu', 'bzb', 'bzq', 'bll', 'gal', 'rdg', 'fsrq',\n 'css', 'sey', 'sbg', 'nlsy1', 'ssrq', 'bcu']\n\n if source_class in gal_classes:\n return 'galactic'\n elif source_class in egal_classes:\n return 'extra-galactic'\n elif source_class == '':\n return 'unknown'\n else:\n raise ValueError('Unknown source class: {}'.format(source_class))\n\n\ndef fetch_fermi_catalog(catalog, extension=None):\n \"\"\"Fetch Fermi catalog data.\n\n Reference: http://fermi.gsfc.nasa.gov/ssc/data/access/lat/.\n\n The Fermi catalogs contain the following relevant catalog HDUs:\n\n * 3FGL Catalog : LAT 4-year Point Source Catalog\n * ``LAT_Point_Source_Catalog`` Point Source Catalog Table.\n * ``ExtendedSources`` Extended Source Catalog Table.\n * 2FGL Catalog : LAT 2-year Point Source Catalog\n * ``LAT_Point_Source_Catalog`` Point Source Catalog Table.\n * ``ExtendedSources`` Extended Source Catalog Table.\n * 1FGL Catalog : LAT 1-year Point Source Catalog\n * ``LAT_Point_Source_Catalog`` Point Source Catalog Table.\n * 2FHL Catalog : Second Fermi-LAT Catalog of High-Energy Sources\n * ``Count Map`` AIT projection 2D count image\n * ``2FHL Source Catalog`` Main catalog\n * ``Extended Sources`` Extended Source Catalog Table\n * ``ROIs`` Regions of interest\n * 1FHL Catalog : First Fermi-LAT Catalog of Sources above 10 GeV\n * ``LAT_Point_Source_Catalog`` Point Source Catalog Table.\n * ``ExtendedSources`` Extended Source Catalog Table.\n * 2PC Catalog : LAT Second Catalog of Gamma-ray Pulsars\n * ``PULSAR_CATALOG`` Pulsar Catalog Table.\n * ``SPECTRAL`` Table of Pulsar Spectra Parameters.\n * ``OFF_PEAK`` Table for further Spectral and Flux data for the Catalog.\n\n Parameters\n ----------\n catalog : {'3FGL', '2FGL', '1FGL', '1FHL', '2FHL', '2PC'}\n Specifies which catalog to display.\n extension : str\n Specifies which catalog HDU to provide as a table (optional).\n See list of catalog HDUs above.\n\n Returns\n -------\n hdu_list (Default) : 
`~astropy.io.fits.HDUList`\n Catalog FITS HDU list (for access to full catalog dataset).\n catalog_table : `~astropy.table.Table`\n Catalog table for a selected hdu extension.\n\n Examples\n --------\n >>> from gammapy.catalog import fetch_fermi_catalog\n >>> fetch_fermi_catalog('2FGL')\n [,\n ,\n ,\n ,\n ]\n\n >>> from gammapy.catalog import fetch_fermi_catalog\n >>> fetch_fermi_catalog('2FGL', 'LAT_Point_Source_Catalog')\n \n \"\"\"\n BASE_URL = 'http://fermi.gsfc.nasa.gov/ssc/data/access/lat/'\n\n if catalog == '3FGL':\n url = BASE_URL + '4yr_catalog/gll_psc_v16.fit'\n elif catalog == '2FGL':\n url = BASE_URL + '2yr_catalog/gll_psc_v08.fit'\n elif catalog == '1FGL':\n url = BASE_URL + '1yr_catalog/gll_psc_v03.fit'\n elif catalog == '1FHL':\n url = BASE_URL + '1FHL/gll_psch_v07.fit'\n elif catalog == '2FHL':\n url = 'https://github.com/gammapy/gammapy-extra/raw/master/datasets/catalogs/fermi/gll_psch_v08.fit.gz'\n elif catalog == '2PC':\n url = BASE_URL + '2nd_PSR_catalog/2PC_catalog_v03.fits'\n else:\n ss = 'Invalid catalog: {0}\\n'.format(catalog)\n raise ValueError(ss)\n\n filename = download_file(url, cache=True)\n hdu_list = fits.open(filename)\n\n if extension is None:\n return hdu_list\n\n # TODO: 2FHL doesn't have a 'CLASS1' column, just 'CLASS'\n # It's probably better if we make a `SourceCatalog` class\n # and then sub-class `FermiSourceCatalog` and `Fermi2FHLSourceCatalog`\n # and handle catalog-specific stuff in these classes,\n # trying to provide an as-uniform as possible API to the common catalogs.\n table = Table(hdu_list[extension].data)\n table['IS_GALACTIC'] = [_is_galactic(_) for _ in table['CLASS1']]\n\n return table\n\n\ndef fetch_fermi_extended_sources(catalog):\n \"\"\"Fetch Fermi catalog extended source images.\n\n Reference: http://fermi.gsfc.nasa.gov/ssc/data/access/lat/.\n\n Extended source are available for the following Fermi catalogs:\n\n * 3FGL Catalog : LAT 4-year Point Source Catalog\n * 2FGL Catalog : LAT 2-year Point Source Catalog\n * 1FHL Catalog : First Fermi-LAT Catalog of Sources above 10 GeV\n\n Parameters\n ----------\n catalog : {'3FGL', '2FGL', '1FHL'}\n Specifies which catalog extended sources to return.\n\n Returns\n -------\n hdu_list : `~astropy.io.fits.HDUList`\n FITS HDU list of FITS ImageHDUs for the extended sources.\n\n Examples\n --------\n >>> from gammapy.catalog import fetch_fermi_extended_sources\n >>> sources = fetch_fermi_extended_sources('2FGL')\n >>> len(sources)\n 12\n \"\"\"\n BASE_URL = 'http://fermi.gsfc.nasa.gov/ssc/data/access/lat/'\n if catalog == '3FGL':\n url = BASE_URL + '4yr_catalog/LAT_extended_sources_v15.tgz'\n elif catalog == '2FGL':\n url = BASE_URL + '2yr_catalog/gll_psc_v07_templates.tgz'\n elif catalog == '1FHL':\n url = BASE_URL + '1FHL/LAT_extended_sources_v12.tar'\n else:\n ss = 'Invalid catalog: {0}\\n'.format(catalog)\n raise ValueError(ss)\n\n filename = download_file(url, cache=True)\n tar = tarfile.open(filename, 'r')\n\n hdu_list = []\n for member in tar.getmembers():\n if member.name.endswith(\".fits\"):\n file = tar.extractfile(member)\n hdu = fits.open(file)[0]\n hdu_list.append(hdu)\n hdu_list = fits.HDUList(hdu_list)\n\n return hdu_list\n\n\nclass SourceCatalogObject3FGL(SourceCatalogObject):\n \"\"\"\n One source from the Fermi-LAT 3FGL catalog.\n \"\"\"\n _ebounds = EnergyBounds(Quantity([100, 300, 1000, 3000, 10000, 100000], 'MeV'))\n _ebounds_suffix = ['100_300', '300_1000', '1000_3000', '3000_10000', '10000_100000'] \n\n def __str__(self):\n \"\"\"Print default summary info string\"\"\"\n 
return self.summary()\n\n def summary(self):\n \"\"\"Print summary info.\"\"\"\n d = self.data\n\n ss = 'Source: {}\\n'.format(d['Source_Name'])\n ss += '\\n'\n\n ss += 'RA (J2000) : {}\\n'.format(d['RAJ2000'])\n ss += 'Dec (J2000) : {}\\n'.format(d['DEJ2000'])\n ss += 'GLON : {}\\n'.format(d['GLON'])\n ss += 'GLAT : {}\\n'.format(d['GLAT'])\n ss += '\\n'\n\n val, err = d['Energy_Flux100'], d['Unc_Energy_Flux100']\n ss += 'Energy flux (100 MeV - 100 GeV) : {} +- {} erg cm^-2 s^-1\\n'.format(val, err)\n ss += 'Detection significance : {}\\n'.format(d['Signif_Avg'])\n\n return ss\n\n @property\n def spectrum(self):\n raise NotImplementedError\n\n @property\n def flux_points_differential(self):\n \"\"\"\n Get `~gammapy.spectrum.DifferentialFluxPoints` for a 3FGL source\n \"\"\"\n from ..spectrum import DifferentialFluxPoints\n \n energy = self._ebounds.log_centers\n \n nuFnu = self._get_flux_values('nuFnu', 'erg cm-2 s-1')\n diff_flux = (nuFnu * energy ** -2).to('erg-1 cm-2 s-1')\n\n # Get relativ error on integral fluxes\n int_flux_points = self.flux_points_integral\n diff_flux_err_hi = diff_flux * int_flux_points['INT_FLUX_ERR_HI_%'] / 100\n diff_flux_err_lo = diff_flux * int_flux_points['INT_FLUX_ERR_LO_%'] / 100\n\n return DifferentialFluxPoints.from_arrays(energy=energy, diff_flux=diff_flux,\n diff_flux_err_lo=diff_flux_err_lo,\n diff_flux_err_hi=diff_flux_err_hi)\n\n @property\n def flux_points_integral(self):\n \"\"\"\n Get `~gammapy.spectrum.IntegralFluxPoints` for a 3FGL source\n\n Parameters\n ----------\n source : dict\n 3FGL source\n \"\"\"\n from ..spectrum import IntegralFluxPoints\n \n flux = self._get_flux_values()\n flux_err = self._get_flux_values('Unc_Flux')\n \n return IntegralFluxPoints.from_arrays(self._ebounds, flux, flux + flux_err[:, 1],\n flux + flux_err[:, 0])\n\n def _get_flux_values(self, prefix='Flux', unit='cm-2 s-1'):\n if prefix not in ['Flux', 'Unc_Flux', 'nuFnu']:\n raise ValueError(\"Must be one of the following: 'Flux', 'Unc_Flux', 'nuFnu'\")\n\n values = [self.data[prefix + _] for _ in self._ebounds_suffix]\n return Quantity(values, unit)\n\n\n def plot_lightcurve(self, ax=None):\n \"\"\"Plot lightcurve.\n \"\"\"\n from gammapy.time import plot_fermi_3fgl_light_curve\n\n ax = plot_fermi_3fgl_light_curve(self.name, ax=ax)\n return ax\n\n\n def plot_spectrum(self, ax=None):\n \"\"\"Plot spectrum.\n \"\"\"\n import matplotlib.pyplot as plt\n from gammapy.extern.stats import gmean\n from astropy.modeling.models import PowerLaw1D, LogParabola1D, ExponentialCutoffPowerLaw1D\n\n ax = plt.gca() if ax is None else ax\n\n # Only work with indices where we have a valid detection and a lower bound\n flux_bounds = [self.cat_row[\"Unc_\" + self.y_labels[i]] for i in range(0, np.size(self.y_labels))]\n\n valid_indices = []\n\n for i in range(0, len(flux_bounds)):\n if np.size(flux_bounds[i]) == 2 and not np.isnan(flux_bounds[i][0]):\n valid_indices.append(i)\n\n y_vals = np.array([self.cat_row[i] for i in (self.y_labels[j] for j in valid_indices)])\n y_lower = np.array([self.cat_row[\"Unc_\" + i][0] for i in (self.y_labels[j] for j in valid_indices)])\n y_upper = np.array([self.cat_row[\"Unc_\" + i][1] for i in (self.y_labels[j] for j in valid_indices)])\n\n y_lower = y_vals + y_lower\n y_upper = y_vals + y_upper\n\n x_vals = [self.x_cens[i].value for i in valid_indices]\n bin_edges1 = [-(self.x_bins_edges[i] - self.x_cens[i]).value for i in valid_indices]\n bin_edges2 = [(self.x_bins_edges[i + 1] - self.x_cens[i]).value for i in valid_indices]\n\n y_vals = [y_vals[i] / 
x_vals[i] for i in range(0, np.size(y_vals))]\n y_upper = [y_upper[i] / x_vals[i] for i in range(0, np.size(y_vals))]\n y_lower = [y_lower[i] / x_vals[i] for i in range(0, np.size(y_vals))]\n\n y_cens = np.array([gmean([y_lower[i], y_upper[i]]) for i in range(0, np.size(y_lower))])\n\n y_upper = np.array([y_upper[i] - y_vals[i] for i in range(0, np.size(y_lower))])\n y_lower = np.array([y_vals[i] - y_lower[i] for i in range(0, np.size(y_lower))])\n\n ax.loglog()\n\n fmt = dict(elinewidth=1, linewidth=0, color='black')\n ax.errorbar(x_vals, y_vals, yerr=(y_lower, y_upper), **fmt)\n\n # Place the x-axis uncertainties in the center of the y-axis uncertainties.\n ax.errorbar(x_vals, y_cens, xerr=(bin_edges1, bin_edges2), **fmt)\n\n x_model = np.logspace(np.log10(min(x_vals)), np.log10(max(x_vals)), 25)\n\n if self.spec_type == \"PowerLaw\":\n\n y_model = PowerLaw1D(amplitude=self.flux_density,\n x_0=self.pivot_en,\n alpha=self.spec_index)\n\n elif self.spec_type == \"LogParabola\":\n\n y_model = LogParabola1D(amplitude=self.flux_density,\n x_0=self.pivot_en,\n alpha=self.spec_index,\n beta=self.beta)\n\n elif self.spec_type == \"PLExpCutoff\":\n\n y_model = ExponentialCutoffPowerLaw1D(amplitude=self.flux_density,\n x_0=self.pivot_en,\n alpha=self.spec_index,\n x_cutoff=self.cutoff)\n elif self.spec_type == \"PLSuperExpCutoff\":\n raise NotImplementedError\n else:\n raise NotImplementedError\n\n ax.set_xlabel('Energy (MeV)')\n ax.set_ylabel('Flux (ph/cm^2/s/MeV)')\n ax.plot(x_model, y_model(x_model))\n\n return ax\n\n\nclass SourceCatalogObject2FHL(SourceCatalogObject):\n \"\"\"One source from the Fermi-LAT 2FHL catalog.\n \"\"\"\n\n def __str__(self):\n \"\"\"Print default summary info string\"\"\"\n return self.summary()\n\n def summary(self):\n \"\"\"Print summary info.\"\"\"\n # TODO: can we share code with 3FGL summary funtion?\n d = self.data\n\n ss = 'Source: {}\\n'.format(d['Source_Name'])\n ss += '\\n'\n\n ss += 'RA (J2000) : {}\\n'.format(d['RAJ2000'])\n ss += 'Dec (J2000) : {}\\n'.format(d['DEJ2000'])\n ss += 'GLON : {}\\n'.format(d['GLON'])\n ss += 'GLAT : {}\\n'.format(d['GLAT'])\n ss += '\\n'\n\n # val, err = d['Energy_Flux100'], d['Unc_Energy_Flux100']\n # ss += 'Energy flux (100 MeV - 100 GeV) : {} +- {} erg cm^-2 s^-1\\n'.format(val, err)\n # ss += 'Detection significance : {}\\n'.format(d['Signif_Avg'])\n\n return ss\n\n\nclass SourceCatalog3FGL(SourceCatalog):\n \"\"\"Fermi-LAT 3FGL source catalog.\n \"\"\"\n name = '3fgl'\n description = 'LAT 4-year point source catalog'\n source_object_class = SourceCatalogObject3FGL\n\n def __init__(self, filename=None):\n if not filename:\n filename = gammapy_extra.filename('datasets/catalogs/fermi/gll_psc_v16.fit.gz')\n\n self.hdu_list = fits.open(filename)\n self.extended_sources_table = Table(self.hdu_list['ExtendedSources'].data)\n\n table = Table(self.hdu_list['LAT_Point_Source_Catalog'].data)\n super(SourceCatalog3FGL, self).__init__(table=table)\n\n\nclass SourceCatalog2FHL(SourceCatalog):\n \"\"\"Fermi-LAT 2FHL source catalog.\n \"\"\"\n name = '2fhl'\n description = 'LAT second high-energy source catalog'\n source_object_class = SourceCatalogObject2FHL\n\n def __init__(self, filename=None):\n if not filename:\n filename = gammapy_extra.filename('datasets/catalogs/fermi/gll_psch_v08.fit.gz')\n\n self.hdu_list = fits.open(filename)\n self.count_map_hdu = self.hdu_list['Count Map']\n self.extended_sources_table = Table(self.hdu_list['Extended Sources'].data)\n self.rois = Table(self.hdu_list['ROIs'].data)\n\n table = 
Table(self.hdu_list['2FHL Source Catalog'].data)\n        super(SourceCatalog2FHL, self).__init__(table=table)\n","sub_path":"gammapy/catalog/fermi.py","file_name":"fermi.py","file_ext":"py","file_size_in_byte":15988,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"91174160","text":"#!/xtal/anaconda3/bin/python\n# -*- coding: utf-8 -*-\n#\n# This script can plot a Bluice scan file.\n# Written by Feng Yu.\n# Shanghai Synchrotron Radiation Facility\n# Shanghai Institute of Applied Physics, Chinese Academy of Science\n# Email: yufeng@sinap.ac.cn\n\n__version__ = \"2016.10.17\"\n\nimport sys, getopt\nfrom scipy.optimize import curve_fit\nimport numpy as np\nfrom math import fabs\nimport matplotlib.pyplot as plt\n\nfrom savitzky_golay_filter import savitzky_golay\n\n\ndef readHeader(fileName):\n    headerInfo = {}\n    fileIn = open(fileName, \"r\")\n    lines = fileIn.readlines()\n    fileIn.close()\n    line = lines[2]\n    headerInfo[\"Motor1\"] = line[13:-1].split(\" \")[0]\n    line = lines[3]\n    headerInfo[\"Motor2\"] = line[13:-1].split(\" \")[0]\n    line = lines[4]\n    headerInfo[\"Detector\"] = line[13:-1]\n    return headerInfo\n\ndef readData(fileName):\n    X = []\n    Y = []\n    fileIn = open(fileName, \"r\")\n    lines = fileIn.readlines()\n    fileIn.close()\n    for line in lines[11:]:\n        if line.strip() != \"\":\n            x = float(line[0:15])\n            y = float(line[22:37])\n            X.append(x)\n            Y.append(y)\n    return np.array(X), np.array(Y)\n\ndef differentiate(X, Y):\n    outX = []\n    outY = []\n    for i in range(1, len(X)-1):\n        outX.append(X[i])\n        itemY = (Y[i+1]-Y[i-1])/(X[i+1]-X[i-1])\n        outY.append(itemY)\n    return outX, outY\n\ndef gaussian(x, A, xc, w, y0):\n    # y = y0+A*sqrt(2/PI)/w*exp(-2*((x-xc)/w)^2)\n    return y0 + A*np.sqrt(2/np.pi)/w*np.exp(-2*np.square((x-xc)/w))\n\nif __name__ == \"__main__\":\n\n    if len(sys.argv) < 2:\n        print(\"Usage: fitscan [-d] scanfile\")\n        sys.exit()\n    \n    isDifferentiate = False\n    isSmooth = False\n    opts, args = getopt.getopt(sys.argv[1:], \"ds\")\n    for op, value in opts:\n        if op == \"-d\":\n            isDifferentiate = True\n        if op == '-s':\n            isSmooth = True\n\n\n    fileName = args[0]\n    headerInfo = readHeader(fileName)\n    X, Y = readData(fileName)\n\n    if isSmooth == True:\n        Y = savitzky_golay(Y, 21, 5)\n\n    # Plot experiment curve\n    fig, ax1 = plt.subplots()\n    ax1.plot(X,Y, \"bo-\", label=headerInfo[\"Motor1\"])\n    ax1.spines['top'].set_visible(True)\n    ax1.xaxis.set_ticks_position('bottom')\n    ax1.yaxis.set_ticks_position('left')\n    \n    if isDifferentiate == True:\n        X, Y = differentiate(X, Y)\n        # Plot differentiate curve\n        ax2 = ax1.twinx()\n        ax2.plot(X,Y, \"g^-\", label=\"Differentiate\")\n        ax2.yaxis.set_ticks_position('right')\n\n    \n    startX = X[0]\n    endX = X[-1]\n\n    # Set initial A and xc value\n    if Y[np.argmax(np.abs(Y))] > 0:\n        A = 1\n    else:\n        A = -1\n    xc = X[np.argmax(np.abs(Y))]\n    \n    avgY = np.average(Y)\n    \n    # least-squares with trf method\n    try:\n        popt, pcov = curve_fit(gaussian, X, Y, method='trf', p0=(A, xc, 1, 0))\n    except RuntimeError:\n        print(\"The least-squares (trf) minimization fails!\")\n        noLM1 = True\n    else:\n        noLM1 = False\n        A1 = popt[0]\n        xc1 = popt[1]\n        w1 = popt[2]\n        y01 = popt[3]\n        SStot1 = np.sum(np.square(Y-avgY))\n        SSres1 = np.sum(np.square(Y-gaussian(X, A1, xc1, w1, y01)))\n        Rsquare1 = 1 - SSres1/SStot1\n\n    \n    # least-squares with dogbox method\n    try:\n        popt, pcov = curve_fit(gaussian, X, Y, method='dogbox', p0=(A, xc, 1, 0))\n    except RuntimeError:\n        print(\"The least-squares (dogbox) minimization fails!\")\n        noLM2 = True\n    else:\n        noLM2 = False\n        A2 =
popt[0]\n        xc2 = popt[1]\n        w2 = popt[2]\n        y02 = popt[3]\n        SStot2 = np.sum(np.square(Y-avgY))\n        SSres2 = np.sum(np.square(Y-gaussian(X, A2, xc2, w2, y02)))\n        Rsquare2 = 1 - SSres2/SStot2\n    \n    # Set final value based on Rsquare\n    if (noLM2 == True and noLM1 == False) or (noLM1 == False and Rsquare1 >= Rsquare2):\n        A = A1\n        xc = xc1\n        w = w1\n        y0 = y01\n        Rsquare = Rsquare1\n    elif (noLM1 == True and noLM2 == False) or (noLM2 == False and Rsquare2 > Rsquare1):\n        A = A2\n        xc = xc2\n        w = w2\n        y0 = y02\n        Rsquare = Rsquare2\n    \n\n    if (noLM1 == False) or (noLM2 == False):\n        # Output result and plot fit curve\n        print(\"A = \" + str(A))\n        print(\"xc = \" + str(xc))\n        print(\"w = \" + str(w))\n        print(\"y0 = \" + str(y0))\n        print(\"R2 = \" + str(round(Rsquare, 2)))\n        print(\"FWHM = \" + str(round(fabs(w)*1000, 2)) + \" μm\")\n        XFit = np.linspace(startX, endX, 500, endpoint=True)\n        YFit = gaussian(XFit, A, xc, w, y0)\n        if isDifferentiate == False:\n            ax1.plot(XFit, YFit, \"r-\", label=\"Fit\")\n        else:\n            ax2.plot(XFit, YFit, \"r-\", label=\"Fit\")\n    else:\n        print(\"The gaussian fit failed!\")\n    \n    # Draw legend\n    if isDifferentiate == False:\n        plt.legend(loc='upper right', frameon=False)\n    else:\n        plt.legend(loc='lower right', frameon=False)\n    \n\n\n    plt.show()\n    \n\n\n\n","sub_path":"beamlineepics/bl17u1/fitscan/fitscan.py","file_name":"fitscan.py","file_ext":"py","file_size_in_byte":4974,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"230187409","text":"from Player import Player\r\nfrom Deck import Deck\r\nfrom BlackJackMechanics import BlackJackMechanics\r\n\r\n\r\nclass Driver():\r\n    player = Player()\r\n    dealer = Player(\"Dealer\")\r\n    deck = Deck(1)\r\n    bj = BlackJackMechanics()\r\n\r\n    print(\"\\nBEGINNING ROUND\")\r\n    play = True\r\n    while play:\r\n        bj.start_game(player, dealer, deck)\r\n        player_busted = bj.players_decision(player, dealer, deck)\r\n        if player_busted == False:\r\n            dealer_busted = bj.dealers_decision(player, dealer, deck)\r\n\r\n            if dealer_busted == False:\r\n                bj.results(player, dealer)\r\n\r\n        play = bj.play_again()\r\n        if play:\r\n            print(\"\\n\\nNEW ROUND\")\r\n\r\n\r\n\r\n\r\n","sub_path":"BlackJack/Driver.py","file_name":"Driver.py","file_ext":"py","file_size_in_byte":695,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"454136577","text":"import numpy as np\nimport PTC_Samurai as sam\nimport matplotlib.pyplot as plt\nimport sys\n\ndrkframe = sys.argv[1]\nfolder = sys.argv[2]\n\ndarkframes = np.load(drkframe)\n# darkframes = np.load(\"/Users/RajSeehra/University/Masters/Semester 2/Git Upload Folder/Return1/Raj/PTC bits/darkframes.npy\")\n\ndirectory = sam.get_file_list(folder)\n# directory = sam.get_file_list(\"/Users/RajSeehra/University/Masters/Semester 2/test folder\")\n\n# This file is generated using the Lowest_std_region.py on your brightest file.\ncrop_box = np.load(\"crop_box.npy\")\n# crop_box = np.load(\"/Users/RajSeehra/University/Masters/Semester 2/Git Upload Folder/Return1/Raj/PTC bits/crop_box.npy\")\n\n\ndef ptc_data(directory, darkframes, crop_box):\n    mean_values = np.zeros((100, 100, len(directory)))  # empty list which will collect the data in order: mean, std we want\n    std_values = np.zeros((100, 100, len(directory)))\n    x1 = int(crop_box[0,0])\n    x2 = int(crop_box[0,1])\n    y1 = int(crop_box[1,0])\n    y2 = int(crop_box[1,1])\n\n    for i in range (0, len(directory)):\n        array = sam.loadtiffs(directory[i])\n        for z in range (0, array.shape[2]):\n            array[:, :, z]
= np.subtract(array[:, :, z], darkframes)\n array = array[y1:y2, x1:x2, :] # crop the stack to the stable window.\n mean_values[:, :, i] = np.mean(array, 2) # mean for the file\n std_values[:, :, i] = np.std(array, 2) # std for the file\n\n return mean_values, std_values\n\n\nmean_data, std_data = ptc_data(directory, darkframes, crop_box)\n\nnp.save(\"mean_Pixel_PTCdata.npy\", mean_data)\nnp.save(\"std_Pixel_PTCdata.npy\", std_data)","sub_path":"Return1/Raj/PTC bits/subtractor_pixels.py","file_name":"subtractor_pixels.py","file_ext":"py","file_size_in_byte":1623,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"5035669","text":"class Solution:\n def twoSum(self, nums: List[int], target: int) -> List[int]:\n result = []\n lookup = {}\n for j, y in enumerate(nums):\n x = target - y\n i = lookup.get(x)\n if i is not None:\n result.append(i)\n result.append(j)\n break\n lookup[y] = j\n return result\n","sub_path":"src/p0001/python/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":381,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"104379807","text":"import unittest\n\nfrom modi.module.input_module.env import Env\nfrom queue import Queue\n\n\nclass TestEnv(unittest.TestCase):\n \"\"\"Tests for 'Env' class.\"\"\"\n\n def setUp(self):\n \"\"\"Set up test fixtures, if any.\"\"\"\n self.send_q = Queue()\n mock_args = (-1, -1, self.send_q)\n self.env = Env(*mock_args)\n\n def tearDown(self):\n \"\"\"Tear down test fixtures, if any.\"\"\"\n del self.env\n\n def test_get_temperature(self):\n \"\"\"Test get_temperature method.\"\"\"\n _ = self.env.temperature\n self.assertEqual(\n self.send_q.get(),\n Env.request_property(-1, Env.PropertyType.TEMPERATURE)\n )\n\n def test_get_humidity(self):\n \"\"\"Test get_humidity method.\"\"\"\n _ = self.env.humidity\n self.assertEqual(\n self.send_q.get(),\n Env.request_property(-1, Env.PropertyType.HUMIDITY)\n )\n\n def test_get_brightness(self):\n \"\"\"Test get_brightness method.\"\"\"\n _ = self.env.brightness\n self.assertEqual(\n self.send_q.get(),\n Env.request_property(-1, Env.PropertyType.BRIGHTNESS)\n )\n\n def test_get_red(self):\n \"\"\"Test get_red method.\"\"\"\n _ = self.env.red\n self.assertEqual(\n self.send_q.get(),\n Env.request_property(-1, Env.PropertyType.RED)\n )\n\n def test_get_green(self):\n \"\"\"Test get_green method.\"\"\"\n _ = self.env.green\n self.assertEqual(\n self.send_q.get(),\n Env.request_property(-1, Env.PropertyType.GREEN)\n )\n\n def test_get_blue(self):\n \"\"\"Test get_blue method.\"\"\"\n _ = self.env.blue\n self.assertEqual(\n self.send_q.get(),\n Env.request_property(-1, Env.PropertyType.BLUE)\n )\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","sub_path":"tests/module/input_module_tests/test_env.py","file_name":"test_env.py","file_ext":"py","file_size_in_byte":1852,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"328336739","text":"from __future__ import unicode_literals\n\nfrom django.db import models\nfrom django.utils import timezone\nfrom user.models import User\n\nclass Post(models.Model):\n user = models.ForeignKey(User, on_delete=models.CASCADE)\n title = models.CharField(max_length=144)\n \n content = models.TextField()\n created_at = models.DateTimeField(auto_now_add=True)\n \n\nclass Comment(models.Model):\n user = models.ForeignKey(User, on_delete=models.CASCADE)\n post = models.ForeignKey(Post, blank=True, 
on_delete=models.CASCADE)\n    \n    content = models.TextField()\n    created_at = models.DateTimeField(auto_now_add=True)\n\n    \n    def __str__(self):\n        return '%s -%s'%(self.user,self.content)","sub_path":"backend/board/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":699,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"620458165","text":"import torch\nimport torch.nn as nn\nfrom torchvision import transforms\nfrom torchvision.datasets import MNIST\n\n\nclass MLP(nn.Module):\n    def __init__(self, dims, multiplier=4):\n        super(MLP, self).__init__()\n        hidden = int(dims * multiplier)\n\n        self.out = nn.Sequential(\n            nn.Linear(dims, hidden),\n            nn.GELU(),\n            nn.Linear(hidden, dims)\n        )\n\n    def forward(self, x):\n        return self.out(x)\n\n\nclass MixerLayer(nn.Module):\n    def __init__(self, patch_size, hidden_dim):\n        super(MixerLayer, self).__init__()\n        seq = patch_size\n        dims = hidden_dim\n        # LayerNorm1\n        self.layer_norm1 = nn.LayerNorm(dims)\n        # mlp1\n        self.mlp1 = MLP(seq, multiplier=0.5)\n        # LayerNorm2\n        self.layer_norm2 = nn.LayerNorm(dims)\n        # mlp2\n        self.mlp2 = MLP(dims)\n\n    def forward(self, x):\n        out = self.layer_norm1(x).transpose(1, 2)\n        out = self.mlp1(out).transpose(1, 2)\n        out += x\n        out2 = self.layer_norm2(out)\n        out2 = self.mlp2(out2)\n        out2 += out\n        return out2\n\n\nclass MLPMixer(nn.Module):\n    def __init__(self, patch_size, hidden_dim, depth):\n        super(MLPMixer, self).__init__()\n        assert 28 % patch_size == 0, 'image_size must be divisible by patch_size'\n        assert depth > 1, 'depth must be larger than 1'\n        # image size\n        in_dims = 28\n        # hidden dimension\n        dims = hidden_dim\n        # depth\n        N = depth\n        # number of target classes\n        n_classes = 10\n        self.embedding = nn.Linear(in_dims, dims)\n        self.layers = nn.ModuleList()\n        for _ in range(N):\n            self.layers.append(MixerLayer(in_dims, dims))\n        self.gap = nn.AdaptiveAvgPool1d(1)\n        self.fc = nn.Linear(dims, n_classes)\n        self.dims = dims\n\n    def forward(self, x):\n        out = self.embedding(x)\n        out = out.permute(0, 2, 3, 1).view(x.size(0), -1, self.dims)\n        for layer in self.layers:\n            out = layer(out)\n        out = out.mean(dim=1)\n        out = self.fc(out)\n        return out\n# Define the training function\n\n\ndef train(model, train_loader, optimizer, n_epochs, criterion):\n    model.train()\n    for epoch in range(n_epochs):\n        for batch_idx, (data, target) in enumerate(train_loader):\n            batch_size_train = data.shape[0]\n            data, target = data.to(device), target.to(device)\n            optimizer.zero_grad()\n            pre_out = model(data)\n            targ_out = torch.nn.functional.one_hot(target, num_classes=10)\n            targ_out = targ_out.view((batch_size_train, 10)).float()\n            loss = criterion(pre_out, targ_out)\n            loss.backward()\n            optimizer.step()\n            if batch_idx % 100 == 0:\n                print('Train Epoch: {}/{} [{}/{}]\\tLoss: {:.6f}'.format(\n                    epoch, n_epochs, batch_idx * len(data), len(train_loader.dataset), loss.item()))\n\n# Define the test function\n\n\ndef test(model, test_loader, criterion):\n    model.eval()\n    test_loss = 0\n    num_correct = 0\n    total = 0\n    with torch.no_grad():\n        for data, target in test_loader:\n            batch_size_test = data.shape[0]\n            data, target = data.to(device), target.to(device)\n            pre_out = model(data)\n            targ_out = torch.nn.functional.one_hot(target, num_classes=10)\n            targ_out = targ_out.view((batch_size_test, 10)).float()\n            test_loss += criterion(pre_out, targ_out)  # accumulate the loss of each batch\n            t = pre_out.argmax(dim=1)\n            num_correct += sum(t == target)\n            total += batch_size_test\n    # accuracy\n    accuracy = num_correct/total\n    # average loss\n    test_loss /= len(test_loader.dataset)\n    print(\"Test set: Average loss: {:.4f}\\t Acc
{:.2f}\".format(\n test_loss, accuracy))\n\n\nif __name__ == '__main__':\n n_epochs = 5\n batch_size = 128\n learning_rate = 0.001\n\n transform = transforms.Compose(\n [transforms.ToTensor(),\n transforms.Normalize((0.1307,), (0.3081,))])\n\n trainset = MNIST(root='./data', train=True,\n download=True, transform=transform)\n train_loader = torch.utils.data.DataLoader(\n trainset, batch_size=batch_size, shuffle=True, num_workers=2)\n\n testset = MNIST(root='./data', train=False,\n download=True, transform=transform)\n test_loader = torch.utils.data.DataLoader(\n testset, batch_size=batch_size, shuffle=False, num_workers=2)\n\n # device = torch.device(\"cpu\")\n device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n\n #n = (28 * 28) // 4 ** 2\n model = MLPMixer(patch_size=4, hidden_dim=14, depth=12)\n model.to(device)\n\n optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)\n\n mse = nn.MSELoss()\n\n train(model, train_loader, optimizer, n_epochs, mse)\n\n test(model, test_loader, mse)\n","sub_path":"LAB2_for_student/src2/MLP_Mixer.py","file_name":"MLP_Mixer.py","file_ext":"py","file_size_in_byte":4897,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"236123091","text":"# Imports from external libraries\nimport sqlite3\n# Imports from internal libraries\nimport configs\n\n\nclass IndexDB:\n def __init__(self, path):\n self.db_path = path\n\n def get_connection(self):\n connection = sqlite3.connect(self.db_path)\n return connection\n\n def table_exists(self, name):\n connection = sqlite3.connect(self.db_path)\n cursor = connection.cursor()\n q = \"\"\"SELECT name FROM sqlite_master\n WHERE type= 'table' AND\n name = ?\n ORDER BY name;\n \"\"\"\n cursor.execute(q, [name])\n table = cursor.fetchone()\n connection.close()\n\n if not table:\n return False\n if name in table:\n return True\n else:\n return False\n\n def init_database(self):\n connection = sqlite3.connect(self.db_path)\n cursor = connection.cursor()\n\n q_IndexWord = \"\"\"\n CREATE TABLE IndexWord (\n word TEXT PRIMARY KEY\n );\n \"\"\"\n\n q_Posting = \"\"\"\n CREATE TABLE Posting (\n word TEXT NOT NULL,\n documentName TEXT NOT NULL,\n frequency INTEGER NOT NULL,\n indexes TEXT NOT NULL,\n PRIMARY KEY(word, documentName),\n FOREIGN KEY (word) REFERENCES IndexWord(word)\n );\n \"\"\"\n\n if not self.table_exists(\"IndexWord\"):\n cursor.execute(q_IndexWord)\n if not self.table_exists(\"Posting\"):\n cursor.execute(q_Posting)\n connection.commit()\n connection.close()\n\n return self\n\n def drop_all_tables(self):\n connection = sqlite3.connect(self.db_path)\n cursor = connection.cursor()\n q_drop_index_word = \"\"\"DROP TABLE IF EXISTS IndexWord\"\"\"\n q_drop_posting = \"\"\"DROP TABLE IF EXISTS Posting\"\"\"\n\n cursor.execute(q_drop_index_word)\n cursor.execute(q_drop_posting)\n\n connection.commit()\n connection.close()\n\n return self\n\n def reset_databse(self):\n self.drop_all_tables().init_database()\n return self\n\n\nindex_database = IndexDB(configs.DB_PATH)\n\nif __name__ == \"__main__\":\n index_database.reset_databse()\n connection = sqlite3.connect(index_database.db_path)\n c = connection.cursor()\n c.execute('''\n INSERT INTO IndexWord VALUES \n ('Spar'),\n ('Mercator'), \n ('Tuš');\n ''')\n\n c.execute('''\n INSERT INTO Posting VALUES \n ('Spar', 'spar.si/info.html', 1, '92'),\n ('Mercator', 'mercator.si/prodaja.html', 3, '4,12,55'), \n ('Mercator', 'tus.si/index.html', 1, '18'),\n ('Tuš', 'mercator.si/prodaja.html', 1, 
\n\n    connection.commit()\n    connection.close()\n","sub_path":"implementation_indexing/database.py","file_name":"database.py","file_ext":"py","file_size_in_byte":2742,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"503732114","text":"# -*- coding: utf-8 -*-\n\n\nfrom openerp.osv import fields, osv\n\nclass visitas(osv.osv):\n    _name = 'visitas.visitas'\n    _columns = {\n        'nombre_id': fields.many2one('res.partner', 'Nombre'),\n        'sexo': fields.selection([('h', 'hombre'), ('m', 'mujer')], 'Sexo'),\n        'edad': fields.integer('Edad'),\n        'altura': fields.float('Altura', digits=(3, 1)),\n        'fotografia': fields.related('nombre_id', 'image', type='binary', relation='res.partner', string='Fotografia'),\n        'entrada': fields.date('Entrada'),\n        'salida': fields.date('Salida')\n    }","sub_path":"practica7/visitas.py","file_name":"visitas.py","file_ext":"py","file_size_in_byte":571,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"214194777","text":"from Crawler.goodsParser import GoodsParser\n\ngoods = GoodsParser()\nwith open('0.html', 'r', encoding='utf8') as f:\n    html = f.read()\ngoods.parser_goods(html, 'the_asin', '')\ncount = goods._get_review_count(html_code=html)\ncode = goods._get_review_rating(html_code=html)\nprint(count)\nprint(code)","sub_path":"rw_count_and_rrg_test/rrg_test.py","file_name":"rrg_test.py","file_ext":"py","file_size_in_byte":296,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"327498961","text":"import os\nimport subprocess\nfrom pathlib import Path\nfrom dotenv import load_dotenv\nfrom datetime import datetime\nimport requests\nimport json\nimport time\nfrom typing import List, Tuple\n\n\ndef main():\n    fail_limit, sleep_time = get_runtime_vars()\n    results = [0] * fail_limit\n    while True:\n        return_code = check_stream()\n        results = process_results(results, return_code)\n\n        time.sleep(sleep_time)\n\n\ndef get_runtime_vars() -> Tuple[int, int]:\n    # int(os.getenv(...)) would raise TypeError when the variable is unset,\n    # so read first and only convert when a value is present.\n    fail_limit = os.getenv(\"AMOUNT_OF_FAILS_BEFORE_NOTIFICATION\")\n    fail_limit = int(fail_limit) if fail_limit is not None else 1\n\n    sleep_time = os.getenv(\"SLEEP_TIME\")\n    sleep_time = int(sleep_time) if sleep_time is not None else 5\n\n    return fail_limit, sleep_time\n\n\ndef check_stream():\n    # path of this script\n    script_path = Path(os.path.abspath(__file__))\n    script_folder_path = script_path.parent\n\n    check_stream_script = f'{script_folder_path.joinpath(\"check_stream.sh\")} -y {os.getenv(\"YOUTUBE_URL\")}'\n\n    encode_process = subprocess.Popen(\n        check_stream_script,\n        stdout=subprocess.PIPE,\n        stderr=subprocess.PIPE,\n        shell=True\n    )\n\n    stdout, stderr = encode_process.communicate()\n\n    return_code = encode_process.returncode\n\n    return return_code\n\n\ndef process_results(results: List[int], return_code: int) -> List[int]:\n    results.append(return_code)\n    results = results[1:]\n\n    time_tag = datetime.now().strftime('%Y-%m-%d %H:%M:%S')\n    if all(results):\n        print(f\"[{time_tag}] [ERROR] Stream is down!!!!\")\n        res = post_message_to_slack(f\"[{time_tag}] Stream is down!\")\n        if not res[\"ok\"]:\n            print(\"    FAILED TO SEND TO SLACK!!!!\")\n            print(\"    REASON: \", res[\"error\"])\n        else:\n            print(\"    SENT TO SLACK!\")\n    elif any(results) and results[-1]:\n        fails = len(results) - results.count(0)\n        print(\n            f\"[{time_tag}] [WARNING] {fails}/{len(results)} of recent checks failed.\"\n        )\n    else:\n        print(f\"[{time_tag}] [OK] Stream is still up\")\n\n    return results\n\n\ndef post_message_to_slack(text: str, blocks: List = None):
\n    return requests.post('https://slack.com/api/chat.postMessage', {\n        'token': os.getenv(\"SLACK_TOKEN\"),\n        'channel': os.getenv(\"SLACK_CHANNEL\"),\n        'text': text,\n        'icon_url': os.getenv(\"SLACK_ICON_URL\"),\n        'username': os.getenv(\"SLACK_USER_NAME\"),\n        'blocks': json.dumps(blocks) if blocks else None\n    }).json()\n\n\nif __name__ == \"__main__\":\n    load_dotenv()\n    for var in [\"YOUTUBE_URL\", \"SLACK_TOKEN\", \"SLACK_CHANNEL\", \"SLACK_USER_NAME\"]:\n        if os.getenv(var) is None:\n            print(f\"env var {var} not set\")\n            exit()\n    main()\n","sub_path":"__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":2703,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"92193178","text":"from keras.models import Sequential\r\nfrom keras.layers.embeddings import Embedding\r\nfrom keras.preprocessing.text import Tokenizer\r\nfrom keras.preprocessing.sequence import pad_sequences\r\nfrom keras.layers import Convolution1D\r\nfrom keras.layers import Dense\r\nfrom keras.layers import Flatten\r\nfrom keras.layers import Dropout\r\n\r\nimport mysql.connector\r\nimport re\r\nimport string\r\nfrom nltk.corpus import stopwords\r\nimport numpy as np\r\nfrom nltk.tokenize import word_tokenize\r\nfrom nltk.stem.porter import PorterStemmer\r\nimport matplotlib.pyplot as plt\r\nfrom sklearn.metrics import confusion_matrix\r\n# define documents\r\nfrom itertools import chain\r\n#import time\r\nfrom sklearn.metrics import accuracy_score\r\nfrom sklearn.metrics import precision_score\r\nfrom sklearn.metrics import recall_score\r\nfrom sklearn.metrics import f1_score\r\nfrom sklearn.metrics import cohen_kappa_score\r\nfrom sklearn.metrics import roc_auc_score\r\nfrom sklearn.manifold import TSNE\r\n\r\n#import plotly.express as go\r\n#import plotly.plotly as py\r\n\r\nimport chart_studio.plotly as py\r\nimport plotly.graph_objects as go\r\n\r\n\r\ncnx = mysql.connector.connect(user='root', password='',\r\n                              host='127.0.0.1',\r\n                              database='new_schema')\r\nprint(cnx)\r\ncursor = cnx.cursor()\r\n\r\nqueryString = \"SELECT * FROM new_table3 ORDER BY RAND() LIMIT 0,4000\"\r\ncursor.execute(queryString)\r\nfetchrows = 4000\r\nrows = cursor.fetchmany(fetchrows)\r\nmessages = []\r\nlabels = []\r\nfor row in rows:\r\n    messages.append(str(row[1]))\r\n    labels.append(int(row[2]))\r\n\r\n#messages=['hi.this is test','welcome bro test','awesome dance',\"who was that?\"]\r\nmessages1 = []\r\n\r\nfor i in messages:\r\n    tokens = word_tokenize(i)\r\n    #print(tokens)\r\n\r\n    porter = PorterStemmer()\r\n    stemmed = [porter.stem(word) for word in tokens]\r\n    tokenized_reports = [word_tokenize(report) for report in stemmed]\r\n    # View tokenized_reports\r\n    #print(tokenized_reports)\r\n\r\n    regex = re.compile('[%s]' % re.escape(string.punctuation))  # see documentation here: http://docs.python.org/2/library/string.html\r\n\r\n    tokenized_reports_no_punctuation = []\r\n\r\n    for review in tokenized_reports:\r\n\r\n        new_review = []\r\n        for token in review:\r\n            new_token = regex.sub(u'', token)\r\n            if not new_token == u'':\r\n                new_review.append(new_token)\r\n\r\n        tokenized_reports_no_punctuation.append(new_review)\r\n\r\n    #print(tokenized_reports_no_punctuation)\r\n\r\n    tokenized_reports_no_stopwords = []\r\n    for report in tokenized_reports_no_punctuation:\r\n        new_term_vector = []\r\n        for word in report:\r\n            if not word in stopwords.words('english'):\r\n                new_term_vector.append(word)\r\n
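        # The chain above boils down to: tokenize -> stem -> strip punctuation\r\n        # -> drop English stopwords; the surviving tokens are re-joined into\r\n        # plain strings below so the Keras Tokenizer can fit on cleaned text.\r\n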
        tokenized_reports_no_stopwords.append(new_term_vector)\r\n\r\n    #print(tokenized_reports_no_stopwords)\r\n\r\n    tokenized_reports_no_stopwords1 = [x for x in tokenized_reports_no_stopwords if x != []]\r\n    #print(tokenized_reports_no_stopwords1)\r\n    #print(list(chain.from_iterable(tokenized_reports_no_stopwords1)))\r\n    tokenized_reports_no_stopwords2 = list(chain.from_iterable(tokenized_reports_no_stopwords1))\r\n    messages1.append(tokenized_reports_no_stopwords2)\r\n    #print(messages1)\r\n\r\nmessages2 = []\r\nfor k in messages1:\r\n    #print(k)\r\n    t = \" \".join(str(x) for x in k)\r\n    messages2.append(t)\r\nt = Tokenizer()\r\nt.fit_on_texts(messages2)\r\n#print(t.word_counts)\r\n#print(t.document_count)\r\n#print(t.word_index)\r\n#print(t.word_docs)\r\nvocab_size = len(t.word_index) + 1\r\nprint(vocab_size)\r\nprint(\"This is the vocabulary size\")\r\n\r\n# integer encode the documents\r\nencoded_docs = t.texts_to_sequences(messages2)\r\n#print(encoded_docs)\r\n\r\n# pad documents to a max length of 300 words\r\nmax_length = 300\r\npadded_docs = pad_sequences(encoded_docs, maxlen=max_length, padding='post')\r\n#print(padded_docs)\r\n#print(padded_docs.shape)\r\n\r\nembeddings_index = dict()\r\nf = open(r'C:\glove.6b\glove.6B.300d.txt', encoding=\"utf8\")  # raw string so the backslashes survive\r\nfor line in f:\r\n    values = line.split()\r\n    word = values[0]\r\n    coefs = np.asarray(values[1:], dtype='float32')\r\n    embeddings_index[word] = coefs\r\nf.close()\r\nprint('Loaded %s word vectors.' % len(embeddings_index))\r\n\r\nembedding_matrix = np.zeros((vocab_size, 300))\r\n#print(embedding_matrix)\r\n#print(embedding_matrix.shape)\r\nfor word, i in t.word_index.items():\r\n    embedding_vector = embeddings_index.get(word)\r\n    if embedding_vector is not None:\r\n        # words not found in embedding index will be all-zeros.\r\n        embedding_matrix[i] = embedding_vector\r\n#print(embedding_matrix)\r\n\r\nsize = int(0.8 * fetchrows)\r\nsize1 = int(0.2 * fetchrows)\r\nprint(size)\r\nprint(size1)\r\n\r\nmodel = Sequential()\r\ne = Embedding(input_dim=vocab_size, output_dim=300, input_length=max_length, weights=[embedding_matrix], trainable=False)\r\nmodel.add(e)\r\n\r\nmodel.add(Convolution1D(128, 3, padding='same'))\r\nmodel.add(Convolution1D(64, 3, padding='same'))\r\nmodel.add(Convolution1D(32, 3, padding='same'))\r\n\r\nmodel.add(Flatten())\r\n\r\nmodel.add(Dense(300, activation='sigmoid'))\r\nmodel.add(Dropout(0.4))\r\n\r\nmodel.add(Dense(300, activation='sigmoid'))\r\nmodel.add(Dropout(0.4))\r\n\r\nmodel.add(Dense(300, activation='sigmoid'))\r\nmodel.add(Dropout(0.4))\r\n\r\nmodel.add(Dense(1, activation='sigmoid'))\r\n\r\nmodel.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])\r\n\r\nprint(model.summary())\r\n\r\nhistory = model.fit(x=padded_docs[:size], y=labels[:size], validation_split=0.33, batch_size=15, epochs=100)\r\nplt.plot(history.history['acc'])\r\nplt.plot(history.history['val_acc'])\r\nplt.title('Model Accuracy')\r\nplt.ylabel('Accuracy')\r\nplt.xlabel('Epoch')\r\nplt.legend(['train', 'test'], loc='lower right')\r\nplt.show()\r\n\r\nloss, accuracy = model.evaluate(padded_docs[:size], labels[:size], verbose=0)\r\nprint('Training Accuracy: %f' % (accuracy * 100))\r\nprint('Training Loss: %f' % loss)\r\nloss1, accuracy1 = model.evaluate(padded_docs[-size1:], labels[-size1:], verbose=0)\r\nprint('Testing Accuracy : %f' % (accuracy1 * 100))\r\nprint('Testing Loss: %f' % loss1)\r\n\r\ny_predict = model.predict(padded_docs[-size1:], verbose=0)\r\nyhat_classes = model.predict_classes(padded_docs[-size1:], verbose=0)
\r\n\r\n# reduce to 1d array\r\nyhat_probs = y_predict[:, 0]\r\nprint(yhat_probs)\r\nyhat_classes = yhat_classes[:, 0]\r\nprint(\"yhat_classes\")\r\nprint(yhat_classes)\r\n\r\nf = labels[-size1:]\r\nprint(\"labels [-size1:]\")\r\nprint(f)\r\n\r\n#*****************************************\r\nacc = history.history['acc']\r\nval_acc = history.history['val_acc']\r\nloss = history.history['loss']\r\nval_loss = history.history['val_loss']\r\n\r\n#plt.plot(history.history['acc'])\r\n#plt.plot(history.history['val_acc'])\r\n#plt.plot(history.history['loss'])\r\n#plt.plot(history.history['val_loss'])\r\nx = range(1, len(acc) + 1)\r\n\r\nplt.figure(figsize=(12, 5))\r\nplt.subplot(1, 2, 1)\r\nplt.plot(x, acc, 'b', label='Training acc')\r\nplt.plot(x, val_acc, 'r', label='Validation acc')\r\nplt.title('Training and Validation accuracy')\r\nplt.ylabel('Accuracy')\r\nplt.xlabel('Epochs')\r\nplt.legend()\r\nplt.subplot(1, 2, 2)\r\nplt.plot(x, loss, 'b', label='Training loss')\r\nplt.plot(x, val_loss, 'r', label='Validation loss')\r\nplt.title('Training and Validation loss')\r\nplt.ylabel('Loss')\r\nplt.xlabel('Epochs')\r\nplt.legend()\r\n\r\n#************************************\r\n\r\n# sklearn metrics expect (y_true, y_pred) in that order\r\n# accuracy: (tp + tn) / (p + n)\r\naccuracy = accuracy_score(labels[-size1:], yhat_classes)\r\nprint('Accuracy: %f' % accuracy)\r\n\r\n# precision tp / (tp + fp)\r\nprecision = precision_score(labels[-size1:], yhat_classes)\r\nprint('Precision: %f' % precision)\r\n\r\n# recall: tp / (tp + fn)\r\nrecall = recall_score(labels[-size1:], yhat_classes)\r\nprint('Recall: %f' % recall)\r\n\r\n# f1: 2 tp / (2 tp + fp + fn)\r\nf1 = f1_score(labels[-size1:], yhat_classes)\r\nprint('F1 score: %f' % f1)\r\n\r\n# kappa\r\nkappa = cohen_kappa_score(labels[-size1:], yhat_classes)\r\nprint('Cohens Kappa: %f' % kappa)\r\n\r\n# ROC AUC\r\nauc = roc_auc_score(labels[-size1:], yhat_probs)\r\nprint('ROC AUC: %f' % auc)\r\n\r\n# confusion matrix\r\nmatrix = confusion_matrix(labels[-size1:], yhat_classes)\r\nprint(matrix)\r\n","sub_path":"Phishing_CNN - Stem.py","file_name":"Phishing_CNN - Stem.py","file_ext":"py","file_size_in_byte":8071,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"144056816","text":"import socket\nimport threading\n\nclass JaeServer:\n    # A general-purpose TCP server\n    def __init__(self, host, port):\n        self.host = host\n        self.port = port\n        self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n        self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n        self.sock.bind((self.host, self.port))\n        self.thread_func = None\n\n    def set_function_thread(self, t_func):\n        self.thread_func = t_func\n\n    def run(self):\n        self.sock.listen()\n        while True:\n            client, addr = self.sock.accept()\n            print(\"Client Connected\")\n            print(client)\n            print(addr)\n            threading.Thread(target=self.thread_func, args=(client, addr)).start()","sub_path":"jae_server/jae_server.py","file_name":"jae_server.py","file_ext":"py","file_size_in_byte":753,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"427703153","text":"from mininet.net import Mininet\nfrom mininet.node import Controller, RemoteController, OVSKernelSwitch, UserSwitch\nfrom mininet.cli import CLI\nfrom mininet.log import setLogLevel\nfrom mininet.link import Link, TCLink\n\ndef topology():\n    net = Mininet( controller=RemoteController, link=TCLink, switch=OVSKernelSwitch )\n\n    # Add hosts and switches\n    h1 = net.addHost('h1', ip=\"10.10.10.1/24\", mac=\"00:00:00:00:00:01\" )
mac=\"00:00:00:00:00:01\" )\n h2 = net.addHost('h2', ip=\"20.20.20.1/24\", mac=\"00:00:00:00:00:02\" )\n r1 = net.addHost('r1', mac=\"00:00:00:00:01:00\" )\n s1 = net.addSwitch('s1')\n c0 = net.addController( 'c0', controller=RemoteController, ip='127.0.0.1', port=6633 )\n\n net.addLink(h1, s1)\n net.addLink(s1, r1)\n net.addLink(s1, r1)\n net.addLink(r1, h2)\n net.build()\n c0.start()\n s1.start([c0])\n r1.cmd(\"ifconfig r1-eth0 0\")\n r1.cmd(\"ifconfig r1-eth1 0\")\n r1.cmd(\"ifconfig r1-eth0 hw ether 00:00:00:00:01:01\")\n r1.cmd(\"ifconfig r1-eth1 hw ether 00:00:00:00:01:02\")\n #assign ip addr\n r1.cmd(\"ip addr add 10.10.10.2/24 brd + dev r1-eth0\")\n r1.cmd(\"ip addr add 10.10.10.3/24 brd + dev r1-eth1\")\n r1.cmd(\"ip addr add 20.20.20.2/24 brd + dev r1-eth2\")\n # r1.cmd(\"ip route add default via r1-eth1\")\n # r1.cmd(\"route add -net 10.10.10.0 netmask 255.255.255.0 r1-eth1\")\n # r1.cmd(\"route add -net 20.20.20.0 netmask 255.255.255.0 r1-eth2\")\n r1.cmd(\"echo 1 > /proc/sys/net/ipv4/ip_forward\")\n # s1.setIP(\"10.10.10.2/24\", intf = 's1-eth1')\n # s1.setIP(\"30.30.30.1/24\", intf = 's1-eth2')\n # s1.setIP(\"40.40.40.1/24\", intf = 's1-eth3')\n h1.cmd(\"ip route add default via 10.10.10.2\")\n h2.cmd(\"ip route add default via 20.20.20.2\")\n\n print (\"*** Running CLI\")\n CLI( net )\n\n print (\"*** Stopping network\")\n net.stop()\n \nif __name__ == '__main__':\n setLogLevel( 'info' )\n topology() ","sub_path":"MININET TOPO.py","file_name":"MININET TOPO.py","file_ext":"py","file_size_in_byte":1861,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"19658610","text":"\"\"\"\nThis module is used for obtaining execution paramteters.\n\n\"\"\"\n\nimport re\n\n\ndef get_execution_parameters(options, args):\n \"\"\"\n Obtains execution paramteters.\n \"\"\"\n randomSeed_re = re.compile(r'[R|r]andom[S|s]eed=[0-9]+')\n inputFile_re = re.compile(r'[I|i]nput[F|f]ile=.*\\.in')\n\n if options.debug:\n print( 'option debug is activated')\n else:\n print( 'option debug is deactivated') \n if options.randomized:\n print ('option randomized is activated')\n else:\n print( 'option randomized is deactivated') \n if options.debug:\n print(\"Command-line parameters are:\", end=' ')\n for arg in args:\n print( arg, end = ' ')\n print() \n if len(args) > 2:\n raise ValueError(\"Too many command line arguments. Format: 'inputFile=XXX.in radnomSeed=999'.\")\n if len(args) <= 1:\n raise ValueError(\"Too few command line arguments. 
\n\n    parameters = {'InputFile': 'XXX.in', 'RandomSeed': 111}\n    if not randomSeed_re.match(args[0]):\n        if not randomSeed_re.match(args[1]):\n            raise ValueError(\"There must be an argument: 'randomSeed=DDD', where 'DDD' is a positive integer.\")\n        else:\n            parameters['RandomSeed'] = args[1].split('=')[1]\n    else:\n        parameters['RandomSeed'] = args[0].split('=')[1]\n    if not inputFile_re.match(args[0]):\n        if not inputFile_re.match(args[1]):\n            raise ValueError(\"There must be an argument: 'inputFile=XXX.in' where 'XXX' is the file name.\")\n        else:\n            parameters['InputFile'] = args[1].split('=')[1]\n    else:\n        parameters['InputFile'] = args[0].split('=')[1]\n    return parameters\n","sub_path":"command_line.py","file_name":"command_line.py","file_ext":"py","file_size_in_byte":1737,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"652304960","text":"# acmp.ru/index.asp?main=task&id_task=41\n\nimport sys\nN, = [int(s) for s in sys.stdin.readline().strip().split()]\nlst = [int(s) for s in sys.stdin.readline().strip().split()]\n\n# counting sort over the value range [-100, 100]\nlst_count = [0] * 201\n\ni = 0\nwhile i < len(lst):\n    lst_count[lst[i] + 100] += 1\n    i += 1\n\n# print(lst_count)\n\nlst_sort = []\ni = 0\nwhile i < len(lst_count):\n    if lst_count[i] != 0:\n        p = 1\n        while p <= lst_count[i]:\n            lst_sort.append(i - 100)\n            p += 1\n    i += 1\n\np = 0\nwhile p < len(lst_sort):\n    lst_sort[p] = str(lst_sort[p])\n    p += 1\nprint(' '.join(lst_sort))\n","sub_path":"0041-sort_counting.py","file_name":"0041-sort_counting.py","file_ext":"py","file_size_in_byte":674,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"350488593","text":"# p2wsh input (2-of-2 multisig)\n# p2wpkh output\n\nimport argparse\nimport hashlib\nimport ecdsa\n\ndef dSHA256(data):\n    hash_1 = hashlib.sha256(data).digest()\n    hash_2 = hashlib.sha256(hash_1).digest()\n    return hash_2\n\ndef hash160(s):\n    '''sha256 followed by ripemd160'''\n    return hashlib.new('ripemd160', hashlib.sha256(s).digest()).digest()\n\ndef privkey_to_pubkey(privkey):\n    signing_key = ecdsa.SigningKey.from_string(privkey, curve=ecdsa.SECP256k1)  # Don't forget to specify the curve\n    verifying_key = signing_key.get_verifying_key()\n\n    # Use this code block if the address you gave corresponds to the compressed public key\n    x_cor = bytes.fromhex(verifying_key.to_string().hex())[:32]  # The first 32 bytes are the x coordinate\n    y_cor = bytes.fromhex(verifying_key.to_string().hex())[32:]  # The last 32 bytes are the y coordinate\n    if int.from_bytes(y_cor, byteorder=\"big\", signed=True) % 2 == 0:  # We need to turn the y_cor into a number.\n        public_key = bytes.fromhex(\"02\" + x_cor.hex())\n    else:\n        public_key = bytes.fromhex(\"03\" + x_cor.hex())\n    return public_key\n\n################################\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"--cust_privkey\", \"-csk\", help=\"private key of customer for escrow\", required=True)\nparser.add_argument(\"--merch_privkey\", \"-msk\", help=\"private key of merchant for escrow\", required=True)\nparser.add_argument(\"--merch_close_pubkey\", \"-mcpk\", help=\"public key of merchant output\", required=True)\nparser.add_argument(\"--cust_close_pubkey\", \"-ccpk\", help=\"public key of cust close to-self output\", required=True)\nparser.add_argument(\"--revocation_lock\", \"-rl\", help=\"revocation lock (hash160{revocation_secret})\", required=True)
\nparser.add_argument(\"--merch_disp_pubkey\", \"-mdpk\", help=\"public key of merchant dispute\", required=True)\nparser.add_argument(\"--to_self_delay\", \"-tsd\", help=\"to_self_delay (in units of blocks) for the merchant's to-self output\", required=True)\nparser.add_argument(\"--txid\", \"-tx\", help=\"txid of outpoint as hex string\", required=True)\nparser.add_argument(\"--index\", \"-ind\", help=\"index of outpoint (default=0)\", default=0, required=False)\nparser.add_argument(\"--amount_btc\", \"-a\", help=\"amount of btc in\", required=True)\nparser.add_argument(\"--script_output_btc\", \"-cso\", help=\"btc to cust close script output\", required=True)\nparser.add_argument(\"--merch_output_btc\", \"-mo\", help=\"btc to merchant close output\", required=True)\nparser.add_argument(\"--verbose\", \"-v\", help=\"increase output verbosity\", action=\"store_true\")\nargs = parser.parse_args()\n\n################################\nverbose = args.verbose\nif verbose:\n    print(\"<============Tx Details============>\")\n\n# version is 4-bytes little endian. Version 2 should be default\nversion = bytes.fromhex(\"0200 0000\")\nmarker = bytes.fromhex(\"00\")  # this must be 00\nflag = bytes.fromhex(\"01\")  # this must be 01\n\n# txID_str = \"f4df16149735c2963832ccaa9627f4008a06291e8b932c2fc76b3a5d62d462e1\"\n# tx_index = 0  # index starts at 0\ntxID_str = args.txid\ntxid = (bytes.fromhex(txID_str))[::-1]\ntx_index = int(args.index)\nindex = tx_index.to_bytes(4, byteorder=\"little\", signed=False)\n\nnSequence_str = \"ffffffff\"\nsequence = bytes.fromhex(nSequence_str)\n\ninput_amount_sat = int(float(args.amount_btc) * 100000000)\nmerch_output_value_sat = int(float(args.merch_output_btc) * 100000000)\nscript_output_value_sat = int(float(args.script_output_btc) * 100000000)\n\ninput_amount = input_amount_sat.to_bytes(8, byteorder=\"little\", signed=True)\nmerch_output_value = merch_output_value_sat.to_bytes(8, byteorder=\"little\", signed=True)\nscript_output_value = script_output_value_sat.to_bytes(8, byteorder=\"little\", signed=True)\nop_return_output_value = (0).to_bytes(8, byteorder=\"little\", signed=True)\n\n# keys for the funding tx 2-of-2 multisig\nmerch_privkey_hex = args.merch_privkey\nmerch_privkey = bytes.fromhex(merch_privkey_hex)\nmerch_pubkey = privkey_to_pubkey(merch_privkey)\n\ncust_privkey_hex = args.cust_privkey\ncust_privkey = bytes.fromhex(cust_privkey_hex)\ncust_pubkey = privkey_to_pubkey(cust_privkey)\n\ncust_close_pubkey = bytes.fromhex(args.cust_close_pubkey)\n\nmerch_disp_pubkey = bytes.fromhex(args.merch_disp_pubkey)\n\nmerch_close_pubkey = bytes.fromhex(args.merch_close_pubkey)\n\nrevocation_lock = bytes.fromhex(args.revocation_lock)\n\nescrow_script = (\n    bytes.fromhex(\"5221\")\n    + merch_pubkey\n    + bytes.fromhex(\"21\")\n    + cust_pubkey\n    + bytes.fromhex(\"52ae\")\n)\n\n# P2WSH cust-close scriptPubKey\n# 0x63 OP_IF\n# 0xa8 OP_SHA256\n# 0x20 OP_DATA - len(revocation_lock {sha256[revocation-secret]})\n# revocation_lock\n# 0x88 OP_EQUALVERIFY\n# 0x21 OP_DATA - len(merch_disp_pubkey)\n# merch_disp_pubkey\n# 0x67 OP_ELSE\n# 0x__ OP_DATA - len(to_self_delay) (probably ~0x02)\n# to_self_delay\n# 0xb2 OP_CHECKSEQUENCEVERIFY\n# 0x75 OP_DROP\n# 0x21 OP_DATA - len(cust_close_pubkey)\n# cust_close_pk\n# 0x68 OP_ENDIF\n# 0xac OP_CHECKSIG\n\n\nnSequence_as_blocks = int(args.to_self_delay, 16)\n# todo: find a nicer way to do this\n# (two hex characters encode one byte)\nl = len(args.to_self_delay) // 2
\nshort_sequence = nSequence_as_blocks.to_bytes(l, byteorder=\"little\", signed=False)\n\n\ncust_close_script = (\n    bytes.fromhex(\"63 a8 20\")\n    + revocation_lock\n    + bytes.fromhex(\"88 21\")\n    + merch_disp_pubkey\n    + bytes.fromhex(\"67\")\n    + (len(short_sequence)).to_bytes(1, byteorder=\"little\", signed=False)\n    + short_sequence\n    + bytes.fromhex(\"b2 75 21\")\n    + cust_close_pubkey\n    + bytes.fromhex(\"68 ac\")\n)\n\nscript_sha32 = hashlib.sha256(cust_close_script).digest()\noutput_scriptPK = bytes.fromhex(\"0020\") + script_sha32\n\nto_merch_scriptPK = bytes.fromhex(\"0014\") + hash160(merch_close_pubkey)\n\nop_return_scriptPK = (\n    # 0x6a OP_RETURN\n    bytes.fromhex(\"6a\")\n    # OP_DATA - needs to cover the length of the RL and cust_close_pk (32 bytes)\n    + (len(revocation_lock) + 33).to_bytes(1, byteorder=\"little\", signed=False)\n    + revocation_lock\n    + cust_close_pubkey\n)\nif verbose:\n    print(\"1 - to_customer: \", output_scriptPK.hex())\n    print(\"2 - to_merchant: \", to_merch_scriptPK.hex())\n    print(\"3 - OP_RETURN script_pubkey: \", op_return_scriptPK.hex())\n\nlocktime = bytes.fromhex(\"00000000\")\n\nsighash = bytes.fromhex(\"01000000\")\nsighash_type_flag = bytes.fromhex(\"01\")\n\ntx_in_count = bytes.fromhex(\"01\")\ntx_out_count = bytes.fromhex(\"03\")\n\n##########################################\n\n# hashPrevOuts and outpoint\noutpoint = (\n    txid\n    + index\n)\n\nhashPrevOuts = dSHA256(outpoint)\n\n# hashSequence\nhashSequence = dSHA256(sequence)\n\n# hashOutputs and output\noutputs = (\n    script_output_value\n    + (len(output_scriptPK)).to_bytes(1, byteorder=\"little\", signed=False)\n    + output_scriptPK\n\n    + merch_output_value\n    + (len(to_merch_scriptPK)).to_bytes(1, byteorder=\"little\", signed=False)\n    + to_merch_scriptPK\n\n    + op_return_output_value\n    + (len(op_return_scriptPK)).to_bytes(1, byteorder=\"little\", signed=False)\n    + op_return_scriptPK\n)\nif verbose:\n    print(\"hashOutputs preimage: \", outputs.hex())\n\nhashOutputs = dSHA256(outputs)\nif verbose:\n    print(\"hashOutputs: \", hashOutputs.hex())\n\nscriptcode = (\n    (len(escrow_script)).to_bytes(1, byteorder=\"little\", signed=False)\n    + escrow_script\n)\n\n# serialized bip_143 object\nbip_143 = (\n    version\n    + hashPrevOuts\n    + hashSequence\n    + outpoint\n    + scriptcode\n    + input_amount\n    + sequence\n    + hashOutputs\n    + locktime\n    + sighash\n)\nif verbose:\n    print(\"Tx Preimage: \", bip_143.hex())\n    print(\"<============Tx Details============>\")\n\nhashed_bip_143 = dSHA256(bip_143)\n\nsigning_key_merch = ecdsa.SigningKey.from_string(merch_privkey, curve=ecdsa.SECP256k1)  # Don't forget to specify the curve\nsignature_merch = signing_key_merch.sign_digest(hashed_bip_143, sigencode=ecdsa.util.sigencode_der_canonize)\n\nsigning_key_cust = ecdsa.SigningKey.from_string(cust_privkey, curve=ecdsa.SECP256k1)  # Don't forget to specify the curve\nsignature_cust = signing_key_cust.sign_digest(hashed_bip_143, sigencode=ecdsa.util.sigencode_der_canonize)\n\nwitness = (\n    # indicate the number of stack items for the txin\n    bytes.fromhex(\"04\")\n\n    # OP_CHECKMULTISIG bug\n    + bytes.fromhex(\"00\")\n\n    # signature 1\n    + (len(signature_merch)+1).to_bytes(1, byteorder=\"little\", signed=False)\n    + signature_merch\n    + sighash_type_flag\n\n    # signature 2\n    + (len(signature_cust)+1).to_bytes(1, byteorder=\"little\", signed=False)\n    + signature_cust\n    + sighash_type_flag\n\n    # witnessScript\n    # This is the script that the creator of this transaction needs to provide, and\n    # solve, in order to redeem the UTXO
 listed in the input\n    + (len(escrow_script)).to_bytes(1, byteorder=\"little\", signed=False)\n    + escrow_script\n)\n\nscriptSig = (\n    bytes.fromhex(\"00\")  # length of empty scriptSig\n)\n\nfinal_tx = (\n    version\n    + marker\n    + flag\n    + tx_in_count\n    + outpoint\n    + scriptSig\n    + sequence\n    + tx_out_count\n    + outputs\n    + witness\n    + locktime\n)\n\nprint(final_tx.hex())\n","sub_path":"tx/tx_builder/bitcoin/cust_close_from_escrow.py","file_name":"cust_close_from_escrow.py","file_ext":"py","file_size_in_byte":9074,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"333274438","text":"import os\nfrom types import SimpleNamespace\n\n# Global Path Constants\nmigration_dir = os.path.join(os.path.abspath(os.sep), \"migration\")\nmongo_migration_dir = os.path.join(migration_dir, \"mongo-dump\")\nmigration_arg = \"dir\"\nprogram_file_dir = os.environ.get(\"ProgramW6432\")\nprogram_data_dir = os.environ.get(\"ProgramData\")\n\n# Variables for calling EXEs\nslconf_cmd = os.path.join(program_file_dir, \"National Instruments\", \"Shared\", \"Skyline\", \"NISystemLinkServerConfigCmd.exe\")\nslconf_cmd_stop_all = slconf_cmd + \" stop-all-services wait \"\nslconf_cmd_start_all = slconf_cmd + \" start-all-services wait \"\nslconf_cmd_stop_service = slconf_cmd + \" stop-service \"\nslconf_cmd_start_service = slconf_cmd + \" start-service \"\nmongo_dump = os.path.join(program_file_dir, \"National Instruments\", \"Shared\", \"Skyline\", \"NoSqlDatabase\", \"bin\", \"mongodump.exe\")\nmongo_restore = os.path.join(program_file_dir, \"National Instruments\", \"Shared\", \"Skyline\", \"NoSqlDatabase\", \"bin\", \"mongorestore.exe\")\nmongod_exe = os.path.join(program_file_dir, \"National Instruments\", \"Shared\", \"Skyline\", \"NoSqlDatabase\", \"bin\", \"mongod.exe\")\nmongo_config = os.path.join(program_data_dir, \"National Instruments\", \"Skyline\", \"NoSqlDatabase\", \"mongodb.conf\")\n\nservice_config_dir = os.path.join(program_data_dir, \"National Instruments\", \"Skyline\", \"Config\")\n\n# Global constants for argparse\nsubparser_storage_attr = 'action'\n\n# Service Dictionaries\ntag_dict = {\n    'arg': 'tag',\n    'name': 'TagHistorian',\n    'directory_migration': False,\n    'singlefile_migration': True,\n    'singlefile_migration_dir': os.path.join(migration_dir, \"keyvaluedb\"),\n    'singlefile_source_dir': os.path.join(program_data_dir, \"National Instruments\", \"Skyline\", \"KeyValueDatabase\"),\n    'singlefile_to_migrate': 'dump.rdb'\n}\ntag = SimpleNamespace(**tag_dict)\n\nopc_dict = {\n    'arg': 'opc',\n    'name': \"OpcClient\",\n    'directory_migration': True,\n    'singlefile_migration': False,\n    'migration_dir': os.path.join(migration_dir, \"OpcClient\"),\n    'source_dir': os.path.join(program_data_dir, \"National Instruments\", \"Skyline\", \"Data\", \"OpcClient\")\n}\nopc = SimpleNamespace(**opc_dict)\n\nfis_dict = {\n    'arg': 'fis',\n    'name': \"FileIngestion\",\n    'directory_migration': True,\n    'singlefile_migration': False,\n    'migration_dir': os.path.join(migration_dir, \"FileIngestion\"),\n    'source_dir': os.path.join(program_data_dir, \"National Instruments\", \"Skyline\", \"Data\", \"FileIngestion\")\n}\nfis = SimpleNamespace(**fis_dict)\n\ntestmonitor_dict = {\n    'arg': 'testmonitor',\n    'name': \"TestMonitor\",\n    'directory_migration': False,\n    'singlefile_migration': False,\n}\ntestmonitor = SimpleNamespace(**testmonitor_dict)\n\nalarmrule_dict = {\n    'arg': 'alarmrule',\n    'name': \"TagRuleEngine\",\n    'directory_migration': False,\n    'singlefile_migration': False,\n}\nalarmrule = SimpleNamespace(**alarmrule_dict)
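\n\n# Each *_dict above is wrapped in a SimpleNamespace purely for attribute-style\n# access, e.g. tag.name == tag_dict['name'] and fis.migration_dir is the\n# ready-made destination path for that service.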
\n\nasset_dict = {\n    'arg': 'asset',\n    'name': \"AssetPerformanceManagement\",\n    'directory_migration': False,\n    'singlefile_migration': False,\n}\nasset = SimpleNamespace(**asset_dict)\n\nrepository_dict = {\n    'arg': 'repository',\n    'name': \"Repository\",\n    'directory_migration': True,\n    'singlefile_migration': False,\n    'migration_dir': os.path.join(migration_dir, \"Repository\"),\n    'source_dir': os.path.join(program_file_dir, \"National Instruments\", \"Shared\", \"Web Services\", \"NI\", \"repo_webservice\", \"files\")\n}\nrepository = SimpleNamespace(**repository_dict)\n\nuserdata_dict = {\n    'arg': 'userdata',\n    'name': \"UserData\",\n    'directory_migration': False,\n    'singlefile_migration': False,\n}\nuserdata = SimpleNamespace(**userdata_dict)\n\nnotification_dict = {\n    'arg': 'notification',\n    'name': \"Notification\",\n    'directory_migration': False,\n    'singlefile_migration': False,\n}\nnotification = SimpleNamespace(**notification_dict)\n\nstates_dict = {\n    'arg': 'states',\n    'name': \"SystemsStateManager\",\n    'directory_migration': True,\n    'singlefile_migration': False,\n    'migration_dir': os.path.join(migration_dir, \"SystemsStateManager\"),\n    'source_dir': os.path.join(program_data_dir, \"National Instruments\", \"Skyline\", \"Data\", \"SystemsStateManager\")\n}\nstates = SimpleNamespace(**states_dict)\n\nno_sql_dict = {\n    'name': 'NoSqlDatabase'\n}\nno_sql = SimpleNamespace(**no_sql_dict)\n\nthdbbug_dict = {\n    'arg': 'thdbbug',\n    'name': 'TagHistorian',\n    'directory_migration': False,\n    'singlefile_migration': False,\n    'intradb_migration': True,\n    'collections_to_migrate': ['metadata', 'values'],\n    'source_db': 'admin',\n    'destination_db': 'nitaghistorian'\n}\nthdbbug = SimpleNamespace(**thdbbug_dict)\n\n# Argument constants\ncapture_arg = 'capture'\nrestore_arg = 'restore'\nsource_db_arg = 'source_db'\nsource_db = 'admin'\n","sub_path":"slmigrate/constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":4740,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"565052533","text":"from django.urls import path\n\nfrom . import views
\n\napp_name = \"users\"\nurlpatterns = [\n    path(\"login/\", views.user_login, name=\"login\"),\n    path(\"logout/\", views.user_logout, name=\"logout\"),\n    path(\"reports/\", views.reports, name=\"reports\"),\n]\n","sub_path":"samanabrah/users/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":248,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"441817910","text":"import datetime\nfrom django.shortcuts import render, get_object_or_404, HttpResponse, HttpResponseRedirect\nfrom django.urls import reverse\nfrom .forms import LeaveRequestForm\nfrom .models import LeaveRequest, LeaveType, LeavesAllottedTotal, ApprovedLeaves\nfrom Employee.models import Employee, LeaveApprover\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.db.models import Q\nfrom django.contrib.auth.decorators import login_required\nfrom django.views import View\nfrom calendar import monthrange\n\n# import generic class-based views\nfrom django.views.generic import (\n    FormView,\n    CreateView,\n    UpdateView,\n    ListView,\n    DetailView,\n)\n\nclass LeaveRequestCreateView(CreateView):\n    ONE = 1\n    TWO = 2\n    THREE = 3\n    FIVE = 5\n    # specify the Form you want to use\n    form_class = LeaveRequestForm\n    # specify the name of the template\n    template_name = \"leave/leave_create.html\"\n\n    def get_success_url(self):\n        return reverse('leave:leave-detail', args=(self.object.id,))\n\n    def get_context_data(self, **kwargs):\n        context = super(LeaveRequestCreateView, self).get_context_data(**kwargs)\n        user = self.request.user\n        employee = Employee.objects.get(user=user)\n        context['employee'] = employee\n        return context\n\n    def form_valid(self, form):\n        # This method is called when valid form data has been POSTed.\n        # It should return an HttpResponse.\n        login_user = self.request.user\n        employee = self.get_employee()\n        form.instance.employee = employee\n        form.instance.created_by = login_user\n        emp_dept = employee.department\n        approvers = LeaveApprover.objects.filter(department=emp_dept)\n        loginuser_approve_pos = 0\n        approver1 = None\n        approver2 = None\n        approver3 = None\n        # GOT ALL APPROVERS, NEED TO USE THIS VALUE\n        approvers_dict = {}\n        for a in approvers:\n            approvers_dict[a.position] = a\n            if a.position == self.ONE:\n                approver1 = a.approver\n            if a.position == self.TWO:\n                approver2 = a.approver\n            if a.position == self.THREE:\n                approver3 = a.approver\n            if login_user == a.approver.user:\n                loginuser_approve_pos = a.position  # login user is a leave approver at position a.position\n\n        #print(a)\n        leaves_exists_in = self.leave_entry_exists(form)\n        if leaves_exists_in:\n            form.add_error(None, \"Leaves applied are overlapping with previously applied leave entries. Kindly check!\")
Kindly check !\")\n return super().form_invalid(form)\n\n leave_days_td = form.instance.leavetill-form.instance.leavefrom\n leave_days = leave_days_td.days+1\n print(leave_days)\n #return\n if leave_days == self.ONE: # Only 1 line manager approval is neede for 1 day leave\n form.instance.second_approver_status = LeaveRequest.NA \n form.instance.third_approver_status = LeaveRequest.NA \n elif leave_days < self.FIVE: # Only 2 line manager approval is neede for 1 day leave\n form.instance.third_approver_status = LeaveRequest.NA #\n #Leave request created by ownself and is a first level approver\n if employee.user == login_user:\n if employee == approver1:\n form.instance.first_approver_status = LeaveRequest.NA\n form.instance.second_approver_status = LeaveRequest.PENDING\n form.instance.first_approver_remark = form.instance.comments\n elif employee == approver2:\n form.instance.first_approver_status = LeaveRequest.NA\n form.instance.second_approver_status = LeaveRequest.NA\n form.instance.third_approver_status = LeaveRequest.PENDING\n form.instance.second_approver_remark = form.instance.comments \n elif employee == approver3:\n form.instance.first_approver_status = LeaveRequest.NA\n form.instance.second_approver_status = LeaveRequest.NA\n form.instance.third_approver_status = LeaveRequest.APPROVED\n form.instance.third_approver_remark = form.instance.comments \n form.instance.leave_state=LeaveRequest.APPROVED\n else:\n if leave_days==self.ONE:\n form.instance.first_approver_status = LeaveRequest.PENDING\n form.instance.first_approver_remark = form.instance.comments\n elif leave_days < self.FIVE:\n form.instance.first_approver_status = LeaveRequest.PENDING\n form.instance.second_approver_status = LeaveRequest.PENDING\n else:\n form.instance.first_approver_status = LeaveRequest.PENDING\n form.instance.second_approver_status = LeaveRequest.PENDING\n form.instance.third_approver_status = LeaveRequest.PENDING\n #If Line manager is applying leave for his/her staff\n elif login_user==approver1.user:\n form.instance.first_approver_status = LeaveRequest.APPROVED\n form.instance.first_approver_remark = form.instance.comments\n if leave_days == self.ONE:\n form.instance.leave_state=LeaveRequest.APPROVED\n elif login_user==approver2.user:\n form.instance.first_approver_status = LeaveRequest.NA\n form.instance.second_approver_status = LeaveRequest.APPROVED\n form.instance.second_approver_remark = form.instance.comments \n if leave_days < self.FIVE:\n form.instance.leave_state=LeaveRequest.APPROVED \n elif login_user==approver3.user:\n form.instance.second_approver_status = LeaveRequest.NA\n form.instance.third_approver_status = LeaveRequest.APPROVED\n form.instance.third_approver_remark = form.instance.comments \n form.instance.leave_state = LeaveRequest.APPROVED\n else:\n form.add_error(None,\"You are not an authorised person to apply leave. 
Please contact your Line Manager !\")\n return super().form_invalid(form)\n # perform a action here\n print(form.cleaned_data)\n is_leave_approved = False\n if form.instance.leave_state == LeaveRequest.APPROVED:\n is_leave_approved = True\n self.object = form.save(commit=False)\n #Save all\n is_sick = form.cleaned_data['is_sick']\n form = self.object.save()\n #ret = distribute_leaves(form, leave_days, is_sick)\n\n cl_leaves_consumed = None\n el_leaves_consumed = None\n if is_sick == 'YES':\n make_sickleave()\n else:\n if is_leave_approved:\n self.manage_leaves(employee,leave_days,self.object)\n cl_type = LeaveType.objects.getLeaveTypeOnCode('CL')\n el_type = LeaveType.objects.getLeaveTypeOnCode('EL')\n cl_leaves_consumed = ApprovedLeaves.objects.getTotalSpecificLeave(cl_type,employee)\n el_leaves_consumed = ApprovedLeaves.objects.getTotalSpecificLeave(el_type,employee)\n \n #return HttpResponseRedirect(self.get_success_url())\n\n context = {'id':self.object.pk ,'CL': cl_leaves_consumed, 'EL': el_leaves_consumed,}\n print(context)\n return HttpResponseRedirect(reverse('leave:leave-detail',\n args=(self.object.pk,)))\n #return render(self.request, 'leave/leave_detail.html', context=context)\n\n def manage_leaves(self,employee,leave_demanded,form):\n leave_left = leave_demanded\n today = datetime.date.today()\n this_month = today.month\n cl_apply = 0\n el_apply =0\n lop_apply = 0\n cl_type = LeaveType.objects.getLeaveTypeOnCode('CL')\n el_type = LeaveType.objects.getLeaveTypeOnCode('EL')\n lop_type = LeaveType.objects.getLeaveTypeOnCode('LOP')\n #\n cl_yearly_allotted = LeavesAllottedTotal.objects.total(employee,cl_type)#.filter(leave_type=cl_type).first()\n cl_leaves_consumed = ApprovedLeaves.objects.getTotalSpecificLeave(cl_type,employee)\n el_yearly_allotted = LeavesAllottedTotal.objects.total(employee,el_type)\n el_leaves_consumed = ApprovedLeaves.objects.getTotalSpecificLeave(el_type,employee)\n #\n cl_inhand = cl_yearly_allotted.total_leaves - cl_leaves_consumed\n el_inhand = el_yearly_allotted.total_leaves - el_leaves_consumed\n #\n cl_can = this_month-cl_leaves_consumed\n\n if cl_can > 0:\n if cl_can <= leave_left:\n cl_apply = cl_can\n leave_left = leave_left - cl_can\n else:\n cl_apply = leave_left\n leave_left = 0\n\n if el_inhand > 0: # EL leaves are available\n if el_inhand >= leave_left:\n el_apply = leave_left\n leave_left = 0\n else:\n el_apply = el_inhand\n leave_left = leave_left - el_inhand\n if cl_inhand <= leave_left:\n cl_apply = cl_apply + cl_inhand\n leave_left = leave_left - cl_inhand\n else:\n cl_apply = cl_apply + leave_left\n leave_left = 0\n\n if leave_left > 0:\n lop_apply = leave_left \n \n \n print('CL:' + str(cl_apply))\n print('EL:' + str(el_apply))\n print('LOP:' + str(leave_left))\n if cl_apply > 0:\n ApprovedLeaves.objects.create_me(employee,cl_type,cl_apply,form)\n if el_apply > 0:\n ApprovedLeaves.objects.create_me(employee,el_type,el_apply,form)\n if lop_apply > 0:\n ApprovedLeaves.objects.create_me(employee,lop_type,lop_apply,form)\n return\n\n def make_sickleave():\n pass\n\n def get_employee(self):\n id_ = self.kwargs.get(\"id\")\n return get_object_or_404(Employee, id=id_)\n \n def leave_entry_exists(self, form):\n login_user = self.request.user\n employee = self.get_employee()\n leavefrom = form.instance.leavefrom\n leavetill = form.instance.leavetill\n if leavefrom and leavetill and employee:\n query = Q(employee=employee)\n query.add(Q(leavetill__gte=leavefrom), Q.AND)\n query.add(Q(leavefrom__lte=leavetill), Q.AND)\n lreq = 
\n            # print('leavefrom:'+leavefrom+': leavetill:'+leavetill+' : empid:'+employee)\n            print(lreq.exists())\n            if lreq.exists():\n                # entries exist\n                return True\n        return False\n\n\nclass LeaveDetailView(DetailView):\n    template_name = 'leave/leave_detail.html'\n    #queryset = Article.objects.all()\n    def get_context_data(self, **kwargs):\n        context = super(LeaveDetailView, self).get_context_data(**kwargs)\n        print('Inside Detailview-get_context_data')\n        this_obj = context['object']\n        leavetypeset = {}\n        if this_obj:\n            employee = this_obj.employee\n            if employee:\n                leavetypes = LeaveType.objects.filter(can_apply=LeaveType.YES)\n                leavetypeset = {}\n                if leavetypes.exists():\n                    for lt in leavetypes:\n                        ap = ApprovedLeaves.objects.getTotalSpecificLeave(lt, employee)\n                        leavetypeset[lt.code] = ap\n        context['leavebalance'] = leavetypeset\n        print(context)\n        return context\n\n    def get_object(self):\n        id_ = self.kwargs.get(\"id\")\n        print('Inside Detailview-get_object')\n        #print(id_)\n        return get_object_or_404(LeaveRequest, id=id_)\n\n\nclass LeaveRequestListView(ListView):\n    ONE = 1\n    TWO = 2\n    THREE = 3\n    models = LeaveRequest\n    queryset = LeaveRequest.objects.none()\n\n    def get_request_set(self):\n        if self.request:\n            user = self.request.user\n            employee = Employee.objects.get(user=user)\n            print('inside get_request_set')\n            try:\n                approvers = LeaveApprover.objects.filter(approver=employee)\n                reqs = set()\n                for apr in approvers:\n                    level = apr.position\n                    val = LeaveRequest.objects.get_by_department(apr.department).filter(leave_state=LeaveRequest.PENDING)\n                    if val.exists():\n                        for v in val:\n                            reqs.add(v)\n                sorted_list = sorted(reqs, key=lambda x: x.leavefrom)\n                return sorted_list\n            except LeaveApprover.DoesNotExist:\n                return None\n\n    def get_context_data(self, **kwargs):\n        context = super(LeaveRequestListView, self).get_context_data(**kwargs)\n        user = self.request.user\n        employee = Employee.objects.get(user=user)\n        context['employee'] = employee\n        context['requests'] = self.get_request_set()\n        return context\n\n    template_name = 'leave/leave_request_list.html'\n    paginate_by = 10\n    ordering = ['-leavefrom']\n\nclass MonthwiseView(View):\n    #form_class = MyForm\n    initial = {'key': 'value'}\n    template_name = 'leave/leave_monthly.html'\n\n    def get(self, request, *args, **kwargs):\n        mnt = int(datetime.date.today().strftime('%m'))\n        monthname = datetime.date.today().strftime('%B')\n        yr = int(datetime.date.today().strftime('%Y'))\n        num_days = monthrange(yr, mnt)[1]\n\n        departments = get_user_dept_staff(request)\n\n        emps = Employee.objects.filter(department__in=departments)\n        m_firstday = datetime.date.today().replace(day=1)\n        m_lastday = datetime.date.today().replace(day=num_days)\n        leaverequests = LeaveRequest.objects.filter(\n            employee__in=emps, leavetill__gte=m_firstday, leavefrom__lte=m_lastday\n        )\n        #print(range(1,int(num_days)))\n        table = draw_table(range(1, int(num_days)+1), emps, leaverequests, mnt, num_days)\n\n        context = {'days': range(1, int(num_days)+1), 'monthname': monthname, 'table': table}\n        return render(request, self.template_name, context)\n\n    def post(self, request, *args, **kwargs):\n        context = {'days': range(1, 32)}\n        # if form.is_valid():\n        #     return HttpResponseRedirect('/success/')\n\n        return render(request, self.template_name, context)\n\n
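# draw_table renders the month as an HTML grid: a header row with the day\n# numbers, then one row per employee, with days inside an approved leave span\n# emitted as highlighted cells (the \"leave\" css class name is illustrative).\n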
def draw_table(days, employees, leaverequests, mnt, num_days):\n    table = '<table>'\n    table += '<thead>'\n    table += '<tr>'\n    table += '<th>Name</th>'\n    for d in days:\n        table += '<th>' + str(d) + '</th>'\n    table += '</tr>'\n    table += '</thead>'\n    table += '<tbody>'\n    for emp in employees:\n        lr = leaverequests.filter(employee=emp)\n        start = {}\n        end = {}\n        if lr.exists():\n            for lvr in lr:\n                st = int(lvr.leavefrom.strftime('%d'))\n                start[st] = st\n                if int(lvr.leavefrom.strftime('%m')) < mnt:\n                    start[1] = 1\n                    st = 1\n                ed = int(lvr.leavetill.strftime('%d'))\n                end[st] = ed\n                if int(lvr.leavetill.strftime('%m')) > mnt:\n                    end[st] = num_days\n        #print(start)\n        table += \"<tr>\"\n        table += '<td>' + str(emp.id) + '--' + emp.get_full_name + '</td>'\n        itr = False\n        end_date = -1\n        for d in days:\n            if start.get(d) and end.get(d):\n                if d >= start.get(d) and d <= end.get(d):\n                    itr = True\n                    if end_date == -1:\n                        end_date = end.get(d)\n                        print(end_date)\n            if end_date >= d and end_date != -1:\n                table += '<td class=\"leave\"></td>'\n                if end_date == d:\n                    end_date = -1\n            else:\n                table += '<td></td>'\n                end_date = -1\n        table += '</tr>'\n    table += '</tbody>'\n    table += '</table>'\n\n    return table\n\ndef get_user_dept_staff(request):\n    user = request.user\n    employee = Employee.objects.get(user=user)\n    approvers = LeaveApprover.objects.get_departments(employee)\n    dept_ids = []\n    for a in approvers:\n        dept_ids.append(a.department.id)\n    return dept_ids\n\n@login_required(login_url='/login/')\ndef leave_monthly(request, **args):\n    context = {'days': range(1, 32)}\n    return render(request, 'leave/leave_monthly.html', context=context)\n\n@login_required(login_url='/login/')\ndef reject_leave(request, **args):\n    ONE = 1\n    TWO = 2\n    THREE = 3\n    FIVE = 5\n\n    user = request.user\n    user_emp = Employee.objects.get(user=user)\n    ret_obj = None\n    print(user_emp.id)\n    id = args.get(\"id\")\n    lr = LeaveRequest.objects.get(id=id)\n    employee = lr.employee\n    department = employee.department\n    approver = LeaveApprover.objects.get(department=department, approver=user_emp)\n    if approver.approver == user_emp:\n        if approver.position == ONE:\n            lr.first_approver_status = LeaveRequest.REJECTED\n            if lr.get_leave_duration == ONE:\n                lr.second_approver_status = LeaveRequest.NA\n                lr.third_approver_status = LeaveRequest.NA\n            lr.leave_state = lr.REJECTED\n            ret_obj = lr.save()\n        elif approver.position == TWO:\n            lr.second_approver_status = LeaveRequest.REJECTED\n            if lr.get_leave_duration < FIVE and lr.get_leave_duration > ONE:\n                lr.third_approver_status = LeaveRequest.NA\n            if lr.APPROVED != lr.first_approver_status:\n                lr.first_approver_status = lr.NA\n            lr.leave_state = lr.REJECTED\n            ret_obj = lr.save()\n        elif approver.position == THREE:\n            lr.third_approver_status = LeaveRequest.REJECTED\n            lr.leave_state = lr.REJECTED\n            if lr.APPROVED != lr.first_approver_status:\n                lr.first_approver_status = lr.NA\n            if lr.APPROVED != lr.second_approver_status:\n                lr.second_approver_status = lr.NA\n            ret_obj = lr.save()\n    return HttpResponseRedirect('/leave/leaverequests')\n\n@login_required(login_url='/login/')\ndef approve_leave_request(request, **args):\n    ONE = 1\n    TWO = 2\n    THREE = 3\n    FIVE = 5\n    user = request.user\n    user_emp = Employee.objects.get(user=user)\n    ret_obj = None\n    print(user_emp.id)\n    id = args.get(\"id\")\n    lr = LeaveRequest.objects.get(id=id)\n    employee = lr.employee\n    department = employee.department\n    approver = LeaveApprover.objects.get(department=department, approver=user_emp)\n    if approver.approver == user_emp:\n        if approver.position == ONE:\n            lr.first_approver_status = LeaveRequest.APPROVED\n            if lr.get_leave_duration == ONE:\n                lr.second_approver_status = LeaveRequest.NA\n                lr.third_approver_status = LeaveRequest.NA\n                lr.leave_state = lr.APPROVED\n                distribute(employee, lr.get_leave_duration, lr)\n            ret_obj = lr.save()\n        elif approver.position == TWO:\n            lr.second_approver_status = LeaveRequest.APPROVED\n            if lr.get_leave_duration < FIVE and lr.get_leave_duration > ONE:\n                lr.third_approver_status = LeaveRequest.NA\n                lr.leave_state = lr.APPROVED\n                distribute(employee, lr.get_leave_duration, lr)\n            if lr.APPROVED != lr.first_approver_status:\n                lr.first_approver_status = lr.NA\n            ret_obj = lr.save()\n        elif approver.position == THREE:\n            lr.third_approver_status = LeaveRequest.APPROVED\n            lr.leave_state = lr.APPROVED\n            distribute(employee, lr.get_leave_duration, lr)\n            if lr.APPROVED != lr.first_approver_status:\n                lr.first_approver_status = lr.NA\n            if lr.APPROVED != lr.second_approver_status:\n                lr.second_approver_status = lr.NA\n            ret_obj = lr.save()\n    return HttpResponseRedirect('/leave/leaverequests')\n\ndef distribute(employee, leave_demanded, form):\n    print('inside distribute')\n    leave_left = leave_demanded\n    today = 
datetime.date.today()\n    this_month = today.month\n    cl_apply = 0\n    el_apply = 0\n    lop_apply = 0\n    cl_type = LeaveType.objects.getLeaveTypeOnCode('CL')\n    el_type = LeaveType.objects.getLeaveTypeOnCode('EL')\n    lop_type = LeaveType.objects.getLeaveTypeOnCode('LOP')\n\n    cl_yearly_allotted = LeavesAllottedTotal.objects.total(employee, cl_type)\n    cl_leaves_consumed = ApprovedLeaves.objects.getTotalSpecificLeave(cl_type, employee)\n    el_yearly_allotted = LeavesAllottedTotal.objects.total(employee, el_type)\n    el_leaves_consumed = ApprovedLeaves.objects.getTotalSpecificLeave(el_type, employee)\n\n    cl_inhand = cl_yearly_allotted.total_leaves - cl_leaves_consumed\n    el_inhand = el_yearly_allotted.total_leaves - el_leaves_consumed\n\n    cl_can = this_month - cl_leaves_consumed\n\n    if cl_can > 0:\n        if cl_can <= leave_left:\n            cl_apply = cl_can\n            leave_left = leave_left - cl_can\n        else:\n            cl_apply = leave_left\n            leave_left = 0\n\n    if el_inhand > 0:  # EL leaves are available\n        if el_inhand >= leave_left:\n            el_apply = leave_left\n            leave_left = 0\n        else:\n            el_apply = el_inhand\n            leave_left = leave_left - el_inhand\n            if cl_inhand <= leave_left:\n                cl_apply = cl_apply + cl_inhand\n                leave_left = leave_left - cl_inhand\n            else:\n                cl_apply = cl_apply + leave_left\n                leave_left = 0\n\n    if leave_left > 0:\n        lop_apply = leave_left\n\n    print('CL:' + str(cl_apply))\n    print('EL:' + str(el_apply))\n    print('LOP:' + str(leave_left))\n    #return\n    if cl_apply > 0:\n        ApprovedLeaves.objects.create_me(employee, cl_type, cl_apply, form)\n        print('updated')\n    if el_apply > 0:\n        ApprovedLeaves.objects.create_me(employee, el_type, el_apply, form)\n    if lop_apply > 0:\n        ApprovedLeaves.objects.create_me(employee, lop_type, lop_apply, form)\n    return\n","sub_path":"Swasthya/leave/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":23378,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"507791738","text":"from django.shortcuts import render\nfrom django.db import transaction\nimport xlrd\nimport pandas as pd\nimport numpy as np\nfrom sqlalchemy import create_engine\nfrom bokeh.io import show, output_notebook\nfrom bokeh.plotting import figure\nfrom bokeh.embed import components\nfrom bokeh.models import HoverTool, NumeralTickFormatter, \\\n    LinearAxis, Range1d, LabelSet, ColumnDataSource, FixedTicker, Label, Legend\nfrom bokeh.layouts import column\nfrom numbers import Number\nfrom math import pi\n\nfrom .models import DartOpsReport\nfrom .form import UploadOpsDataForm\n#from .modelform import UploadFileModelForm\n\n\ndef UploadExcel(request):\n\n    # Check whether a cell value from the Excel sheet is a placeholder symbol,\n    # and normalise empty values to 0, because xlrd returns values in whatever\n    # type the original cell used.\n    def symbol_check(theValue):\n        if isinstance(theValue, str) and theValue.strip() in ['-', '']:\n            theValue = 0\n        elif isinstance(theValue, int) and theValue in ['']:\n            theValue = 0\n        return theValue\n\n    # The key to form initialisation: request.POST or None, request.FILES or None\n    myform = UploadOpsDataForm(request.POST or None, request.FILES or None)\n    if myform.is_valid():\n        uploaded_excel = request.FILES.get('myfile')  # 'myfile' is the FileField declared in the form (the field's name attribute in the template)\n        #print(uploaded_excel)\n\n        excel_type = uploaded_excel.name.split('.')[-1].strip()  # extension of the uploaded file: xls, xlsx, ...\n        if excel_type in ['xls']:\n            excel_data = xlrd.open_workbook(filename=None, file_contents=uploaded_excel.read(), formatting_info=True)  # xls file\n        elif excel_type in ['xlsx']:\n            excel_data = xlrd.open_workbook(filename=None, file_contents=uploaded_excel.read())
\n        else:\n            print(\"Are you sure it is an EXCEL file?\")\n            excel_data = False\n\n        sheet = excel_data.sheets()[0]  # first sheet in the workbook\n        rows = sheet.nrows  # total number of rows\n        cols = sheet.ncols  # total number of columns\n        header_count = 1  # leading header rows that should not be imported\n\n        # Try to import the spreadsheet into the database\n        try:\n            with transaction.atomic():  # wrap the whole import in one database transaction\n                # read each Excel row into a row_values list\n                for row in range(header_count, rows):\n                    print(\"This is row # {row} from the spreadsheet...\".format(row=row))\n                    row_values = sheet.row_values(row)  # row_values is a list\n                    #print(row_values[1])\n\n                    # skip this row if the key value is empty or already present in the table\n                    target_data = row_values[9]  # the field we care about: flight_id\n                    value_existed = DartOpsReport.objects.filter(flight_id=symbol_check(target_data))\n                    if str(target_data).strip() == '' or value_existed.count():\n                        print(\"This is not a valid record, or, it has existed in the database...\")\n                        continue\n\n                    # Once the row is known not to be a duplicate, insert it. The keyword\n                    # arguments mirror the model fields and must match the meaning of the\n                    # Excel columns, to guard against spreadsheet format changes.\n                    DartOpsReport.objects.create(\n                        operator=symbol_check(row_values[1]),\n                        aircraft=symbol_check(row_values[2]),\n                        aircraft_type=symbol_check(row_values[4]),\n                        flight_type=symbol_check(row_values[5]),\n                        antenna_type=symbol_check(row_values[6]),\n                        xid=symbol_check(row_values[7]),\n                        bc_gen=symbol_check(row_values[8]),\n                        flight_id=symbol_check(row_values[9]),  # flight_id must be unique in the table\n                        flight_num=symbol_check(row_values[10]),\n                        excluded=symbol_check(row_values[11]),\n                        exclusion_reason=symbol_check(row_values[12]),\n                        departure_airport=symbol_check(row_values[13]),\n                        arrival_airport=symbol_check(row_values[14]),\n                        departure_time=symbol_check(row_values[15]),\n                        arrival_time=symbol_check(row_values[16]),\n                        flight_time=symbol_check(row_values[17]),\n                        connected_sec=symbol_check(row_values[19]),\n                        connected_sec_expected=symbol_check(row_values[21]),\n                        avail_raw=symbol_check(row_values[23]),\n                        avail_calibrated=symbol_check(row_values[24]),\n                        latency=symbol_check(row_values[25]),\n                        latency_std=symbol_check(row_values[26]),\n                        packet_loss=symbol_check(row_values[27]),\n                        packet_loss_std=symbol_check(row_values[28]),\n                        beam_switch_count=symbol_check(row_values[29]),\n                        beam_switch_average_sec=symbol_check(row_values[30]),\n                        beam_switch_excluded_sec=symbol_check(row_values[32]),\n                        kbpu=symbol_check(row_values[37]),\n                        device_count=symbol_check(row_values[38])\n                    )\n            return render(request, 'upload/upload_success.html')  # return the success page\n\n        except Exception as e:\n            print(\"Something went wrong when importing excel data to the database...\")\n            print(e)\n    myform = UploadOpsDataForm()\n\n    template_name = 'upload/upload.html'\n    content = {'form': myform}\n    return render(request, template_name, content)\n\n\ndef SlaReport(request):\n    qs = DartOpsReport.objects.all()\n    template_name = 'sla/sla_report.html'\n    content = {'sla': qs}\n    return render(request, template_name, content)\n\n# two helpers to convert between decimals and percentage strings\ndef decimal2percent(v, precision='0.2'):\n    \"\"\"Convert number to percentage string.\"\"\"\n    if isinstance(v, Number):\n        return \"{{:{}%}}\".format(precision).format(v)\n    else:\n        raise TypeError(\"Numeric type required\")\n\ndef percent2decimal(x):\n    return float(x.strip('%')) / 100\n\ndef plot_basic(df, gen_tag, airline):\n\n    color_flight_count = '#fdb44b'\n    if gen_tag == 'Gen1':\n        color_avail = 'steelblue'\n        #color_avail = 'lightcoral'\n    else:\n        #color_avail = 'seagreen'\n        color_avail = 'lightcoral'\n\n    # Keep only tails whose availability is below the threshold so the plot\n    # stays readable; skip the plot entirely when there are none.\n    df_avail = df.loc[df['Availability_decimal'] <= .95]\n    if df_avail.empty:\n        return False\n\n    # with qualifying rows present, pull the columns out as lists\n    operator_list = df_avail['Operator'].tolist()
\ndef plot_basic(df,gen_tag,airline):\n\n    color_flight_count = '#fdb44b'\n    if gen_tag == 'Gen1':\n        color_avail = 'steelblue'\n        #color_avail = 'lightcoral'\n    else:\n        #color_avail = 'seagreen'\n        color_avail = 'lightcoral'\n\n    # Keep only rows whose availability is at or below 95% so the chart does not\n    # get crowded; skip the plot entirely if no row qualifies\n    df_avail = df.loc[df['Availability_decimal'] <= .95]\n    if df_avail.empty:\n        return False\n\n    # With qualifying rows present, pull the columns out as plain lists\n    operator_list = df_avail['Operator'].tolist()\n    aircraft_list = df_avail['Aircraft'].tolist()\n    flight_count = df_avail['Flight_Count'].tolist()\n    gen_list = list(df_avail['Gen'].tolist())\n    avail_percent_list = df_avail['Availability'].tolist()\n    avail_decimal_list = df_avail['Availability_decimal'].tolist()\n    latency_list = df_avail['Latency'].tolist()\n    packet_loss_list = df_avail['Packet_Loss'].tolist()\n\n    # TODO: tidy this up\n    source = ColumnDataSource(data=dict(\n        aircraft_list=aircraft_list,\n        avail_decimal_list=avail_decimal_list,\n        avail_percent_list=avail_percent_list,\n        flight_count=flight_count,\n        latency_list=latency_list,\n        packet_loss_list=packet_loss_list,\n    ))\n\n    TOOLTIPS = [\n        (\"Aircraft\",\"@aircraft_list\"),\n        (\"Availability\",\"@avail_percent_list\"),\n        (\"Flt Count\",\"@flight_count\"),\n        (\"Latency\",\"@latency_list\"),\n        (\"Pkt Loss\",\"@packet_loss_list\"),\n    ]\n\n\n    # Build the figure; width, label rotation and offsets depend on the tail count.\n    # Note: hover tooltips are attached below via add_tools(); previously the\n    # tooltips kwarg was misplaced inside str.format(), where it was silently ignored.\n    if len(aircraft_list)>30:\n        p = figure(x_range=aircraft_list,plot_height=400,plot_width=40*len(aircraft_list),\n                   title=\"{airline} {gen_tag} SLA Bottom Tails\".format(airline=airline,gen_tag=gen_tag))\n        xlabel_rotation = pi/2\n        x_offset=4\n        y_offset=-40\n        label_font_size = '8pt'\n\n    elif len(aircraft_list)<6:\n        p = figure(x_range=aircraft_list,plot_height=400,plot_width=120*len(aircraft_list),\n                   title=\"{airline} {gen_tag} SLA Bottom Tails\".format(airline=airline,gen_tag=gen_tag))\n        xlabel_rotation = 0\n        x_offset=-20\n        y_offset=-18\n        label_font_size = '10pt'\n\n    else:\n        p = figure(x_range=aircraft_list,plot_height=400,plot_width=80*len(aircraft_list),\n                   title=\"{airline} {gen_tag} SLA Bottom Tails\".format(airline=airline,gen_tag=gen_tag))\n        xlabel_rotation = 0\n        x_offset=-20\n        y_offset=-18\n        label_font_size = '10pt'\n\n\n    # glyph on the primary (y) axis\n    p.vbar(x=\"aircraft_list\", top=\"avail_decimal_list\",width=0.8,\n           fill_color=color_avail,line_color=color_avail,source=source,legend=\"Availability\")\n\n    # primary axis range\n    p.y_range = Range1d(start=int((min(avail_decimal_list))*10)/10, end=max(avail_decimal_list)+0.005)\n    # format primary-axis labels as percentages\n    p.yaxis.formatter = NumeralTickFormatter(format=\"0%\")\n    # primary axis label\n    #p.yaxis.axis_label = \"Flight Time Availability\"\n    # axis label font settings\n    p.yaxis.axis_label_text_font_style = \"bold\"\n    # hide the primary axis\n    p.yaxis.visible = False\n\n\n\n    label_avail = LabelSet(x='aircraft_list', y='avail_decimal_list',\\\n                           text='avail_percent_list',\\\n                           source=source,\\\n                           render_mode='canvas',\\\n                           text_font_size=label_font_size,angle=xlabel_rotation,text_color='white',\\\n                           x_offset=x_offset, y_offset=y_offset)\n\n    p.add_layout(label_avail)\n\n    # TODO: attach labels to the secondary-axis data points (not figured out yet)\n\n    # names and ranges for the extra (secondary) y axes\n    p.extra_y_ranges = {\"Flight_Count\": Range1d(start=0, end=200),\\\n                        \"Latency\": Range1d(start=500, end=1500),\n                        \"Packet_Loss\": Range1d(start=0, end=10),\n                        \"Availability\": Range1d(start=int((min(avail_decimal_list))*10)/10, end=1)}\n    # glyphs and legend entries for each extra axis\n    p.circle(x=\"aircraft_list\",y=\"flight_count\",color='darkgray',y_range_name=\"Flight_Count\",source=source,legend=\"Flt Count\")\n    p.square(x=\"aircraft_list\",y=\"latency_list\",color='gold',y_range_name=\"Latency\",source=source,legend=\"Latency\")\n    p.x(x=\"aircraft_list\",y=\"packet_loss_list\",color='navy',y_range_name=\"Packet_Loss\",source=source,legend=\"Pkt Loss\")\n\n\n    # add the extra axes to the figure\n    #p.add_layout(LinearAxis(y_range_name=\"Flight_Count\",axis_label='Flight Count'), 'right')\n    #p.add_layout(LinearAxis(y_range_name=\"Packet_Loss\",axis_label='x: Packet Loss'), 'right')\n\n    p.add_tools(HoverTool(tooltips=TOOLTIPS))\n\n    # axis label font settings\n    p.yaxis.axis_label_text_font_style = \"bold\"\n\n    # rotate the x-axis tick labels\n    p.xaxis.major_label_orientation = 1\n    # remove the grid lines\n    p.xgrid.grid_line_color = None\n    p.ygrid.grid_line_color = None\n\n    # legend settings\n    p.legend.orientation = \"horizontal\"\n    p.legend.location = \"top_left\"\n    p.legend.label_text_font_size = \"8pt\"\n    p.legend.border_line_width = 0\n    p.legend.border_line_color = \"navy\"\n    p.legend.border_line_alpha = 0\n    p.legend.click_policy=\"hide\"\n    p.legend.background_fill_color = None\n    p.legend.background_fill_alpha = 0\n    p.legend.spacing = 2\n    p.legend.padding = 2\n    p.legend.margin = 2\n\n\n    return p\n
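plot_basic draws everything against one hidden primary y-axis plus several named secondary ranges. The same pattern in its smallest runnable form (a standalone sketch: the data, the "count" range name and the output file name are made up, and it follows the same Bokeh API style the code above targets):

    from bokeh.plotting import figure
    from bokeh.models import Range1d, LinearAxis
    from bokeh.io import output_file, show

    p = figure(x_range=['a', 'b'], plot_height=300)
    p.vbar(x=['a', 'b'], top=[0.9, 0.8], width=0.8)            # drawn on the primary y range
    p.extra_y_ranges = {"count": Range1d(start=0, end=200)}    # a named secondary range
    p.circle(x=['a', 'b'], y=[120, 60], y_range_name="count")  # glyph mapped to that range
    p.add_layout(LinearAxis(y_range_name="count"), 'right')    # and its axis on the right
    output_file("twin_axis.html")                              # hypothetical output name
    # show(p)  # uncomment to open in a browser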
\ndef create_detailed_plots(df):\n    color_avail = '#005792'\n    color_avail_gen1 = 'steelblue'\n    #color_avail = '#cf455c'\n    color_avail_gen3 = 'seagreen'\n    color_flight_count = '#fdb44b'\n\n    airline_list = df.operator.unique().tolist()\n    plot_list = []\n    for airline in airline_list:\n        sla_airline = df[df.operator == airline].pivot_table(index=['aircraft','bc_gen'],\\\n            values=[\"connected_sec\",\"connected_sec_expected\",\"flight_num\",\"latency\",\"packet_loss\"],fill_value=0,\\\n            aggfunc={\"connected_sec\":np.sum,\"connected_sec_expected\":\"sum\",\"flight_num\":'count',\"latency\":np.mean,\"packet_loss\":np.mean})\n\n        sla_airline['Availability_decimal'] = sla_airline['connected_sec']/sla_airline['connected_sec_expected']\n        sla_airline['Availability'] = sla_airline['Availability_decimal'].apply(decimal2percent)\n        sla_airline['Operator'] = airline\n        sla_report = pd.DataFrame(sla_airline.to_records())\n\n        sla_report = sla_report.rename(index=str, columns={\\\n            #\"operator\": \"Operator\", \\\n            \"aircraft\": \"Aircraft\",\\\n            \"bc_gen\": \"Gen\",\\\n            \"flight_num\": \"Flight_Count\",\\\n            \"connected_sec_expected\": \"SLA_Expected\",\\\n            \"connected_sec\": \"SLA_Actual\",\\\n            \"latency\": \"Latency\",\n            \"packet_loss\": \"Packet_Loss\"})\n\n        sla_report_sorted = sla_report.sort_values('Availability_decimal',ascending=True)\n\n        # filter on the sorted frame itself rather than mixing frames\n        sla_report_Gen1 = sla_report_sorted[sla_report_sorted['Gen']==\"GEN1\"]\n        sla_report_Gen3 = sla_report_sorted[sla_report_sorted['Gen']==\"GEN3\"]\n        #print(sla_report_Gen1,sla_report_Gen3)\n\n\n        # Convert dataframe column data to a list -- the tolist() function\n        if sla_report_Gen1.empty == False: # a Gen1 fleet exists\n            gen_tag = \"Gen1\"\n            plot_gen1 = plot_basic(sla_report_Gen1,gen_tag,airline)\n            if plot_gen1:\n                plot_list.append(plot_gen1)\n\n\n        if sla_report_Gen3.empty == False: # a Gen3 fleet exists\n            gen_tag = \"Gen3\"\n            plot_gen3 = plot_basic(sla_report_Gen3,gen_tag,airline)\n            if plot_gen3:\n                plot_list.append(plot_gen3)\n\n    return column(plot_list)\n
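create_detailed_plots leans on pivot_table with a per-column aggfunc dict. Here is that pattern on toy data (not the report's real schema) so the aggregation is explicit:

    import numpy as np
    import pandas as pd

    toy = pd.DataFrame({'aircraft': ['A1', 'A1', 'A2'],
                        'connected_sec': [90, 80, 50],
                        'connected_sec_expected': [100, 100, 100],
                        'latency': [700, 900, 650]})
    agg = toy.pivot_table(index='aircraft',
                          aggfunc={'connected_sec': np.sum,           # summed per aircraft
                                   'connected_sec_expected': 'sum',
                                   'latency': np.mean})               # averaged per aircraft
    agg['availability'] = agg['connected_sec'] / agg['connected_sec_expected']
    print(agg)  # availability: A1 -> 0.85, A2 -> 0.50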
def create_overview_plot(df):\n\n    # 2. Clean the data with pandas\n    df_overview = df.pivot_table(index=[\"operator\"],\\\n        values=[\"connected_sec\",\"connected_sec_expected\",\"flight_num\"],fill_value=0,\\\n        aggfunc={\"connected_sec\":np.sum,\"connected_sec_expected\":\"sum\",\"flight_num\":'count'})\n\n    df_overview['Availability_decimal'] = df_overview['connected_sec']/df_overview['connected_sec_expected']\n    df_overview['Availability'] = df_overview['Availability_decimal'].apply(decimal2percent)\n\n    # Convert the pivot table back to a flat dataframe -- the to_records() function\n    sla_report = pd.DataFrame(df_overview.to_records())\n\n    # Rename the columns to easily understandable names\n    sla_report = sla_report.rename(index=str, columns={\n        \"operator\": \"Operator\", \\\n        \"flight_num\": \"Flight_Count\",\\\n        \"connected_sec_expected\": \"SLA_Expected\",\\\n        \"connected_sec\": \"SLA_Actual\"\n        })\n    #print(sla_report)\n\n    # Convert dataframe column data to a list -- the tolist() function\n    operator_list = sla_report['Operator'].tolist()\n    flight_count = sla_report['Flight_Count'].tolist()\n    avail_percent_list = sla_report['Availability'].tolist()\n    avail_decimal_list = sla_report['Availability_decimal'].tolist()\n\n    # Start Bokeh plotting!!!\n    p = figure(x_range=operator_list, plot_height = 500, title=\"SLA Overview\")\n    p.vbar(operator_list, top=avail_decimal_list,width=1.5,line_width=200)\n\n    #p.xgrid.grid_line_color = None\n    p.y_range.start = 0\n\n    #show(p)\n    return p\n\ndef BokehChart(request):\n    \"\"\"Fetch the data table, clean it, and render the SLA charts.\"\"\"\n    # 1. Connect to the database and read everything from the table\n    # 2. Clean the data with pandas\n\n    # 1. Connect to the database and read everything from the table\n    db_name = 'sladb'\n    table_name = 'dart_ops_report'\n    col_name = \"*\"\n    db_engine = 'mysql'\n    driver = 'pymysql'\n    db_login = 'root:noway.man'\n    db_server = 'localhost:3306'\n    db_charset = 'utf8mb4'\n\n\n    db_conn = '{db_engine}+{driver}://{db_login}@{db_server}/{db_name}?charset={db_charset}'\\\n        .format(db_engine=db_engine,driver=driver,db_login=db_login,\\\n        db_server=db_server,db_name=db_name,db_charset=db_charset)\n\n    conn = create_engine(db_conn)\n    sql = 'select {col_name} from {db_name}.{table_name};'\\\n        .format(col_name=col_name,db_name=db_name,table_name=table_name)\n\n    df = pd.read_sql(sql,conn)\n    #df.to_csv('OpsData.csv',index=False)\n\n    # Only process rows that have not been excluded\n    df_excluded = df.loc[df['excluded'] != 0]\n    df = df.loc[df['excluded'] == 0]\n    #print(df.head())\n\n\n    sla_overview_plot = create_overview_plot(df)\n    sla_detailled_plots = create_detailed_plots(df)\n\n    script, div = components(sla_detailled_plots)\n    template_name = 'sla/sla_report.html'\n    content = {\n        'script':script,\n        'div':div\n    }\n\n    return render(request,template_name,content)\n\n","sub_path":"src/sla/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":16934,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
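BokehChart hands the figures to the Django template through bokeh.embed.components. The contract in miniature (a standalone sketch; note the template must also load BokehJS, e.g. from the CDN, or the emitted script does nothing):

    from bokeh.plotting import figure
    from bokeh.embed import components

    p = figure(plot_height=200)
    p.line([1, 2, 3], [4, 6, 5])
    script, div = components(p)  # a <script> block plus a <div> placeholder
    # in the view: render(request, 'sla/sla_report.html', {'script': script, 'div': div})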
{"seq_id":"494203264","text":"import requests\n\n\nx = input('Want suggestions for movies or music? Enter one you like: \\n')\ny = input('Enter another movie or musician: \\n')\n\nconsulta = x + ', ' + y\n\nparametros = {'k': '286940-DevFSens-0KCM0XSU', 'q': consulta}\n\nurl = 'https://tastedive.com/api/similar?'\nrespuesta = requests.get(url, params = parametros)\n\nprint(respuesta.url)\n\njson_objets_respuesta = respuesta.json()\n\nprint(json_objets_respuesta)\n\na = json_objets_respuesta['Similar']['Info'][0]\nb = json_objets_respuesta['Similar']['Info'][0]['Type']\n\nlista_a = []\n# iterate over however many results the API returned instead of a fixed 0..18,\n# which raised IndexError whenever fewer than 19 results came back\nfor x in range(len(json_objets_respuesta['Similar']['Results'])):\n    c = json_objets_respuesta['Similar']['Results'][x]['Name']\n    lista_a.append(c)\n    print(c)\nprint(lista_a)\n\nfor x in range(len(lista_a)):\n    print(lista_a[x])\n\nprint(a)\nprint(b)\n\nclass Results(object):\n    lista = []\n\n    def __init__(self, results):\n        self.results = results\n        for x in range(len(self.results)):\n            self.lista.append(self.results[x])\n        print(self.lista)\n\ndata = json_objets_respuesta\nresultsall = data['Similar']['Results']\nResults(resultsall)  # instantiate after the class definition (was a NameError before)\n\n\n# class Similar():\n#     def __init__(self):\n#         pass\n#\n# class Info():\n#     pass\n\n# git\n# Github\n\n\n\n","sub_path":"req.py","file_name":"req.py","file_ext":"py","file_size_in_byte":1166,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"157277035","text":"from prepare_data import read_data, text2sequences, get_embeddings_matrix\r\nfrom keras.preprocessing.sequence import pad_sequences\r\nimport tensorflow as tf\r\nfrom model import biLSTM_crf\r\n\r\nconfig = {\r\n    'max_len':130,\r\n    'embeddings_dim':100,\r\n    'n_hidden':100,\r\n    'n_classes':10,\r\n    'epochs':100,\r\n    'batch_size':20\r\n}\r\n\r\n\r\n#prepare data\r\nx_train, y_train = read_data(type = 'train')\r\nx_val, y_val = read_data(type = 'valid')\r\nx_train, y_train, word_index, label_index, train_seq_len = text2sequences(x_train, y_train)\r\nx_val, y_val, word_index_val, _, val_seq_len = text2sequences(x_val, y_val)\r\n\r\nconfig['embeddings_matrix'] = get_embeddings_matrix(embedding_dim = config['embeddings_dim'], word_index = word_index)\r\n\r\nx_train = pad_sequences(x_train, truncating='post', padding = 'post',maxlen=config['max_len'])\r\ny_train = pad_sequences(y_train,truncating='post', padding = 'post', maxlen=config['max_len'])\r\nx_val = pad_sequences(x_val,truncating='post', padding = 'post',maxlen=config['max_len'])\r\ny_val = pad_sequences(y_val,truncating='post', padding = 'post', maxlen=config['max_len'])\r\n\r\n\r\nwith tf.Session() as sess:\r\n\r\n    with tf.variable_scope(\"Model\", reuse=None):\r\n        train_model = biLSTM_crf(is_training=True, config=config, input = {'x':x_train, 'y':y_train, 'seq_len':train_seq_len})\r\n\r\n    with tf.variable_scope(\"Model\", reuse=True):\r\n        val_model = biLSTM_crf(is_training=True, config=config, input = {'x':x_val, 'y':y_val, 'seq_len':val_seq_len})\r\n\r\n    # Initialize variables once, before the training loop; re-running the\r\n    # initializer inside the loop (as before) reset the weights every epoch.\r\n    init = tf.global_variables_initializer()\r\n    sess.run(init)\r\n\r\n    for i in range(config['epochs']):\r\n        print('Epoch: '+ str(i) + ' Training....')\r\n        train_model.run_epoch(sess)\r\n\r\n        print('Validation:')\r\n        val_model.run_epoch(sess)\r\n\r\n\r\n\r\n\r\n","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":1773,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
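train.py pads and truncates every sequence at the tail. A one-line toy input makes the pad_sequences behavior visible:

    from keras.preprocessing.sequence import pad_sequences

    print(pad_sequences([[7, 8, 9]], maxlen=5, padding='post', truncating='post'))
    # -> [[7 8 9 0 0]]: zeros appended (and over-long sequences cut) at the end,
    #    matching the truncating='post', padding='post' configuration above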
{"seq_id":"220903969","text":"class fAddPlafondKesehatan:\n    def __init__(self,parentForm,FormObj):\n        self.form = parentForm\n        self.app = parentForm.ClientApplication\n\n    def KaryawanAL(self, sender, linkui):\n        uipart = self.uipart\n        uipart.Edit()\n        EmpId = uipart.GetFieldValue('LEmployee.EmployeeId')\n        params = self.app.CreateValues(['Key',EmpId])\n        self.FormObject.SetDataWithParameters(params)\n        st = self.FormContainer.Show()\n        if st == 1:\n            return 1\n        else:\n            return 0\n\n    def onRealisasiExit(self,sender):\n        UI = self.uipart\n        UI.Edit()\n        sisa = UI.TotalPlafond - float(UI.Realization or 0)\n        UI.FinalBalance = sisa\n\n    def bProsesClick(self, button):\n        app = self.app\n        IsErr = self.CekInput()\n        if IsErr != 'N':\n            app.ShowMessage(IsErr)\n            return 0\n\n        self.FormObject.CommitBuffer()\n        ph = self.FormObject.GetDataPacket()\n        res = self.FormObject.CallServerMethod(\"Simpan\", ph)\n\n        status = res.FirstRecord\n        if status.IsErr:\n            pesan = status.ErrMessage\n            if 'duplicate key' in pesan:\n                pesan = 'You have already entered allowance data for the same type, grade and status.'\n            self.app.ShowMessage(pesan)\n            return 0\n\n        button.ExitAction = 2\n        self.FormObject.Close(2)\n\n    def CekInput(self):\n        IsErr = 'N'\n        if self.uipart.GetFieldValue('LEmployee.EmployeeId') == None:\n            IsErr = 'No employee has been selected yet.'\n        elif self.uipart.Realization == None:\n            IsErr = 'Realization must be filled in.'\n\n        return IsErr\n\n","sub_path":"dialogs/parameter/fAddPlafondKesehatan_intr.py","file_name":"fAddPlafondKesehatan_intr.py","file_ext":"py","file_size_in_byte":1475,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"141815156","text":"import re\nimport urllib.request as request\nf = open('pic\\\\1.txt', 'r')\ntemp = f.read()\nf.close()\npic_url = re.findall('img src=\"(.*?)\" alt=\"ro', temp, re.S)\n\nprint(pic_url)\n\n'''\nurl = 'http://www.meineihan.cc/diaosifuli/26223_3.html'\ncontent = request.urlopen(url)\nfp = open('pic\\\\helo.txt', 'wb')\nfp.write(content.read())\nfp.close()\n'''","sub_path":"mywork/python/reptile/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":337,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"52418532","text":"'''\nImplement a 'Stadium' class. The class fields must store: the stadium\nname, the opening date, the country, the city and the capacity.\nImplement class methods for entering and printing the data, and\nprovide access to the individual fields through class methods.\n'''\n\nclass Stadium:\n    \"\"\"Class represents a list of stadiums\"\"\"\n    numberStadium = 0\n\n    def __init__(self,\n                 stadium_name: str,\n                 opening_date: str,\n                 capacity: int = 0,\n                 country: str = '',\n                 city: str = '',\n                 ):\n        \"\"\"Initializes the stadium data attributes\"\"\"\n        self.stadium_name = stadium_name\n        self.opening_date = opening_date\n        self.capacity = capacity\n        self.country = country\n        self.city = city\n\n\n        Stadium.numberStadium += 1\n\n    def __str__(self) -> str:\n        '''Returns data'''\n        return f'''\n        Stadium name: {self.stadium_name}\n        Opening date: {self.opening_date}\n        Country: {self.country}\n        City: {self.city}\n        Capacity: {self.capacity}\n        '''\n\n    def __repr__(self) -> str:\n        '''Returns the string representation of an object'''\n        return f''' {self.stadium_name}\n                    {self.opening_date}\n                    {self.country}\n                    {self.city}\n                    {self.capacity}'''\n\n    def __del__(self):\n        \"\"\"Destructor call, when the object(Stadium) is removed from the list\"\"\"\n        print(f'Stadium {self.stadium_name} removed from the list!')\n        Stadium.numberStadium -= 1\n        if Stadium.numberStadium == 0:\n            print(\"No stadiums listed!\")\n        else:\n            print(f\"Left: {Stadium.numberStadium}.\")\n\n    @property\n    def set_country(self) -> str:\n        \"\"\"Returns the country\"\"\"\n        return self.country\n\n    @set_country.setter\n    def set_country(self, value: str):\n        \"\"\"Sets the country\"\"\"\n        self.country = value\n\n    @property\n    def set_city(self) -> str:\n        \"\"\"Returns the city\"\"\"\n        return self.city\n\n    @set_city.setter\n    def set_city(self, value: str):\n        \"\"\"Sets the city\"\"\"\n        self.city = value\n\nif __name__ == \"__main__\":\n\n    my_stadium1 = Stadium('\"Olympic\"', '12.11.2000', 5500)\n\n    my_stadium1.set_country = \"England\"\n    my_stadium1.set_city = \"London\"\n\n    print(my_stadium1)\n\n    my_stadium2 = 
Stadium('\"Spartacus\"', '02.01.1980', 4300)\n my_stadium2.set_country = \"France\" \n my_stadium2.set_city = \"Paris\"\n\n print(my_stadium2)\n\n print('Number of stadiums: ', Stadium.numberStadium)\n\n \n #del my_stadium1\n #print('Number of stadiums: ', Stadium.numberStadium)\n #output = input('Press to continue...')\n","sub_path":"Lesson_21_DZ_Nichipurenko_A.V/Lesson_21_DZ_3_Nichipurenko_A.V.py","file_name":"Lesson_21_DZ_3_Nichipurenko_A.V.py","file_ext":"py","file_size_in_byte":2923,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"123673927","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom math import pi\nimport pygame\nimport random\nfrom pygame.locals import Rect\nfrom vectores import Vector2\n\nWIDTH, HEIGHT = SIZE = 800, 600\nRED = (255, 0, 0)\nGREEN = (0, 255, 0)\nWHITE = (255, 255, 255)\nBLACK = (0, 0, 0)\nFPS = 30\n\n\ndef draw_vector(screen, color, a, b):\n pygame.draw.circle(screen, color, a, 3, 0)\n pygame.draw.line(screen, color, a, b, 1)\n left = b - a\n left = left.unit() * 10\n left.theta -= 7. * pi / 8.0\n pygame.draw.line(screen, color, b, b+left, 1)\n right = b - a\n right = right.unit() * 10\n right.theta += 7. * pi / 8.0\n pygame.draw.polygon(screen, color, [b, b+left, b+right, b], 0)\n\n\nclass Ball():\n def __init__(self, size):\n self.box = Rect((0, 0), size)\n self.pos = Vector2(*self.box.center)\n self.vel = Vector2.random_unit()\n self.speed = random.randrange(3, 10)\n \n def draw(self, canvas):\n pygame.draw.circle(canvas, (0, 0, 255), self.pos, 15)\n # traza\n draw_vector(canvas, GREEN, Vector2(0, 0), self.pos) \n draw_vector(canvas, RED, self.pos, self.pos+self.vel*self.speed*10)\n \n def update(self):\n self.pos += self.vel * self.speed\n\n \ndef main():\n pygame.init()\n screen = pygame.display.set_mode(SIZE)\n pygame.display.set_caption(\"Rebotes\")\n \n ball = Ball(SIZE)\n clock = pygame.time.Clock()\n in_game = True\n \n while in_game:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n in_game = False\n \n ball.update()\n \n screen.fill(BLACK)\n ball.draw(screen)\n pygame.display.flip()\n clock.tick(FPS)\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"rebotes.py","file_name":"rebotes.py","file_ext":"py","file_size_in_byte":1743,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"551812630","text":"# Copyright (c) 2017 VisualDL Authors. All Rights Reserve.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# =======================================================================\n\nfrom __future__ import absolute_import\nimport json\nimport os\n\nfrom google.protobuf.json_format import MessageToJson\n\nfrom . import graphviz_graph as gg\nfrom . 
import onnx\n\n\ndef debug_print(json_obj):\n print(\n json.dumps(json_obj, sort_keys=True, indent=4, separators=(',', ': ')))\n\n\ndef reorganize_inout(json_obj, key):\n \"\"\"\n :param json_obj: the model's json obj\n :param key: \"input or output\"\n :return:\n \"\"\"\n for index in range(len(json_obj[key])):\n var = json_obj[key][index]\n var_new = dict()\n\n # set name\n var_new['name'] = var['name']\n\n tensor_type = var['type']['tensorType']\n\n # set data_type\n var_new['data_type'] = tensor_type['elemType']\n\n # set shape\n shape = [dim['dimValue'] for dim in tensor_type['shape']['dim']]\n var_new['shape'] = shape\n\n json_obj[key][index] = var_new\n\n\ndef rename_model(model_json):\n def rename_edge(model_json, old_name, new_name):\n for node in model_json['node']:\n inputs = node['input']\n for idx in range(len(inputs)):\n if inputs[idx] == old_name:\n inputs[idx] = new_name\n outputs = node['output']\n for idx in range(len(outputs)):\n if outputs[idx] == old_name:\n outputs[idx] = new_name\n\n def rename_variables(model, variables):\n for variable in variables:\n old_name = variable['name']\n new_shape = [int(dim) for dim in variable['shape']]\n new_name = old_name + '\\ndata_type=' + str(\n variable['data_type']) + '\\nshape=' + str(new_shape)\n variable['name'] = new_name\n rename_edge(model, old_name, new_name)\n\n rename_variables(model_json, model_json['input'])\n rename_variables(model_json, model_json['output'])\n\n # rename\n all_nodes = model_json['node']\n for idx in range(len(all_nodes)):\n name = \"\"\n if \"name\" in all_nodes[idx]:\n name = all_nodes[idx]['name']\n op_type = all_nodes[idx]['opType']\n new_name = str(idx) + '\\n' + str(op_type)\n if name != \"\":\n new_name = new_name + \"\\n\" + name\n all_nodes[idx]['name'] = new_name\n\n\ndef get_links(model_json):\n links = []\n\n for input in model_json['input']:\n name = input['name']\n for node in model_json['node']:\n if name in node['input']:\n links.append({'source': name, \"target\": node['name']})\n\n for source_node in model_json['node']:\n for output in source_node['output']:\n for target_node in model_json['node']:\n if output in target_node['input']:\n links.append({\n 'source': source_node['name'],\n 'target': target_node['name']\n })\n\n return links\n\n\ndef get_node_links(model_json):\n \"\"\"\n :return:\n {\n \"0\": {\n \"input\": [],\n \"output\": [\n 1\n ]\n },\n \"1\": {\n \"input\": [\n 0\n ],\n \"output\": [\n 2\n ]\n }\n }\n \"\"\"\n node_links = dict()\n nodes = model_json['node']\n\n # init all nodes\n for idx in range(len(nodes)):\n node_links[idx] = {'input': list(), 'output': list()}\n\n for src_idx in range(len(nodes)):\n for out_name in nodes[src_idx]['output']:\n for dst_idx in range(len(nodes)):\n if out_name in nodes[dst_idx]['input']:\n node_links[src_idx]['output'].append(dst_idx)\n node_links[dst_idx]['input'].append(src_idx)\n\n return node_links\n\n\ndef add_level_to_node_links(node_links):\n \"\"\"\n :return:\n {\n \"0\": {\n \"input\": [],\n \"output\": [\n 1\n ],\n \"level\": 1\n },\n \"1\": {\n \"input\": [\n 0\n ],\n \"output\": [\n 2\n ],\n \"level\": 2\n }\n }\n \"\"\"\n # init level\n for key in node_links:\n node_links[key]['level'] = None\n for idx in range(len(node_links)):\n # the start up op's level is 1\n if len(node_links[idx]['input']) == 0:\n node_links[idx]['level'] = 1\n else:\n cur_level = node_links[idx]['level']\n for in_idx in node_links[idx]['input']:\n in_level = node_links[in_idx]['level']\n assert in_level is not None\n if cur_level is None or 
in_level >= cur_level:\n node_links[idx]['level'] = in_level + 1\n\n\ndef get_level_to_all(node_links, model_json):\n \"\"\"\n level_to_nodes {level -> [node_1, node_2]}\n output:\n {\n \"35\": {\n \"inputs\": [\n 38,\n 39\n ],\n \"nodes\": [\n 46\n ],\n \"outputs\": []\n }, {}\n \"\"\"\n level_to_nodes = dict()\n for idx in node_links:\n level = node_links[idx]['level']\n if level not in level_to_nodes:\n level_to_nodes[level] = list()\n level_to_nodes[level].append(idx)\n # debug_print(level_to_nodes)\n \"\"\"\n input_to_level {idx -> level}\n level_to_inputs {level -> [input1, input2]}\n \"\"\"\n nodes = model_json['node']\n\n input_to_level = dict()\n inputs = model_json['input']\n for in_idx in range(len(inputs)):\n in_name = inputs[in_idx]['name']\n for node_idx in range(len(nodes)):\n if in_name in nodes[node_idx]['input']:\n node_level = node_links[node_idx]['level']\n in_level = node_level - 1\n if in_idx not in input_to_level:\n input_to_level[in_idx] = in_level\n elif input_to_level[in_idx] > in_level:\n input_to_level[in_idx] = in_level\n\n level_to_inputs = dict()\n for in_idx in input_to_level:\n level = input_to_level[in_idx]\n if level not in level_to_inputs:\n level_to_inputs[level] = list()\n level_to_inputs[level].append(in_idx)\n\n # debug_print(level_to_inputs)\n\n # get output level\n output_to_level = dict()\n outputs = model_json['output']\n for out_idx in range(len(outputs)):\n out_name = outputs[out_idx]['name']\n for node_idx in range(len(nodes)):\n if out_name in nodes[node_idx]['output']:\n node_level = node_links[node_idx]['level']\n out_level = node_level + 1\n if out_level not in output_to_level:\n output_to_level[out_idx] = out_level\n else:\n raise Exception(\"output \" + out_name +\n \"have multiple source\")\n level_to_outputs = dict()\n for out_idx in output_to_level:\n level = output_to_level[out_idx]\n if level not in level_to_outputs:\n level_to_outputs[level] = list()\n level_to_outputs[level].append(out_idx)\n\n level_to_all = dict()\n\n def init_level(level):\n if level not in level_to_all:\n level_to_all[level] = {\n 'nodes': list(),\n 'inputs': list(),\n 'outputs': list()\n }\n\n # merge all levels\n for level in level_to_nodes:\n init_level(level)\n level_to_all[level]['nodes'] = level_to_nodes[level]\n for level in level_to_inputs:\n init_level(level)\n level_to_all[level]['inputs'] = level_to_inputs[level]\n for level in level_to_outputs:\n init_level(level)\n level_to_all[level]['outputs'] = level_to_outputs[level]\n\n # debug_print(level_to_all)\n\n return level_to_all\n\n\ndef level_to_coordinate(level_to_all):\n default_x = 100\n x_step = 100\n default_y = 10\n y_step = 100\n\n node_to_coordinate = dict()\n input_to_coordinate = dict()\n output_to_coordinate = dict()\n\n def get_coordinate(x_idx, y_idx):\n x = default_x + x_idx * x_step\n y = default_y + y_idx * y_step\n return {\"x\": int(x), \"y\": int(y)}\n\n for level in level_to_all:\n nodes = level_to_all[level]['nodes']\n inputs = level_to_all[level]['inputs']\n outputs = level_to_all[level]['outputs']\n x_idx = 0\n for node_idx in nodes:\n node_to_coordinate[node_idx] = get_coordinate(x_idx, level)\n x_idx += 1\n for in_idx in inputs:\n input_to_coordinate[in_idx] = get_coordinate(x_idx, level)\n x_idx += 1\n for out_idx in outputs:\n output_to_coordinate[out_idx] = get_coordinate(x_idx, level)\n x_idx += 1\n\n return node_to_coordinate, input_to_coordinate, output_to_coordinate\n\n\ndef add_edges(json_obj):\n # TODO(daming-lu): should try to de-duplicate node's out-edge\n # 
Currently it is counted twice: 1 as out-edge, 1 as in-edge\n json_obj['edges'] = []\n label_incrementer = 0\n\n for node_index in range(0, len(json_obj['node'])):\n cur_node = json_obj['node'][node_index]\n\n # input edges\n if 'input' in cur_node and len(cur_node['input']) > 0:\n for source in cur_node['input']:\n json_obj['edges'].append({\n 'source':\n source,\n 'target':\n 'node_' + str(node_index),\n 'label':\n 'label_' + str(label_incrementer)\n })\n label_incrementer += 1\n\n # output edge\n if 'output' in cur_node and len(cur_node['output']) > 0:\n json_obj['edges'].append({\n 'source': 'node_' + str(node_index),\n 'target': cur_node['output'][0],\n 'label': 'label_' + str(label_incrementer)\n })\n label_incrementer += 1\n\n return json_obj\n\n\ndef to_IR_json(model_pb_path):\n model = onnx.load(model_pb_path)\n graph = model.graph\n del graph.initializer[:]\n\n # to json string\n json_str = MessageToJson(model.graph)\n model_json = json.loads(json_str)\n reorganize_inout(model_json, 'input')\n reorganize_inout(model_json, 'output')\n return model_json\n\n\ndef load_model(model_pb_path):\n model_json = to_IR_json(model_pb_path)\n model_json = add_edges(model_json)\n return model_json\n\n\nclass GraphPreviewGenerator(object):\n '''\n Generate a graph image for ONNX proto.\n '''\n\n def __init__(self, model_json):\n self.model = model_json\n # init graphviz graph\n self.graph = gg.Graph(\n self.model['name'],\n layout=\"dot\",\n concentrate=\"true\",\n rankdir=\"TB\", )\n\n self.op_rank = self.graph.rank_group('same', 2)\n self.param_rank = self.graph.rank_group('same', 1)\n self.arg_rank = self.graph.rank_group('same', 0)\n\n def __call__(self, path='temp.dot', show=False):\n self.nodes = {}\n self.params = set()\n self.ops = set()\n self.args = set()\n\n for item in self.model['input'] + self.model['output']:\n node = self.add_param(**item)\n self.nodes[item['name']] = node\n self.params.add(item['name'])\n\n for id, item in enumerate(self.model['node']):\n node = self.add_op(**item)\n name = \"node_\" + str(id)\n self.nodes[name] = node\n self.ops.add(name)\n\n for item in self.model['edges']:\n source = item['source']\n target = item['target']\n\n if source not in self.nodes:\n self.nodes[source] = self.add_arg(source)\n self.args.add(source)\n if target not in self.nodes:\n self.nodes[target] = self.add_arg(target)\n self.args.add(target)\n\n if source in self.args or target in self.args:\n self.add_edge(style=\"dashed,bold\", color=\"#aaaaaa\", **item)\n else:\n self.add_edge(style=\"bold\", color=\"#aaaaaa\", **item)\n\n if not show:\n self.graph.display(path)\n else:\n self.graph.show(path)\n\n def add_param(self, name, data_type, shape):\n label = '\\n'.join([\n '<',\n ' ',\n ' ',\n ' ',\n ' ',\n ' '\n ' ',\n ' ',\n ' '\n ' ',\n '
',\n ' ',\n name,\n ' ',\n '
',\n data_type,\n '
',\n '[%s]' % 'x'.join(shape),\n '
>',\n ])\n return self.graph.node(\n label,\n prefix=\"param\",\n shape=\"none\",\n style=\"rounded,filled,bold\",\n width=\"1.3\",\n color=\"#148b97\",\n fontcolor=\"#ffffff\",\n fontname=\"Arial\")\n\n def add_op(self, opType, **kwargs):\n return self.graph.node(\n \"<%s>\" % opType,\n prefix=\"op\",\n shape=\"box\",\n style=\"rounded, filled, bold\",\n color=\"#303A3A\",\n fontname=\"Arial\",\n fontcolor=\"#ffffff\",\n width=\"1.3\",\n height=\"0.84\", )\n\n def add_arg(self, name):\n return self.graph.node(\n gg.crepr(name),\n prefix=\"arg\",\n shape=\"box\",\n style=\"rounded,filled,bold\",\n fontname=\"Arial\",\n fontcolor=\"#999999\",\n color=\"#dddddd\")\n\n def add_edge(self, source, target, label, **kwargs):\n source = self.nodes[source]\n target = self.nodes[target]\n return self.graph.edge(source, target, **kwargs)\n\n\ndef draw_graph(model_pb_path):\n json_str = load_model(model_pb_path)\n return json_str\n\n\nif __name__ == '__main__':\n import sys\n current_path = os.path.abspath(os.path.dirname(sys.argv[0]))\n json_str = load_model(current_path + \"/mock/inception_v1_model.pb\")\n # json_str = load_model(current_path + \"/mock/squeezenet_model.pb\")\n # json_str = load_model('./mock/shufflenet/model.pb')\n debug_print(json_str)\n assert json_str\n\n g = GraphPreviewGenerator(json_str)\n g('./temp.dot', show=False)\n","sub_path":"visualdl/server/graph.py","file_name":"graph.py","file_ext":"py","file_size_in_byte":14985,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"190047729","text":"#!/usr/bin/env python\nimport sys, random, copy\nimport rospy, tf, rospkg, argparse, actionlib\nfrom gazebo_msgs.srv import SpawnModel\nfrom gazebo_msgs.msg import ModelStates\nfrom gazebo_msgs.msg import ModelState\nfrom gazebo_msgs.srv import SetModelState\nfrom geometry_msgs.msg import *\nfrom std_msgs.msg import *\nimport control_msgs.msg\n\nfrom trajectory_msgs.msg import JointTrajectory\nfrom trajectory_msgs.msg import JointTrajectoryPoint\nfrom gazebo_ros_link_attacher.srv import Attach, AttachRequest, AttachResponse\n\n\nimport moveit_commander\nimport moveit_msgs.msg\nfrom moveit_commander.conversions import pose_to_list\n\n\ndef gripper_control(close):\n\n # Create an action client\n client = actionlib.SimpleActionClient(\n 'simul_robot1/gripper_controller/gripper_cmd', # namespace of the action topics\n control_msgs.msg.GripperCommandAction # action type\n )\n client.wait_for_server()\n goal = control_msgs.msg.GripperCommandGoal()\n if(close == True):\n goal.command.position = 0.7 # From 0.0 to 0.8\n else:\n goal.command.position = 0.0\n\n goal.command.max_effort = 1.0 # Do not limit the effort\n client.send_goal(goal)\n client.wait_for_result()\n return client.get_result()\n\n\nrospy.init_node(\"move_test\")\n'''\n\nmoveit_commander.roscpp_initialize(sys.argv)\nrobot = moveit_commander.RobotCommander()\n\ngroup_name1 = \"arm\"\ngroup1 = moveit_commander.MoveGroupCommander(group_name1)\nscene = moveit_commander.PlanningSceneInterface()\nprint(scene)\n\ngroup1.set_named_target(\"ready\")\nplan1 = group1.plan()\ngroup1.execute(plan1, wait=True)\n'''\n\njoint_state_topic = ['joint_states:=/simul_robot1/joint_states']\n\nm1 = moveit_commander\nm1.roscpp_initialize(joint_state_topic)\n\nrobot = m1.RobotCommander(\"/simul_robot1/robot_description\")\n\nscene = moveit_commander.PlanningSceneInterface()\n\ngroup_name1 = \"arm\"\ngroup1 = 
m1.MoveGroupCommander(group_name1,\"/simul_robot1/robot_description\",\"simul_robot1\")\n\ngroup_env_name = \"hand\"\ngroup_env = m1.MoveGroupCommander(group_env_name,\"/simul_robot1/robot_description\",\"simul_robot1\")\n\nplanning_frame = group1.get_planning_frame()\neef_link = group1.get_end_effector_link()\ngroup_names = robot.get_group_names()\n\nprint(planning_frame)\nprint(\"\")\nprint(eef_link)\nprint(\"\")\nprint(group_names) \nprint(\"\")\nprint(robot.get_current_state())\nprint(\"\")\n\ntouch_links = robot.get_link_names(group=group_env_name)\nprint(touch_links)\nprint(\"\")\nbox_name = \"chair_part3\"\nscene.attach_box(eef_link, box_name, touch_links=touch_links)\n\ngroup1.set_named_target(\"zero\")\nplan1 = group1.plan()\ngroup1.execute(plan1,wait=True)\n\ngroup1.set_named_target(\"ready\")\nplan1 = group1.plan()\ngroup1.execute(plan1,wait=True)\n\n#group1.pick(box_name)","sub_path":"env_setting/src/move_test2.py","file_name":"move_test2.py","file_ext":"py","file_size_in_byte":2701,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"144901271","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun May 17 15:22:24 2020\n\n@author: stevenalsheimer\n\"\"\"\nfrom pyspark import SparkContext\nfrom pyspark.sql.session import SparkSession\nfrom collections import defaultdict\nsc = SparkContext()\nspark = SparkSession(sc)\ndef dd():\n return defaultdict(list)\ndef ddd():\n return defaultdict(dd)\ndef main():\n def pandas_file():\n import pandas as pd\n #CSCL = dfcent.select(\"*\").toPandas()\n CSCL = pd.read_csv('Parking_Violations/nyc_cscl.csv')\n #CSCL = pd.read_csv('nyc_cscl.csv')\n CSCL = CSCL.dropna(subset=['L_LOW_HN','R_LOW_HN','R_HIGH_HN','L_HIGH_HN',], axis=0)\n CSCL['ST_LABEL'] = CSCL['ST_LABEL'].map(lambda x: x.lower())\n CSCL['FULL_STREE'] = CSCL['FULL_STREE'].map(lambda x: x.lower() if pd.notnull(x) else x)\n CSCL['L_LOW_HN'] = CSCL['L_LOW_HN'].map(lambda x: tuple([int(i) for i in x.split('-')]))\n CSCL[['L_LOW_HN','L_LOW_HN1']] = pd.DataFrame(CSCL['L_LOW_HN'].tolist(),index =CSCL.index)\n\n CSCL['L_HIGH_HN'] = CSCL['L_HIGH_HN'].map(lambda x: tuple([int(i) for i in x.split('-')]))\n CSCL[['L_HIGH_HN','L_HIGH_HN1']] = pd.DataFrame(CSCL['L_HIGH_HN'].tolist(),index =CSCL.index)\n\n CSCL['R_LOW_HN'] = CSCL['R_LOW_HN'].map(lambda x: tuple([int(i) for i in x.split('-')]))\n CSCL[['R_LOW_HN','R_LOW_HN1']] = pd.DataFrame(CSCL['R_LOW_HN'].tolist(),index =CSCL.index)\n\n CSCL['R_HIGH_HN'] = CSCL['R_HIGH_HN'].map(lambda x: tuple([int(i) for i in x.split('-')]))\n CSCL[['R_HIGH_HN','R_HIGH_HN1']] = pd.DataFrame(CSCL['R_HIGH_HN'].tolist(),index =CSCL.index)\n \n CSCL = CSCL[['BOROCODE','ST_LABEL','FULL_STREE','L_LOW_HN','L_HIGH_HN','R_LOW_HN','R_HIGH_HN','L_LOW_HN1','L_HIGH_HN1','R_LOW_HN1','R_HIGH_HN1','PHYSICALID']]\n print(\"Mem\",CSCL.memory_usage(index=True, deep=True).sum())\n CSCL = CSCL.values.tolist()\n return CSCL\n #C_file = '/data/share/bdm/nyc_cscl.csv'\n C_file = 'Parking_Violations/nyc_cscl.csv'\n\n dfcent = spark.read.load(C_file, format='csv',\n header = True,\n inferSchema = True)\n def GetPhys(boro):\n ind = CSCL.index[CSCL['BOROCODE']==boro].tolist()\n return CSCL.loc[ind,'PHYSICALID']\n def BoroT(boro):\n if boro == 'K':\n return 3\n if boro == 'BK':\n return 3\n if boro == 'KING':\n return 3\n if boro == 'KINGS':\n return 3\n if boro == 'Q':\n return 4\n if boro == 'QN':\n return 4\n if boro == 'QNS':\n return 4\n if boro == 'QU':\n return 4\n if boro == 'QUEENS':\n return 4\n if boro == 
'BX':\n return 2\n if boro == 'BRONX':\n return 2\n if boro == 'R':\n return 5\n if boro == 'RICHMOND':\n return 5\n if boro == 'MAN':\n return 1\n if boro == 'MH':\n return 1\n if boro == 'MN':\n return 1\n if boro == 'NEWY':\n return 1\n if boro == 'NEW Y':\n return 1\n if boro == 'NY':\n return 1\n\n def processTrips(pid, records):\n import csv\n #CSCL = CSCL_B.value\n CSCL_T = defaultdict(ddd)\n CSCL_T2 = defaultdict(ddd)\n CSCL = pandas_file()\n\n\n for row in CSCL:\n CSCL_T[row[0]][row[1]][row[3],row[4],row[5],row[6],row[7],row[8],row[9],row[10]].append(row[11])\n \n \n for row in CSCL:\n CSCL_T2[row[0]][row[2]][row[3],row[4],row[5],row[6],row[7],row[8],row[9],row[10]].append(row[11])\n \n # Skip the header\n if pid==0:\n next(records)\n reader = csv.reader(records)\n counts = {}\n# CSCL_T = CSCL_TB.value\n# CSCL_T2 = CSCL_T2B.value\n \n for row in reader:\n if row[21] == \"Violation County\":\n continue\n BoroV = BoroT(row[21])\n StreetV = row[24].lower()\n try:\n year = int(row[4].split('/')[2])\n except:\n continue\n House_Num = row[23]\n House_Num = House_Num.split('-')\n if House_Num == None or BoroV == None or StreetV == None:\n pass\n try:\n House_Num = tuple([int(i) if int(i) else 0 for i in House_Num])\n except:\n House_Num = (0,)\n ID=None\n if StreetV in CSCL_T[BoroV]:\n if len(House_Num)==1:\n if House_Num[0]%2==0:\n for key2, val2 in CSCL_T[BoroV][StreetV].items():\n if key2[2]<=House_Num[0] and key2[3]>=House_Num[0]:\n ID = val2[0]\n else:\n for key2, val2 in CSCL_T[BoroV][StreetV].items():\n if key2[0]<=House_Num[0] and key2[1]>=House_Num[0]:\n ID = val2[0]\n if len(House_Num)==2:\n if House_Num[1]%2==0:\n for key2, val2 in CSCL_T[BoroV][StreetV].items():\n if key2[2]<=House_Num[0] and key2[3]>=House_Num[0] and key2[6]<=House_Num[1] and key2[7]>=House_Num[1]:\n ID = val2[0]\n else:\n for key2, val2 in CSCL_T[BoroV][StreetV].items():\n if key2[0]<=House_Num[0] and key2[1]>=House_Num[0] and key2[4]<=House_Num[1] and key2[5]>=House_Num[1]:\n ID = val2[0]\n if StreetV in CSCL_T2[BoroV]:\n if len(House_Num)==1:\n if House_Num[0]%2==0:\n for key2, val2 in CSCL_T2[BoroV][StreetV].items():\n if key2[2]<=House_Num[0] and key2[3]>=House_Num[0]:\n ID = val2[0]\n else:\n for key2, val2 in CSCL_T2[BoroV][StreetV].items():\n if key2[0]<=House_Num[0] and key2[1]>=House_Num[0]:\n ID = val2[0]\n if len(House_Num)==2:\n if House_Num[1]%2==0:\n for key2, val2 in CSCL_T2[BoroV][StreetV].items():\n if key2[2]<=House_Num[0] and key2[3]>=House_Num[0] and key2[6]<=House_Num[1] and key2[7]>=House_Num[1]:\n ID = val2[0]\n else:\n for key2, val2 in CSCL_T2[BoroV][StreetV].items():\n if key2[0]<=House_Num[0] and key2[1]>=House_Num[0] and key2[4]<=House_Num[1] and key2[5]>=House_Num[1]:\n ID = val2[0]\n if ID != None:\n counts[(ID,year)] = counts.get((ID,year), 0) + 1\n return counts.items()\n \n #rdd = sc.textFile('/data/share/bdm/nyc_parking_violation/*.csv')\n rdd = sc.textFile('Parking_Violations/Parking_Violations_Issued_-_Fiscal_Year_2019_MEDIUM.csv')\n counts = rdd.mapPartitionsWithIndex(processTrips) \\\n .reduceByKey(lambda x,y: x+y) \\\n .map(lambda x: (x[0][0],(x[1],x[0][1]))) \\\n# .collect()\n# counts[:20]\n from pyspark.sql.functions import col, when, lit, array\n DF_C = counts.toDF([\"PHYSID\",\"YearCount\"])\n DF_C = DF_C.withColumn(\"YearCount\", array([col(\"YearCount\").getField(\"_1\"),col(\"YearCount\").getField(\"_2\")]))\n DF_C = DF_C.withColumn('2019',when(DF_C.YearCount[1]==2019,DF_C.YearCount[0]).otherwise(0))\n DF_C = 
DF_C.withColumn('2018',when(DF_C.YearCount[1]==2018,DF_C.YearCount[0]).otherwise(0))\n DF_C = DF_C.withColumn('2017',when(DF_C.YearCount[1]==2017,DF_C.YearCount[0]).otherwise(0))\n DF_C = DF_C.withColumn('2016',when(DF_C.YearCount[1]==2016,DF_C.YearCount[0]).otherwise(0))\n DF_C = DF_C.withColumn('2015',when(DF_C.YearCount[1]==2015,DF_C.YearCount[0]).otherwise(0))\n DF_C = DF_C.select(DF_C[\"PHYSID\"],DF_C[\"2015\"],DF_C[\"2016\"],DF_C[\"2017\"],DF_C[\"2018\"],DF_C[\"2019\"])\n from pyspark.sql.functions import size, col,split\n dfcent = dfcent.select('PHYSICALID')\n dfcent = dfcent.withColumn('2015',lit(0))\n dfcent = dfcent.withColumn('2016',lit(0))\n dfcent = dfcent.withColumn('2017',lit(0))\n dfcent = dfcent.withColumn('2018',lit(0))\n dfcent = dfcent.withColumn('2019',lit(0))\n DF_C = DF_C.union(dfcent)\n DF_C = DF_C.groupby('PHYSID').sum('2015','2016','2017','2018','2019')\n DF_C = DF_C.withColumn('OLS',(-2*(DF_C[1]-((DF_C[1]+DF_C[2]+DF_C[3]+DF_C[4]+DF_C[5])/5))-1*(DF_C[2]-((DF_C[1]+DF_C[2]+DF_C[3]+DF_C[4]+DF_C[5])/5))-0*(DF_C[3]-((DF_C[1]+DF_C[2]+DF_C[3]+DF_C[4]+DF_C[5])/5))+1*(DF_C[4]-((DF_C[1]+DF_C[2]+DF_C[3]+DF_C[4]+DF_C[5])/5))+2*(DF_C[5]-((DF_C[1]+DF_C[2]+DF_C[3]+DF_C[4]+DF_C[5])/5)))/(10))\n DF_C = DF_C.orderBy('PHYSID')\n\n DF_C.show()\n #DF_C.write.csv('Output2019_test.csv')\n\nif __name__ == \"__main__\":\n# sc = SparkContext()\n# spark = SparkSession(sc)\n main()\n","sub_path":"BDM_SFinalChallenge_519_912.py","file_name":"BDM_SFinalChallenge_519_912.py","file_ext":"py","file_size_in_byte":9063,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"508408935","text":"#Meltdays\n\nimport numpy as np\n\ndef meltdays(numbelow,pixbtvals,stopthresh, meltthresh):\n \"\"\"\n meltdays(numbelow,pixbtvals) returns the number of days of melting\n in the given dataset.\n\n stopthresh input is the slope threshold for assuming slope has\n significantly decreased. Should be value between 0 and 1, and finds if\n slope < stopthresh*maxslope\n \"\"\"\n\n veclen=len(pixbtvals)\n\n if veclen < 10:\n return 0\n\n #print pixbtvals\n #print len(pixbtvals)\n\n #5 point stencil to solve slope\n slope5=np.zeros(veclen-4)\n\n #average slope across 3 points:\n s3a=np.zeros(veclen-6)\n\n slopeindex=np.arange(2,veclen-2)\n s3aindex=np.arange(3,veclen-3)\n #provides index values for\n\n slopelen=len(slopeindex)\n\n #ok fine, I'll just use a for loop I guess\n\n for i in range(veclen-4):\n #slope5 index is shifted over 2 to the right, so corrosponding\n #indicies of pixbtvals are i+2\n slope5[i]=1./12*(-pixbtvals[i+4]+8*pixbtvals[i+3]-8*pixbtvals[i+1]+pixbtvals[i])\n\n for i in range(veclen-6):\n s5i=i+1 #slope5 index\n s3a[i]=(slope5[s5i-1]+slope5[s5i]+slope5[s5i+1])/3\n\n\n maxslope=max(s3a[int(float(slopelen)*(65./100)):])\n\n maxslopeloc=s3aindex[s3a==maxslope]\n\n stoploop=True\n meltloc = maxslopeloc[0] # Using [0] prevents both varaible from having\n #the same reference point.\n\n if type(veclen) != int or float:\n if len(meltloc) > 1:\n meltlocset = meltloc[meltloc>float(slopelen)*(65./100)]\n meltloc = meltlocset[0]\n #meltloc = meltloc[0]\n\n #print meltloc\n #print veclen\n\n #veclen - 3 (for s5a indexloc) and - 1 (for comparing a length to index)\n #Thus when comparing meltloc to veclen use:\n if meltloc < veclen - 4:\n while stoploop:\n meltloc += 1\n\n #At only 3 days of melting, then stop. 
Assume no melting.\n if meltloc == veclen - 4:\n stoploop = False\n\n\n if s3a[meltloc - 3] < stopthresh * maxslope:\n stoploop = False\n\n meltlength = veclen - meltloc\n\n #print meltloc\n\n for btval in pixbtvals[meltloc:]:\n if btval < meltthresh:\n meltlength -= 1\n\n if meltlength < 4:\n meltlength = 0\n\n return meltlength\n","sub_path":"meltdays.py","file_name":"meltdays.py","file_ext":"py","file_size_in_byte":2315,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"74291687","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n# @Time : 2019/11/22 9:57\n# @Author : wiken\n# @Site : \n# @File : system_service.py\n# @Software: PyCharm\n# @Desc :\n\nfrom datetime import datetime\nimport requests\n\n\nBASE_URL = \"http://192.168.0.100:5000\"\n\n\ndef now():\n url = BASE_URL + \"/api/System/GetServerInfo\"\n res = requests.get(url)\n return parse_time(res.json())\n\n\ndef parse_time(time_dic):\n if time_dic.get(\"isSuccess\"):\n _time = time_dic.get(\"data\", {}).get(\"serverTime\")\n _time = _time[:19]\n a = datetime.strptime(_time, \"%Y-%m-%dT%H:%M:%S\")\n return a\n else:\n return None\n\nif __name__ == '__main__':\n print(now())","sub_path":"build/lib/ys_service/http_service/system_service.py","file_name":"system_service.py","file_ext":"py","file_size_in_byte":684,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"93265268","text":"import uuid\n\nfrom django.contrib.auth.mixins import LoginRequiredMixin\nfrom django.shortcuts import render, redirect, get_object_or_404\nfrom django.contrib.auth.views import logout_then_login\n\n# Create your views here.\nfrom django.views import View\nfrom rest_framework import viewsets\n\nfrom base.forms import PersonagemForm\nfrom base.models.classe import Classe\nfrom base.models.inventario import Inventario, InventarioItem\nfrom base.models.personagem import Personagem\nfrom base.models.pocao import Pocao\nfrom base.serializers import ClasseSerializer\nfrom base.util.util import valida_jogador\n\n\ndef logout_view(request):\n del request.session['player_id']\n logout_then_login(request, '')\n\n\nclass PersonagensListView(LoginRequiredMixin, View):\n login_url = '/'\n template = 'base/personagem/personagens.html'\n\n def get(self, request):\n jogadores = Personagem.objects.filter(user=request.user)\n return render(request, self.template, {'personagens': jogadores})\n\n\nclass PersonagemCreatedView(LoginRequiredMixin, View):\n login_url = '/'\n template = 'base/personagem/novo_personagem.html'\n\n def get(self, request):\n form = PersonagemForm()\n return render(request, self.template, {'form': form})\n\n def post(self, request):\n form = PersonagemForm(request.POST)\n if form.is_valid():\n # Obter Classe.\n classe = get_object_or_404(Classe, pk=form.cleaned_data['classe'].pk)\n # Criar o Jogador\n personagem = Personagem()\n personagem.criar_personagem(classe, request.user, form.cleaned_data['nome'])\n\n # Cria Invetario para o personagem\n inv = Inventario()\n inv.criar_inventario(personagem)\n\n p_hp = Pocao.objects.get(pk=1)\n p_energia = Pocao.objects.get(pk=2)\n p_raiva = Pocao.objects.get(pk=3)\n\n InventarioItem.objects.bulk_create([\n InventarioItem(id=uuid.uuid4(), pocao=p_hp, inventario=inv),\n InventarioItem(id=uuid.uuid4(), pocao=p_energia, inventario=inv),\n InventarioItem(id=uuid.uuid4(), pocao=p_raiva, inventario=inv)]\n )\n\n return redirect('personagens')\n\n\nclass PersonagemDeleteView(LoginRequiredMixin, View):\n 
template = 'base/personagem/personagens.html'\n login_url = '/'\n\n def get(self, request, player):\n personagem_deletar = Personagem.objects.get(pk=player)\n if valida_jogador(request, personagem_deletar):\n personagem_deletar.delete()\n return redirect('personagens')\n\n\nclass SelecionarView(LoginRequiredMixin, View):\n login_url = '/'\n template = 'base/cidade.html'\n\n def get(self, request):\n jogador = Personagem.objects.get(pk=request.session['player_id'])\n if valida_jogador(request, jogador):\n jogador.refresh()\n jogador.save()\n\n return render(request, self.template, {'personagem': jogador})\n\n def post(self, request):\n player = request.POST['player']\n request.session['player_id'] = player\n jogador = Personagem.objects.get(pk=player)\n jogador.refresh()\n jogador.save()\n return render(request, self.template, {'personagem': jogador})\n\n\nclass PesonagemDetailView(LoginRequiredMixin, View):\n\n template = 'base/personagem/personagem_detail.html'\n\n def get(self, request):\n personagem = Personagem.objects.get(pk=request.session['player_id'])\n if valida_jogador(request, personagem):\n personagem.refresh()\n inv = Inventario.objects.get(personagem=personagem) # Feito assim para ficar mais transparente pode não ser\n itens = list(inv.itens.all()) # a melhor forma mas é mais e de facil entendimento\n return render(request, self.template, {'personagem': personagem, 'itens': itens})\n\n\ndef distribuir_atributo(personagem, atributo):\n\n personagem.refresh()\n personagem.save()\n if personagem.pontos > 0:\n if atributo == 'ataque':\n personagem.forca += 1\n elif atributo == 'agilidade':\n personagem.agilidade += 1\n elif atributo == 'inteligencia':\n personagem.inteligencia += 1\n elif atributo == 'sabedoria':\n personagem.sabedoria += 1\n elif atributo == 'carisma':\n personagem.carisma += 1\n elif atributo == 'energia':\n personagem.energia += 1\n elif atributo == 'raiva':\n personagem.raiva += 1\n personagem.pontos -= 1\n personagem.save()\n\n\nclass AddAtaque(LoginRequiredMixin, View):\n login_url = '/'\n\n def get(self, request):\n personagem = Personagem.objects.get(pk=request.session['player_id'])\n if valida_jogador(request, personagem):\n distribuir_atributo(personagem, 'ataque')\n return redirect('personagem_detail')\n\n\nclass AddAgilidade(LoginRequiredMixin, View):\n login_url = '/'\n\n def get(self, request):\n personagem = Personagem.objects.get(pk=request.session['player_id'])\n if valida_jogador(request, personagem):\n distribuir_atributo(personagem, 'agilidade')\n return redirect('personagem_detail')\n\n\nclass AddInteligencia(LoginRequiredMixin, View):\n login_url = '/'\n\n def get(self, request):\n personagem = Personagem.objects.get(pk=request.session['player_id'])\n if valida_jogador(request, personagem):\n distribuir_atributo(personagem, 'inteligencia')\n return redirect('personagem_detail')\n\n\nclass AddSabedoria(LoginRequiredMixin, View):\n login_url = '/'\n\n def get(self, request):\n personagem = Personagem.objects.get(pk=request.session['player_id'])\n if valida_jogador(request, personagem):\n distribuir_atributo(personagem, 'sabedoria')\n return redirect('personagem_detail')\n\n\nclass AddCarisma(LoginRequiredMixin, View):\n login_url = '/'\n\n def get(self, request):\n personagem = Personagem.objects.get(pk=request.session['player_id'])\n if valida_jogador(request, personagem):\n distribuir_atributo(personagem, 'carisma')\n return redirect('personagem_detail')\n\n\nclass AddEnergia(LoginRequiredMixin, View):\n login_url = '/'\n\n def get(self, 
request):\n personagem = Personagem.objects.get(pk=request.session['player_id'])\n if valida_jogador(request, personagem):\n distribuir_atributo(personagem, 'energia')\n return redirect('personagem_detail')\n\n\nclass AddRaiva(LoginRequiredMixin, View):\n login_url = '/'\n\n def get(self, request):\n personagem = Personagem.objects.get(pk=request.session['player_id'])\n if valida_jogador(request, personagem):\n distribuir_atributo(personagem, 'raiva')\n return redirect('personagem_detail')\n\n\nclass ClasseViewSet(viewsets.ModelViewSet):\n\n \"\"\"\n API endpoint that allows users to be viewed or edited.\n \"\"\"\n queryset = Classe.objects.all()\n serializer_class = ClasseSerializer","sub_path":"base/views/personagem.py","file_name":"personagem.py","file_ext":"py","file_size_in_byte":7038,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"127362945","text":"from flask import Flask, render_template\nfrom flask_sqlalchemy import SQLAlchemy\n\nimport os\n\ndbDir = \"sqlite:///\" + os.path.abspath(os.getcwd()) + \"/database.db\"\n\napp = Flask(__name__)\napp.config[\"SQLALCHEMY_DATABASE_URI\"] = dbDir\napp.config[\"SQLALCHEMY_TRACK_MODIFICATIONS\"] = False\ndb = SQLAlchemy(app)\n\nclass Posts(db.Model):\n\tid = db.Column(db.Integer, primary_key = True)\n\ttitleD = db.Column(db.String(30))\t\t\n\n@app.route(\"/filePython//\")\ndef userA(project = \"Project\", user = \"Linares\"):\n\ttitle = \"Welcome!!\"\n\tlis = []\n\treturn render_template(\"Index.html\", title=title, lis=lis)\n\n@app.route(\"/insert/inquilino\") \ndef insert_inquilino():\n\t#new_post = Posts(titleD = \"Resident sick\")\n\t#db.session.add(new_post)\n\t#db.session.commit()\n\tinquilino_Created = \"The new inquilino was created\"\n\n\treturn render_template(\"insert_inquilino.html\", inquilino = inquilino_Created)\n \n@app.route(\"/select/inquilino\") \ndef queryId_inquilino():\n\tpost = Posts.query.filter_by(id = 4).first()\n\tqueryR = \"The result of the Query is: \", post.titleD\n\n\treturn render_template(\"findInquilinos.html\", queryR = queryR)\n\nif __name__ == \"__main__\":\n\tdb.create_all()\n\tapp.run(debug = True, host ='0.0.0.0', port = 8090)","sub_path":"Excersice.py","file_name":"Excersice.py","file_ext":"py","file_size_in_byte":1221,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"59703138","text":"import numpy as np\nimport random as rand\nimport matplotlib.pyplot as plt\n#import sys\n\n\n# function to determine fractal dimension of the food\n#def fractal_dimension(Z, label):\n#\n# # Only for 2d image\n# assert(len(Z.shape) == 2)\n#\n# def boxcount(Z, k):\n# points = 0\n# for i in range(k):\n# for j in range(k):\n# if Z[i,j] == label:\n# points += 1\n# return points\n# # Minimal dimension of image\n# p = min(Z.shape)\n#\n# # Greatest power of 2 less than or equal to p\n# n = 2**np.floor(np.log(p)/np.log(2))\n#\n# # Extract the exponent\n# n = int(np.log(n)/np.log(2))\n#\n# # Build successive box sizes (from 2**n down to 2**1)\n# sizes = 2**np.arange(n, 1, -1)\n#\n# # Actual box counting with decreasing size\n# counts = []\n# for size in sizes:\n# counts.append(boxcount(Z, size))\n#\n# # Fit the successive log(sizes) with log (counts)\n# #plt.plot(sizes, counts)\n# coeffs = np.polyfit(np.log(sizes), np.log(counts), 1)\n# return coeffs[0]\n# this turned out to be useless\n\n\ndef rw(L, N, pot, f): #boxsize, number of walkers, time, food-parameter L=int*10\n from timeit import default_timer as timer 
#timer\n start = timer()\n mat = f * np.ones((L,L)) #matrix full of ones, one means position contains food\n pos = np.zeros((N,2)) #x and y coordinate for all N walkers\n \n grid = [] #grid where you save the msds\n for k in list(np.round(np.logspace(0, pot))):\n if k not in grid:\n grid.append(k)\n msd = np.zeros((N, len(grid)))\n fpercentage = np.zeros(len(grid))\n #fdimension = np.zeros(len(grid))\n fmats = np.zeros((len(grid), L, L))\n rho = np.zeros((len(grid), int(L/10), int(L/10)))\n corr = np.zeros((len(grid), int(L/10), int(L/10)))\n i = 0\n \n for n in range(N): #search iid starting pos for all walkers\n pos[n,:] = np.array([rand.randint(0,L-1), rand.randint(0,L-1)])\n startconf = pos.copy() # save the starting config to calc the msd\n for n in range(N):\n mat[int(pos[n,1]), int(pos[n,0])] = 0 #set entry on which a walker sits to zero, so allways pacman eats all the food\n #print(startconf)\n \n #calc all the prob to move, than move all walkers at the same time, that everyone sees the same food\n prob = np.zeros((N,4)) #up, down, left, right\n for n in range(N):\n prob[n,0] = np.exp(mat[int((pos[n,1] + 1)%L), int(pos[n,0])]) #up\n prob[n,1] = np.exp(mat[int((pos[n,1] - 1)%L), int(pos[n,0])]) #down\n prob[n,2] = np.exp(mat[int(pos[n,1]), int((pos[n,0] + 1)%L)]) #left\n prob[n,3] = np.exp(mat[int(pos[n,1]), int((pos[n,0] - 1)%L)]) #right\n prob[n,:] = prob[n,:] / sum(prob[n,:]) #normalization\n \n #move all walkers and change the food matrix afterwards\n lr_wall = np.zeros(N) #count the moves across the pbc \n ud_wall = np.zeros(N)\n deltax = np.zeros(N)\n deltay = np.zeros(N)\n for t in range(1, int(10**pot) + 1):\n #calc all the prob to move, than move all walkers at the same time, that everyone sees the same food\n prob = np.zeros((N,4)) #up, down, left, right\n for n in range(N):\n prob[n,0] = np.exp(mat[int((pos[n,1] + 1)%L), int(pos[n,0])]) #up\n prob[n,1] = np.exp(mat[int((pos[n,1] - 1)%L), int(pos[n,0])]) #down\n prob[n,2] = np.exp(mat[int(pos[n,1]), int((pos[n,0] + 1)%L)]) #left\n prob[n,3] = np.exp(mat[int(pos[n,1]), int((pos[n,0] - 1)%L)]) #right\n prob[n,:] = prob[n,:] / sum(prob[n,:]) #normalization\n #print(t, prob)\n \n\n for n in range(N):\n #move all walkers and change the food matrix afterwards\n rn = rand.random()\n #print(rn)\n if rn < prob[n,0]:\n lr_wall[n] = lr_wall.copy()[n] + ((pos.copy()[n,0] + 1) // L)\n pos[n,0] = (pos[n,0] + 1) % L\n if rn > prob[n,0] and rn < prob[n,0] + prob[n,1]:\n lr_wall[n] = lr_wall.copy()[n] + ((pos.copy()[n,0] - 1) // L)\n pos[n,0] = (pos[n,0] - 1) % L\n if rn > prob[n,0] + prob[n,1] and rn < 1 - prob[n,3]:\n ud_wall[n] = ud_wall.copy()[n] + ((pos.copy()[n,1] + 1) // L)\n pos[n,1] = (pos[n,1] + 1) % L\n if rn > 1 - prob[n,3]:\n ud_wall[n] = ud_wall.copy()[n] + ((pos.copy()[n,1] - 1) // L)\n pos[n,1] = (pos[n,1] - 1) % L\n #print(pos)\n #print('lr:', lr_wall)\n #print('ud:', ud_wall)\n #print('dx:', deltax)\n #print('dy:', deltay)\n #print(msd)\n #now change the matrix\n for n in range(N):\n mat[int(pos[n,1]), int(pos[n,0])] = 0 #set entry on which a walker sits to zero, so allways pacman eats all the food\n #print(mat)\n if t in grid:\n deltax = (pos[:,0] - startconf[:,0]) + lr_wall * L\n deltay = (pos[:,1] - startconf[:,1]) + ud_wall * L \n msd[:,i] = deltax**2 + deltay**2\n fpercentage[i] = sum(sum(mat)) / (f * L**2)\n #fdimension[i] = fractal_dimension(mat, f)\n fmats[i] = mat\n for n in range(N): \n rho[i,int(pos[n,1]/10),int(pos[n,0]/10)] += 100/(L*L)\n \n for dx in range(int(L/10)):\n for dy in range(int(L/10)):\n for x0 
in range(int(L/10)):\n for y0 in range(int(L/10)):\n corr[i,dy,dx] += rho[i,y0,x0]*rho[i,(y0+dy)%int(L/10),(x0+dx)%int(L/10)]\n #corr[i] -= (N/(L*L))**2\n i = i + 1\n end = timer()\n print(end - start)\n return msd, fpercentage, corr\n \n \ndef multiple_walks(L, N, pot, f, n):#n number of walks\n grid = [] \n for k in list(np.round(np.logspace(0, pot))):\n if k not in grid:\n grid.append(k)\n msd = np.zeros((N,len(grid)))\n fpercentage = np.zeros(len(grid))\n #fdimension = np.zeros(len(grid))\n corr = np.zeros((len(grid),int(L/10),int(L/10)))\n for i in range(n): #average over the n walks\n raw = rw(L, N, pot, f)\n msd, fpercentage, corr = msd + raw[0], fpercentage + raw[1], corr + raw[2] \n avgmsd = sum(msd) / (N*n)\n fpercentage = fpercentage / n\n corr = corr / n\n #fdimension = fdimension / n\n for i in range(2, len(grid)-2):\n avgmsd[i] = (avgmsd[i+2] + avgmsd[i+1] + avgmsd[i] + avgmsd[i-1] + avgmsd[i-2]) / 5.0\n diffmsd = np.zeros((len(grid)))\n for i in range(len(grid) - 1):\n diffmsd[i] = (avgmsd[i+1] - avgmsd[i])/(grid[i+1] - grid[i])\n diffmsd[len(grid) - 1] = diffmsd[len(grid) - 2]\n return avgmsd, grid, fpercentage, diffmsd, corr \n \n#f = int(sys.argv[1])\n \navgmsd, grid, fpercentage, diffmsd, corr = multiple_walks(100, 1000, 5, 5, 20)\n\n#diff_avg = diff.copy() \n#for k in range(2,len(diff)-2):\n# diff_avg[k] = (diff[k-2] + diff[k-1] + diff[k] + diff[k+1] + diff[k+2])/5.0\n \n \n#plt.figure(num=None, figsize=(6, 10), dpi=80, facecolor='w', edgecolor='k')\n#plt.subplot(4,1,1)\n#plt.loglog(grid,avgmsd,label='$F=5$',lw = 0.5)\n#plt.loglog(grid,grid,label='$reference$',lw = 0.5)\n#plt.title('$0\\%\\ density$')\n##plt.xlabel('$steps$', size = 15)\n#plt.ylabel('$msd$', size = 15)\n#plt.legend()\n#plt.subplot(4,1,2)\n#plt.plot(grid,fpercentage)\n#plt.xscale('log')\n##plt.xlabel('$steps$', size = 15)\n#plt.ylabel('$food$', size = 15)\n#plt.subplot(4,1,3)\n#plt.plot(grid,fdim)\n#plt.xscale('log')\n#plt.xlabel('$steps$', size = 15)\n#plt.ylabel('$fractal\\ dim$', size = 15)\n#plt.subplot(4,1,4)\n#plt.plot(grid, diff / avgmsd * grid, label='$central\\ difference$')\n#plt.plot(grid, diff_avg / avgmsd * grid, label='$local\\ mean$')\n#plt.legend()\n#plt.xscale('log')\n#plt.xlabel('$steps$', size = 15)\n#plt.ylabel('$diff\\ exponent$', size = 15)\n#plt.savefig('0dens.pdf')\nnp.savetxt('msd.dat', avgmsd)\nnp.savetxt('food.dat', fpercentage)\nnp.savetxt('diff.dat', diffmsd)\nnp.savetxt('corr.dat', corr)\n\n","sub_path":"mult_walker_food/rw_with_food_multiple_walkers.py","file_name":"rw_with_food_multiple_walkers.py","file_ext":"py","file_size_in_byte":7956,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"123112919","text":"import argparse\n\nimport numpy\n\nfrom .._helpers import read, reader_map\nfrom ._helpers import _get_version_text\n\n\ndef info(argv=None):\n # Parse command line arguments.\n parser = _get_info_parser()\n args = parser.parse_args(argv)\n\n # read mesh data\n mesh = read(args.infile, file_format=args.input_format)\n print(mesh)\n\n # check if the cell arrays are consistent with the points\n is_consistent = True\n for cells in mesh.cells:\n if numpy.any(cells.data > mesh.points.shape[0]):\n print(\"\\nATTENTION: Inconsistent mesh. 
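# The random-walk script above reports a diffusion exponent via the finite
# difference diffmsd / avgmsd * t. A compact numpy sketch of the same estimate,
# alpha(t) = d log(MSD) / d log(t), on synthetic power-law data; the real
# script obtains `grid` and `avgmsd` from multiple_walks().
import numpy as np

grid = np.unique(np.round(np.logspace(0, 5)))   # same log-spaced time grid idea
alpha_true = 0.8                                # toy subdiffusive exponent
msd = grid ** alpha_true
alpha_est = np.gradient(np.log(msd), np.log(grid))   # local log-log slope
print(alpha_est.min(), alpha_est.max())              # both close to 0.8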
Cells refer to nonexistent points.\")\n is_consistent = False\n break\n\n # check if there are redundant points\n if is_consistent:\n point_is_used = numpy.zeros(mesh.points.shape[0], dtype=bool)\n for cells in mesh.cells:\n point_is_used[cells.data] = True\n if numpy.any(~point_is_used):\n print(\"ATTENTION: Some points are not part of any cell.\")\n\n\ndef _get_info_parser():\n parser = argparse.ArgumentParser(\n description=(\"Print mesh info.\"), formatter_class=argparse.RawTextHelpFormatter\n )\n\n parser.add_argument(\"infile\", type=str, help=\"mesh file to be read from\")\n\n parser.add_argument(\n \"--input-format\",\n \"-i\",\n type=str,\n choices=sorted(list(reader_map.keys())),\n help=\"input file format\",\n default=None,\n )\n\n parser.add_argument(\n \"--version\",\n \"-v\",\n action=\"version\",\n version=_get_version_text(),\n help=\"display version information\",\n )\n return parser\n","sub_path":"meshio/_cli/_info.py","file_name":"_info.py","file_ext":"py","file_size_in_byte":1594,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"119224533","text":"from django.shortcuts import render\nimport datetime\nimport random\nimport itertools\nfrom scheduler.schedule import IPL\nfrom django.http import JsonResponse\n# Create your views here.\n\n\ndef isweekend(currDate):\n if currDate.weekday()>=5: #weekday() runs 0-6; 5 and 6 are Saturday and Sunday\n return True\n else:\n return False\n\ndef setDates(schedule,startingDate):\n currDate=startingDate \n schedule[0]['date']=currDate.strftime('%d/%m/%Y')\n schedule[0]['count']=1\n weekendCount=0\n for i in range(1,len(schedule)):\n if not isweekend(currDate):\n currDate+=datetime.timedelta(days=1)\n else:\n weekendCount+=1\n if weekendCount==2:\n currDate+=datetime.timedelta(days=1)\n weekendCount=0\n schedule[i]['date']=currDate.strftime('%d/%m/%Y')\n schedule[i]['count']=i+1\n return schedule\n\ndef setSchedule(startDate):\n teams=['Team A','Team B','Team C','Team D','Team E','Team F','Team G','Team H']\n stadiums=['Chennai','Kochi','Bangalore','Pune','Bombay','Delhi','Jaipur','Kolkata']\n roundRobin=IPL(teams)\n roundRobin.createMatches()\n if roundRobin.finished == True:\n matches=(roundRobin.matches)\n\n homeMatches=[]\n awayMatches=[]\n for matchPair in matches:\n homeMatch={}\n awayMatch={}\n homeMatch['home-team']=matchPair[0]\n homeMatch['away-team']=matchPair[1]\n homeMatch['match']=matchPair[0]+\" vs \"+matchPair[1]\n homeMatch['stadium']=stadiums[teams.index(matchPair[0])]\n awayMatch['home-team']=matchPair[1]\n awayMatch['away-team']=matchPair[0]\n awayMatch['match']=matchPair[1]+\" vs \"+matchPair[0]\n awayMatch['stadium']=stadiums[teams.index(matchPair[1])]\n homeMatches.append(homeMatch)\n awayMatches.append(awayMatch)\n matches=homeMatches+awayMatches\n return setDates(matches,startDate)\ndef schedule(req,year,month,day):\n startDate=datetime.date(year,month,day)\n print(year,month,day)\n print(startDate)\n return JsonResponse(setSchedule(startDate),safe=False)\n\n","sub_path":"scheduler/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2039,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"186103624","text":"class Date(object):\n def __init__(self, year, month, day):\n self.year = year\n self.month = month\n self.day = day\n\n @classmethod\n def from_string(cls, s):\n parts = s.split('-')\n return cls(int(parts[0]), int(parts[1]), int(parts[2]))\n\n @classmethod\n def start_date(cls):\n print(cls)\n return cls(1970, 1, 1)\n\n @staticmethod\n 
def util_plus(a, b):\n return a+b\n\n def __repr__(self):\n return f'Date({self.year}, {self.month}, {self.day})'\n\n def __str__(self):\n return f'{self.year}-{self.month}-{self.day}'\n\n\ndef print_obj(objects, attributes):\n for obj in objects:\n for attr in attributes:\n a = str(getattr(obj, attr))\n print(f'{a:>5s}', end=' ')\n print()\n\nd0 = Date.start_date()\nprint(d0)\n\nd1 = Date(2002, 12, 25)\nprint(d1.year)\nprint(getattr(d1, 'year'))\nprint(d1)\n\nd2 = Date.from_string('1998-07-28')\nprint(d2)\n\nprint_obj([d1, d2], ['year', 'month', 'day'])\nprint_obj([d2, d1], ['month', 'day', 'year'])\n\nclass MyDate(Date):\n pass\n\nd88 = MyDate.start_date()\nprint(d88)\n\nDate.util_plus(1, 2)\nDate.__dict__['util_plus'].__func__(2, 8)\nDate.__dict__['from_string'].__func__(Date, '2008-08-31')","sub_path":"PythonProgrammingLanguage/oop/class_basics.py","file_name":"class_basics.py","file_ext":"py","file_size_in_byte":1227,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"382984745","text":"from django import template as django_template\nfrom django.template import loader as django_template_loader\nfrom google.appengine.ext import db\nfrom google.appengine.ext.webapp.template import _swap_settings\nfrom settings import BASE_DIR\n\nimport config\nimport models\nimport os\n\n\nconfig_cache = config.ConfigCache()\n\nif isinstance(config_cache.theme, (list, tuple)):\n TEMPLATE_DIRS = config_cache.theme\nelse:\n TEMPLATE_DIRS = [os.path.abspath(os.path.join(BASE_DIR, 'themes', 'default'))]\n if config_cache.theme and config_cache.theme != 'default':\n TEMPLATE_DIRS.insert(0, os.path.abspath(os.path.join(BASE_DIR, 'themes', config_cache.theme)))\n\n\ndef get_context_defaults(context=None):\n if context is None:\n context = {}\n\n pages = db.Query(models.Page)\n\n context.update({\n 'config': config_cache,\n 'devel': os.environ['SERVER_SOFTWARE'].startswith('Devel'),\n 'gallery': config.LazyGallery(),\n 'pages': pages.fetch(10),\n })\n return context\n\n\ndef render(template, context=None):\n context = get_context_defaults(context)\n context.update({'template': template})\n old_settings = _swap_settings({'TEMPLATE_DIRS': TEMPLATE_DIRS})\n try:\n template = django_template_loader.get_template(template)\n return template.render(django_template.Context(context))\n finally:\n _swap_settings(old_settings)\n","sub_path":"utils/templates.py","file_name":"templates.py","file_ext":"py","file_size_in_byte":1387,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"344107922","text":"'''\nCreated on Mar 13, 2012\n\n@author: moloch\n'''\n\nfrom models.User import User\nfrom models.Action import Action\nfrom libs.SecurityDecorators import authenticated\nfrom tornado.web import RequestHandler #@UnresolvedImport\nfrom libs.SEManager import SEManager\nfrom libs.Session import SessionManager\nfrom libs.WebSocketManager import WebSocketManager\nfrom libs.Notification import Notification\n\nclass SocialHomeHandler(RequestHandler):\n \n def initialize(self, dbsession):\n self.dbsession = dbsession\n self.session_manager = SessionManager.Instance()\n self.session = self.session_manager.get_session(self.get_secure_cookie('auth'), self.request.remote_ip)\n\n \n @authenticated\n def get(self, *args, **kwargs):\n se_manager = SEManager.Instance()\n self.render(\"se/view.html\", current_se = se_manager.get_current())\n \n @authenticated\n def post(self, *args, **kwargs):\n try:\n token = self.get_argument(\"token\")\n 
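# render() in the App Engine templates helper above swaps Django template
# settings and restores them in a finally block. The same pattern reads
# naturally as a context manager; a standalone sketch with a faked
# _swap_settings (the real one comes from google.appengine.ext.webapp.template):
from contextlib import contextmanager

_settings = {"TEMPLATE_DIRS": ["/themes/default"]}

def _swap_settings(new):
    # stand-in for the App Engine helper: install new values, return the old ones
    old = {k: _settings.get(k) for k in new}
    _settings.update(new)
    return old

@contextmanager
def swapped_settings(new):
    old = _swap_settings(new)
    try:
        yield
    finally:
        _swap_settings(old)

with swapped_settings({"TEMPLATE_DIRS": ["/themes/custom"]}):
    print(_settings["TEMPLATE_DIRS"])  # ['/themes/custom'] while swapped
print(_settings["TEMPLATE_DIRS"])      # ['/themes/default'] restored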
except:\n self.render('se/submit.html', message=\"Please enter a token!\")\n return\n \n user = User.by_user_name(self.session.data['user_name'])\n se_manager = SEManager.Instance()\n challenge = se_manager.active_challenge\n if token == se_manager.active_challenge.token:\n se_manager.active_challenge.team_id = user.team.id\n action = Action(\n classification = unicode(\"Defeated a Social Engineering Challenge\"),\n description = unicode(\"%s successfully defeated the level %s Social Engineering Challenge\" % (user.display_name, se_manager.active_challenge.level)),\n value = challenge.value,\n user_id = user.id)\n se_manager.update_challenge()\n self.notify(user, challenge)\n self.dbsession.add(challenge)\n self.dbsession.add(user)\n self.dbsession.add(action)\n self.dbsession.flush()\n self.render('se/submit.html', message = \"You have successfully completed a Social Engineering Round!\")\n \n def notify(self, user, se):\n '''Send a notification to everyone that a round of the social engineering challenge has updated '''\n title = \"Social Engineering Round Complete!\"\n message = unicode(\"%s successfully defeated the level %s Social Engineering Challenge\" % (user.display_name, se.level))\n file_path = self.application.settings['avatar_dir']+'/'+user.avatar\n ws_manager = WebSocketManager.Instance()\n notify = Notification(title, message, file_location = file_path)\n ws_manager.send_all(notify)\n ","sub_path":"handlers/SocialHandlers.py","file_name":"SocialHandlers.py","file_ext":"py","file_size_in_byte":2654,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"198299337","text":"# Copyright (C) 2019 - TODAY, Open Source Integrators\n# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).\n\nfrom odoo import fields, models\n\n\nclass ResConfigSettings(models.TransientModel):\n _inherit = 'res.config.settings'\n\n us_check_layout = fields.Selection(\n related='company_id.us_check_layout', string=\"Check Layout\",\n help=\"Select the format corresponding to the check paper you will be \"\n \"printing your checks on.\\nIn order to disable the printing \"\n \"feature, select 'None'.\", store=True, readonly=False)\n us_check_multi_stub = fields.Boolean(\n related='company_id.us_check_multi_stub',\n string='Multi-Pages Check Stub',\n help=\"This option allows you to print check details (stub) on \"\n \"multiple pages if they don't fit on a single page.\",\n default=False, store=True, readonly=False)\n us_check_margin_top = fields.Float(\n related='company_id.us_check_margin_top', string='Top Margin',\n help=\"Adjust the margins of generated checks to make it fit your \"\n \"printer's settings.\", store=True, readonly=False)\n us_check_margin_left = fields.Float(\n related='company_id.us_check_margin_left', string='Left Margin',\n help=\"Adjust the margins of generated checks to make it fit your \"\n \"printer's settings.\", store=True, readonly=False)\n","sub_path":"osi_check_layout/models/osi_res_config_settings.py","file_name":"osi_res_config_settings.py","file_ext":"py","file_size_in_byte":1398,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"652544717","text":"import os\nfrom flask import Flask, render_template, jsonify\nimport requests\n\napp = Flask(__name__)\n\nfb_app_secret = os.environ.get('FB_APP_SECRET', None)\nfb_app_id = os.environ.get('FB_APP_ID', None)\n\ndef get_fb_likes_and_loves(video_id):\n video_id = str(video_id)\n\n response = requests.get(\n 
\"https://graph.facebook.com/v2.8/? \\\n ids={live_video_id}& \\\n fields=reactions.type(LIKE).limit(0).summary(total_count).as(reactions_like), \\\n reactions.type(LOVE).limit(0).summary(total_count).as(reactions_love)& \\\n access_token={app_id}|{app_secret}\".format(\n app_id=fb_app_id, \n app_secret=fb_app_secret, \n live_video_id=video_id\n )\n ).json()\n\n likes = response[video_id]['reactions_like']['summary']['total_count']\n loves = response[video_id]['reactions_love']['summary']['total_count']\n return likes, loves\n\n@app.route('/getmetrics/')\ndef getmetrics():\n post_id = 1803310746574969\n likes_count, loves_count = get_fb_likes_and_loves(post_id)\n return jsonify(likes=likes_count, loves=loves_count)\n\n@app.route('/')\ndef home():\n return render_template('index.html')\n\nif __name__ == '__main__':\n app.run()","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1256,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"278477256","text":"#!/usr/bin/env python\n\nimport argparse\nimport sys, os\nimport subprocess as sp\nimport numpy as np\nimport netCDF4 as nc\n\nROOT=os.path.dirname(os.path.abspath(__file__))\nsys.path.append(ROOT)\nsys.path.append(os.path.join(ROOT,'oasisgrids','esmgrids'))\n\nfrom esmgrids.mom_grid import MomGrid\nfrom esmgrids.woa_grid import WoaGrid\nfrom helpers import setup_test_input_dir, setup_test_output_dir\nfrom helpers import calc_regridding_err\n\nEARTH_RADIUS = 6370997.0\nEARTH_AREA = 4*np.pi*EARTH_RADIUS**2\n\ndef get_grid(filename):\n\n try:\n grid = MomGrid.fromfile(filename)\n return grid\n except KeyError as e:\n print(\"Not a mom grid: \",type(e),str(e))\n\n try:\n grid = WoaGrid(filename)\n return grid\n except KeyError as e:\n print(\"Not a WOA grid: \",type(e),str(e))\n\n \n\nif __name__ == \"__main__\":\n\n parser = argparse.ArgumentParser(description=\"Convert grid file to GRIDSPEC format\")\n parser.add_argument(\"-v\",\"--verbose\", help=\"Verbose output\", action='store_true')\n parser.add_argument(\"inputs\", help=\"grid files\", nargs='+')\n args = parser.parse_args()\n\n verbose=args.verbose\n\n # Loop over all the inputs from the command line. 
\n for input in args.inputs:\n\n if verbose: print(\"Processing {}\".format(input))\n\n suffix = '.nc'\n\n if input.endswith(suffix):\n output = input[:-len(suffix)] + '_gridspec.nc'\n else:\n output = input + '_gridspec.nc'\n\n if verbose:\n print(\"Input file: {}\".format(input))\n print(\"GRIDSPEC output file: {}\".format(output))\n\n # grid = MomGrid.fromfile(input)\n grid = get_grid(input)\n grid.write_gridspec(output)\n\n","sub_path":"grd2spec.py","file_name":"grd2spec.py","file_ext":"py","file_size_in_byte":1699,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"41229152","text":"# chainer v4\nimport math\n\nfrom chainer.functions.connection import linear\n#from chainer.functions import batch_matmul\nfrom matmul_for_II import batch_matmul\nfrom chainer import initializers\nfrom chainer import link\nimport chainer.functions as F\nfrom chainer import variable\n\n\nclass MultiLinear(link.Link):\n def __init__(self, in_size, out_size, wscale=1, bias=0, nobias=True,\n initialW=None, initial_bias=None):\n super(MultiLinear, self).__init__()\n \n if out_size is None:\n in_size, out_size = None, in_size\n self.out_size = out_size\n\n with self.init_scope():\n W_initializer = initializers._get_initializer(initialW)\n self.W = variable.Parameter(W_initializer)\n if in_size is not None:\n self._initialize_params(in_size)\n\n if nobias:\n self.b = None\n else:\n if initial_bias is None:\n initial_bias = 0\n bias_initializer = initializers._get_initializer(initial_bias)\n self.b = variable.Parameter(bias_initializer, out_size)\n \n \n \n def _initialize_params(self, in_size):\n self.W.initialize((self.out_size, in_size))\n \n def __call__(self, x):\n \"\"\"Applies the linear layer.\n\n Args:\n x (~chainer.Variable): Batch of input vectors.\n\n Returns:\n ~chainer.Variable: Output of the linear layer.\n\n \"\"\"\n if self.has_uninitialized_params:\n self._initialize_params(x.shape[1])\n #return linear.linear(x, self.W, self.b)\n batch_size = x.data.shape[1]\n return F.transpose(F.reshape(batch_matmul(x, self.W),(self.out_size,batch_size)))","sub_path":"getruiternewsfromweb/GINN_ChainerV4/multilinear.py","file_name":"multilinear.py","file_ext":"py","file_size_in_byte":1759,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"88300899","text":"#AfvinkOpdracht 7\r\n#Author: Shane Pullens\r\n\r\n\r\ndef main():\r\n seq = getSequentie()\r\n isDNA(seq)\r\n rna = RNAconvert(seq)\r\n StartStop(rna)\r\n\r\ndef getSequentie():\r\n #ask the user for the sequence file\r\n print(\"-\" *80)\r\n getBestand = input(\"Enter the name of your file.\")\r\n gene = open(getBestand,\"r\")\r\n print (getBestand)\r\n gene.readline()\r\n \r\n seq = \"\"\r\n for c in gene:\r\n seq = seq + c\r\n seq = seq.replace(\"\\n\",\"\") #this strips the newline characters\r\n return seq\r\n\r\ndef isDNA(seq):\r\n gene = seq\r\n DNA = True\r\n \r\n for line in gene:\r\n line = line.lower()\r\n for char in line:\r\n if char != \"g\" and\\\r\n char != \"c\" and\\\r\n char != \"a\" and\\\r\n char != \"t\" and\\\r\n char != \"n\": #\"n\" appears in sequences that were not fully sequenced.\r\n DNA = False\r\n print (\"\")\r\n if DNA == True:\r\n print (\"This is DNA.\")\r\n elif DNA == False:\r\n print (\"This is not DNA\")\r\n print (\"\")\r\n\r\ndef RNAconvert(seq):\r\n rna = seq.replace(\"T\",\"U\")\r\n return rna\r\n\r\ndef StartStop(rna):\r\n gene = rna\r\n start = gene.find(\"AUG\")\r\n\r\n stop = 0\r\n while stop <= len(gene): #this loop scans for UAA codons until one is \r\n stop = gene.find(\"UAA\", stop)#found past the start codon\r\n if stop >= start:\r\n break\r\n stop += 3\r\n \r\n stop1 = 0\r\n while stop1 <= len(gene): #this loop scans for UAG codons until one is\r\n stop1 = gene.find(\"UAG\", stop1)#found past the start codon\r\n if stop1 >= start:\r\n break\r\n stop1 += 3\r\n\r\n stop2 = 0\r\n while stop2 <= len(gene): #this loop scans for UGA codons until one is \r\n stop2 = gene.find(\"UGA\", stop2)#found past the start codon\r\n if stop2 >= start:\r\n break\r\n stop2 += 3\r\n \r\n L = (stop,stop1,stop2)\r\n realStop = L.index(min(L)) #the stop codon must always come after the start codon\r\n realStop = L[realStop]\r\n print (\"First start codon:\\t\",start)\r\n print (\"Next stop codon:\\t\",realStop) #this picks the smallest stop position\r\n\r\n compstr = gene[start:realStop]\r\n print (\"RNA transcript: \",compstr)\r\n return compstr\r\nmain()\r\n","sub_path":"AfvinkOpdracht7.py","file_name":"AfvinkOpdracht7.py","file_ext":"py","file_size_in_byte":2323,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"10016748","text":"'''\n This class performs a statistical analysis of the given data\n in order to calculate the standard deviation and the average \n word frequency.\n'''\nfrom math import pow, sqrt\n\nclass Statistical_analyser:\n\n def __init__(self, word_dict):\n\n self.word_dict = word_dict\n self.sample = None\n self.ruling_price = None\n self.average_frequency = None\n self.standard_deviation = None\n\n def _culculate_average_frequency_ruling_price(self):\n \n sample = 0\n total_sum = 0\n ruling_price = 0\n \n try: \n for _, tupl in self.word_dict.items(): \n word_frequency, _ = tupl\n sample+=1\n total_sum += word_frequency\n if word_frequency > ruling_price:\n ruling_price = word_frequency\n \n self.sample = sample\n self.ruling_price = ruling_price\n self.average_frequency = total_sum / sample\n except: \n raise Exception('Cannot calculate the average frequency of the sample')\n\n \n def _culculate_standard_deviation(self):\n \n total_sum = 0\n try:\n for _, tupl in self.word_dict.items():\n word_frequency, _ = tupl\n total_sum+= pow((word_frequency - self.average_frequency),2)\n \n variation = total_sum / self.sample\n self.standard_deviation = sqrt(variation)\n except:\n raise Exception('Cannot calculate the deviation')\n\n def culculate_statistical_indexes(self):\n\n self._culculate_average_frequency_ruling_price()\n self._culculate_standard_deviation()\n","sub_path":"core/actions/text_analysys/statistical_analyser.py","file_name":"statistical_analyser.py","file_ext":"py","file_size_in_byte":1697,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"132414938","text":"import unittest\nimport src.utils_gamma as covid\nimport pandas as pd\n\n\nclass UtilsGammaFunctions(unittest.TestCase):\n \"\"\" Test the Gamma functions \"\"\"\n def test_gamma_pdf_begin(self):\n \"\"\" Test the Gamma PDF function \"\"\"\n y = covid.gamma_pdf(0, mu=50, theta=1)\n self.assertAlmostEqual(0, y, places=1)\n\n def test_gamma_pdf_middle(self):\n \"\"\" Test the Gamma PDF function. The midpoint is a little\n before the mean as the function is skewed. 
\"\"\"\n y = covid.gamma_pdf(47, mu=50, theta=1)\n self.assertAlmostEqual(0.05, y, places=2)\n\n def test_gamma_pdf_end(self):\n \"\"\" Test the Gamma PDF function \"\"\"\n y = covid.gamma_pdf(100, mu=50, theta=1)\n self.assertAlmostEqual(0, y, places=1)\n \n def test_gamma_pred_case_begin(self):\n \"\"\" Test the Gamma PDF function \"\"\"\n y = covid.gamma_pred_case(0, duration=100, theta=1, peak=160000, spread=20)\n self.assertAlmostEqual(0, y/160000, places=1)\n\n def test_gamma_pred_case_middle(self):\n \"\"\" Test the Gamma PDF function \"\"\"\n y = covid.gamma_pred_case(50, duration=100, theta=1, peak=160000, spread=20)\n self.assertAlmostEqual(0.5, y/160000, places=1)\n\n def test_gamma_pred_case_end(self):\n \"\"\" Test the Gamma PDF function \"\"\"\n y = covid.gamma_pred_case(100, duration=100, theta=1, peak=160000, spread=20)\n self.assertAlmostEqual(1.0, y/160000, places=1)\n\n def test_best_gamma_params(self):\n \"\"\" Test fit against a known curve \"\"\"\n peak = 160000\n duration = 100\n spread=20\n theta=1\n start_str='2020-02-23'\n \n # Build perfect data\n y = []\n for day in range(100):\n val = covid.gamma_pred_case(day, duration=duration,\n theta=theta, peak=peak,\n spread=spread)\n y.append(val)\n dt_idx = pd.date_range(start_str, freq='1D', periods=duration)\n df = pd.DataFrame({'Actual': y}, index=dt_idx)\n \n # Prove we can fit and find the same parameters\n values = covid.find_best_gamma_param(df=df,\n start_str=start_str,\n spread=spread,\n peak_guess=peak,\n duration_guess=duration)\n # The way I grid search peak means it is very unlikely to be equal\n # It just needs to be close\n self.assertLess(abs(peak-values[0]), peak*0.05)\n self.assertLess(abs(duration-values[1]), duration*0.05)\n self.assertLess(abs(theta-values[2]), theta*0.2)\n\n\nif __name__ == '__main__':\n unittest.main(warnings='ignore')\n","sub_path":"tst/test_utils_gamma.py","file_name":"test_utils_gamma.py","file_ext":"py","file_size_in_byte":2796,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"573042718","text":"from itertools import cycle\nfrom unittest.mock import patch\n\nfrom model_bakery import baker\n\nfrom tacticalrmm.test import TacticalTestCase\n\nfrom .models import WinUpdate\nfrom .serializers import UpdateSerializer\n\n\nclass TestWinUpdateViews(TacticalTestCase):\n def setUp(self):\n self.authenticate()\n self.setup_coresettings()\n\n @patch(\"agents.models.Agent.nats_cmd\")\n def test_run_update_scan(self, nats_cmd):\n agent = baker.make_recipe(\"agents.agent\")\n url = f\"/winupdate/{agent.pk}/runupdatescan/\"\n r = self.client.get(url)\n self.assertEqual(r.status_code, 200)\n nats_cmd.assert_called_with({\"func\": \"getwinupdates\"}, wait=False)\n\n self.check_not_authenticated(\"get\", url)\n\n @patch(\"agents.models.Agent.nats_cmd\")\n def test_install_updates(self, nats_cmd):\n agent = baker.make_recipe(\"agents.agent\")\n baker.make(\"winupdate.WinUpdate\", agent=agent, _quantity=4)\n baker.make(\"winupdate.WinUpdatePolicy\", agent=agent)\n url = f\"/winupdate/{agent.pk}/installnow/\"\n r = self.client.get(url)\n self.assertEqual(r.status_code, 200)\n nats_cmd.assert_called_once()\n\n self.check_not_authenticated(\"get\", url)\n\n def test_get_winupdates(self):\n agent = baker.make_recipe(\"agents.agent\")\n baker.make(\"winupdate.WinUpdate\", agent=agent, _quantity=4)\n\n # test a call where agent doesn't exist\n resp = self.client.get(\"/winupdate/500/getwinupdates/\", format=\"json\")\n 
self.assertEqual(resp.status_code, 404)\n\n url = f\"/winupdate/{agent.pk}/getwinupdates/\"\n resp = self.client.get(url, format=\"json\")\n serializer = UpdateSerializer(agent)\n self.assertEqual(resp.status_code, 200)\n self.assertEqual(len(resp.data[\"winupdates\"]), 4) # type: ignore\n self.assertEqual(resp.data, serializer.data) # type: ignore\n\n self.check_not_authenticated(\"get\", url)\n\n \"\"\" @patch(\"winupdate.tasks.check_for_updates_task.apply_async\")\n def test_run_update_scan(self, mock_task):\n\n # test a call where agent doesn't exist\n resp = self.client.get(\"/winupdate/500/runupdatescan/\", format=\"json\")\n self.assertEqual(resp.status_code, 404)\n\n agent = baker.make_recipe(\"agents.agent\")\n url = f\"/winupdate/{agent.pk}/runupdatescan/\"\n\n resp = self.client.get(url, format=\"json\")\n self.assertEqual(resp.status_code, 200)\n mock_task.assert_called_with(\n queue=\"wupdate\",\n kwargs={\"pk\": agent.pk, \"wait\": False, \"auto_approve\": True},\n )\n\n self.check_not_authenticated(\"get\", url) \"\"\"\n\n \"\"\" @patch(\"agents.models.Agent.salt_api_cmd\")\n def test_install_updates(self, mock_cmd):\n\n # test a call where agent doesn't exist\n resp = self.client.get(\"/winupdate/500/installnow/\", format=\"json\")\n self.assertEqual(resp.status_code, 404)\n\n agent = baker.make_recipe(\"agents.agent\")\n url = f\"/winupdate/{agent.pk}/installnow/\"\n\n # test agent command timeout\n mock_cmd.return_value = \"timeout\"\n resp = self.client.get(url, format=\"json\")\n self.assertEqual(resp.status_code, 400)\n\n # test agent command error\n mock_cmd.return_value = \"error\"\n resp = self.client.get(url, format=\"json\")\n self.assertEqual(resp.status_code, 400)\n\n # test agent command running\n mock_cmd.return_value = \"running\"\n resp = self.client.get(url, format=\"json\")\n self.assertEqual(resp.status_code, 400)\n\n # can't get this to work right\n # test agent command no pid field\n mock_cmd.return_value = {}\n resp = self.client.get(url, format=\"json\")\n self.assertEqual(resp.status_code, 400)\n\n # test agent command success\n mock_cmd.return_value = {\"pid\": 3316}\n resp = self.client.get(url, format=\"json\")\n self.assertEqual(resp.status_code, 200)\n\n self.check_not_authenticated(\"get\", url) \"\"\"\n\n def test_edit_policy(self):\n url = \"/winupdate/editpolicy/\"\n agent = baker.make_recipe(\"agents.agent\")\n winupdate = baker.make(\"winupdate.WinUpdate\", agent=agent)\n\n invalid_data = {\"pk\": 500, \"policy\": \"inherit\"}\n # test a call where winupdate doesn't exist\n resp = self.client.patch(url, invalid_data, format=\"json\")\n self.assertEqual(resp.status_code, 404)\n\n data = {\"pk\": winupdate.pk, \"policy\": \"inherit\"} # type: ignore\n\n resp = self.client.patch(url, data, format=\"json\")\n self.assertEqual(resp.status_code, 200)\n\n self.check_not_authenticated(\"patch\", url)\n\n\nclass WinupdateTasks(TacticalTestCase):\n def setUp(self):\n self.setup_coresettings()\n\n site = baker.make(\"clients.Site\")\n self.online_agents = baker.make_recipe(\n \"agents.online_agent\", site=site, _quantity=2\n )\n self.offline_agent = baker.make_recipe(\"agents.agent\", site=site)\n\n @patch(\"agents.models.Agent.nats_cmd\")\n @patch(\"time.sleep\")\n def test_auto_approve_task(self, mock_sleep, nats_cmd):\n from .tasks import auto_approve_updates_task\n\n # Setup data\n baker.make_recipe(\n \"winupdate.winupdate\",\n agent=cycle(\n [self.online_agents[0], self.online_agents[1], self.offline_agent]\n ),\n _quantity=20,\n )\n 
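# baker.make_recipe(..., agent=cycle([...]), _quantity=20) in the tests below
# assigns the three agents round-robin across the 20 created rows; the
# round-robin part is plain itertools. A tiny sketch:
from itertools import cycle, islice

agents = ["online-1", "online-2", "offline"]
assigned = list(islice(cycle(agents), 20))
print(assigned[:4])                 # ['online-1', 'online-2', 'offline', 'online-1']
print(assigned.count("online-1"))   # 7 of the 20 rows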
baker.make_recipe(\n \"winupdate.winupdate_approve\",\n agent=cycle(\n [self.online_agents[0], self.online_agents[1], self.offline_agent]\n ),\n _quantity=3,\n )\n\n # run task synchronously\n auto_approve_updates_task()\n\n # make sure the check_for_updates_task was run once for each online agent\n self.assertEqual(nats_cmd.call_count, 2)\n\n # check if all of the created updates were approved\n winupdates = WinUpdate.objects.all()\n for update in winupdates:\n self.assertEqual(update.action, \"approve\")\n\n \"\"\" @patch(\"agents.models.Agent.salt_api_async\")\n def test_check_agent_update_daily_schedule(self, agent_salt_cmd):\n from .tasks import check_agent_update_schedule_task\n\n # Setup data\n # create an online agent with auto approval turned off\n agent = baker.make_recipe(\"agents.online_agent\")\n baker.make(\"winupdate.WinUpdatePolicy\", agent=agent)\n\n # create approved winupdates\n baker.make_recipe(\n \"winupdate.approved_winupdate\",\n agent=cycle(\n [self.online_agents[0], self.online_agents[1], self.offline_agent]\n ),\n _quantity=20,\n )\n\n # create daily patch policy schedules for the agents\n winupdate_policy = baker.make_recipe(\n \"winupdate.winupdate_approve\",\n agent=cycle(\n [self.online_agents[0], self.online_agents[1], self.offline_agent]\n ),\n _quantity=3,\n )\n\n check_agent_update_schedule_task()\n agent_salt_cmd.assert_called_with(func=\"win_agent.install_updates\")\n self.assertEquals(agent_salt_cmd.call_count, 2) \"\"\"\n\n \"\"\" @patch(\"agents.models.Agent.salt_api_async\")\n def test_check_agent_update_monthly_schedule(self, agent_salt_cmd):\n from .tasks import check_agent_update_schedule_task\n\n # Setup data\n # create an online agent with auto approval turned off\n agent = baker.make_recipe(\"agents.online_agent\")\n baker.make(\"winupdate.WinUpdatePolicy\", agent=agent)\n\n # create approved winupdates\n baker.make_recipe(\n \"winupdate.approved_winupdate\",\n agent=cycle(\n [self.online_agents[0], self.online_agents[1], self.offline_agent]\n ),\n _quantity=20,\n )\n\n # create monthly patch policy schedules for the agents\n winupdate_policy = baker.make_recipe(\n \"winupdate.winupdate_approve_monthly\",\n agent=cycle(\n [self.online_agents[0], self.online_agents[1], self.offline_agent]\n ),\n _quantity=3,\n )\n\n check_agent_update_schedule_task()\n agent_salt_cmd.assert_called_with(func=\"win_agent.install_updates\")\n self.assertEquals(agent_salt_cmd.call_count, 2) \"\"\"\n","sub_path":"api/tacticalrmm/winupdate/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":8335,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"77696572","text":"def compress_word(w):\n vowels = \"aeiou\"\n i = 1\n new_word = \"\" + w[0]\n while i < len(w):\n if w[i].lower() not in vowels:\n new_word+=w[i].lower()\n i+=1\n return new_word\n \n \n \n \nprint(compress_word(\"happy\"))\nprint(compress_word(\"application\"))\n\ndef sentence(line):\n list = line.split();\n new_list = []\n for i in list:\n new_list.append(compress_word(i))\n return \" \".join(new_list)\n\nprint(sentence(\"good morning\"))\nprint(sentence(\"a lot of vowels\"))\n\n ","sub_path":"exam_01/compress.py","file_name":"compress.py","file_ext":"py","file_size_in_byte":545,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"379529059","text":"import os.path as osp\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nclass 
SqueezeSegV2(nn.Module):\n def __init__(self, input_channel, output_channel):\n super().__init__()\n squeeze_kwargs = {'input_channels' : input_channel,\n 'squeeze_depth' : 3,\n 'cam_depth' : 1,\n 'conv_starts' : 64,\n 'squeeze_start' : 16,\n 'ef_start' : 64}\n\n head_kwargs = {'in_channels' : 64,\n 'mid_channels' : 32,\n 'num_classes' : output_channel,\n 'crf_iters' : 3,\n 'crf_dims' : 3,\n 'crf_start_dim' : 0}\n\n self.squeeze = SqueezeSegBone(**squeeze_kwargs)\n self.head = SegmentHead(**head_kwargs)\n\n def forward(self, x):\n features = self.squeeze(x)\n return self.head(x, features)\n\n @classmethod\n def load_from_kwargs(cls, data):\n if isinstance(data['head_cls'], str):\n head_cls = SegmentHead()\n data['head_cls'] = head_cls\n return cls(**data)\n\nclass SqueezeSegBone(nn.Module):\n def __init__(self, input_channels, squeeze_depth=2, cam_depth=1, conv_starts=64, squeeze_start=16, ef_start=64):\n super().__init__()\n self.reduce = 1\n self.start = nn.Sequential(\n Conv(input_channels, conv_starts, 3, 1, 2, top_parent=self),\n ContextAggregation(conv_starts, top_parent=self),\n Conv(conv_starts, conv_starts, 1, top_parent=self),\n )\n self.rest = nn.Sequential(\n Pool(3, 2, 1, top_parent=self),\n SqueezePart(conv_starts, squeeze_start, ef_start, squeeze_depth, cam_depth, top_parent=self),\n DeFire(2 * ef_start, squeeze_start, int(conv_starts / 2), top_parent=self),\n nn.Dropout2d(),\n )\n\n def forward(self, x):\n shape = x.shape\n over = shape[-1] % self.reduce\n if over:\n over = self.reduce - over\n x = F.pad(x, (int(over / 2), int(over / 2), 0, 0), 'replicate')\n pre_add = self.start(x)\n insides = self.rest(pre_add)\n result = pre_add + insides\n return result\n\nclass SegmentHead(nn.Module):\n def __init__(self, in_channels, mid_channels, num_classes, crf_iters, crf_start_dim, crf_dims, **crf_kwargs):\n super().__init__()\n self.net = nn.Sequential(\n DeFire(in_channels, mid_channels // 16, mid_channels // 2),\n Fire(mid_channels, mid_channels // 16, mid_channels // 2),\n Conv(mid_channels, num_classes, 1, relu=False, norm=False),\n )\n self.crf = CRF(crf_iters, crf_start_dim, crf_dims, **crf_kwargs)\n\n def forward(self, data_input, features):\n result = self.net(features)\n if result.shape[-1] != data_input.shape[-1]:\n diff = result.shape[-1] - data_input.shape[-1]\n result = result[..., (diff // 2) : -(diff // 2)]\n result = self.crf(data_input, result)\n return result\n\nclass Fire(nn.Module):\n def __init__(self, in_channels, squeeze, expand, cam=False, top_parent=None):\n super().__init__()\n self.in_channels = in_channels\n self.out_channels = expand * 2\n self.squeeze = Conv(in_channels, squeeze, 1, top_parent=top_parent)\n self.expand1x1 = Conv(squeeze, expand, 1, top_parent=top_parent)\n self.expand3x3 = Conv(squeeze, expand, 3, 1, top_parent=top_parent)\n if cam:\n self.cam = ContextAggregation(self.out_channels, top_parent=top_parent)\n else:\n self.cam = None\n\n def forward(self, x):\n sq = self.squeeze(x)\n e1 = self.expand1x1(sq)\n e3 = self.expand3x3(sq)\n c = torch.cat([e1, e3], 1)\n if self.cam is not None:\n return self.cam(c)\n return c\n\n\nclass DeFire(nn.Module):\n def __init__(self, in_channels, squeeze, expand, cam=False, top_parent=None):\n super().__init__()\n self.in_channels = in_channels\n self.out_channels = expand * 2\n self.squeeze = Conv(in_channels, squeeze, 1, top_parent=top_parent)\n self.deconv = DeConv(squeeze)\n self.expand1x1 = Conv(squeeze, expand, 1, top_parent=top_parent)\n self.expand3x3 = Conv(squeeze, expand, 3, 1, 
top_parent=top_parent)\n if cam:\n self.cam = ContextAggregation(self.out_channels, top_parent=top_parent)\n else:\n self.cam = None\n\n def forward(self, x):\n sqd = self.deconv(self.squeeze(x))\n e1 = self.expand1x1(sqd)\n e3 = self.expand3x3(sqd)\n c = torch.cat([e1, e3], 1)\n if self.cam is not None:\n return self.cam(c)\n return c\n\n\nclass Pool(nn.Module):\n def __init__(self, size, stride, pad=0, top_parent=None):\n super().__init__()\n if top_parent is not None:\n top_parent.reduce *= stride\n self.pool = nn.MaxPool2d(size, (1, stride), padding=pad)\n\n def forward(self, x):\n return self.pool(x)\n\n\nclass ContextAggregation(nn.Module):\n def __init__(self, channels, reduction=16, top_parent=None):\n super().__init__()\n mid = channels // reduction\n self.in_channels = channels\n self.out_channels = channels\n nets = [\n Pool(7, 1, 3, top_parent=top_parent),\n Conv(channels, mid, 1, relu=True, norm=False, top_parent=top_parent),\n Conv(mid, channels, 1, relu=False, norm=False, top_parent=top_parent),\n torch.nn.Sigmoid(),\n ]\n self.nets = nn.Sequential(*nets)\n\n def forward(self, x):\n return x * self.nets(x)\n\n\nclass Conv(nn.Module):\n def __init__(self, in_channels, out_channels, kernel_size, pad=0, stride=1, relu=True, norm=True, top_parent=None):\n super().__init__()\n self.in_channels = in_channels\n self.out_channels = out_channels\n if top_parent is not None:\n top_parent.reduce *= stride\n nets = []\n nets.append(nn.Conv2d(in_channels, out_channels, kernel_size, padding=pad, stride=(1, stride)))\n if relu:\n nets.append(nn.ReLU(inplace=True))\n if norm:\n nets.append(nn.BatchNorm2d(out_channels))\n self.net = nn.Sequential(*nets)\n\n def forward(self, x):\n return self.net(x)\n\n\nclass DeConv(nn.Module):\n def __init__(self, channels, relu=True, norm=True):\n super().__init__()\n self.in_channels = channels\n self.out_channels = channels\n nets = []\n nets.append(nn.ConvTranspose2d(channels, channels, (1, 4), (1, 2), padding=(0, 1)))\n if relu:\n nets.append(nn.ReLU(inplace=True))\n if norm:\n nets.append(nn.BatchNorm2d(channels))\n self.net = nn.Sequential(*nets)\n\n def forward(self, x):\n return self.net(x)\n\n\nclass SqueezePart(nn.Module):\n SQ_ADD = 16\n EF_ADD = 64\n\n def __init__(self, input_channels, sq, ef, depth, cam_depth=0, top_parent=None):\n super().__init__()\n cam = cam_depth > 0\n if depth == 0:\n self.net = nn.Sequential(\n Fire(input_channels, sq, ef, cam, top_parent=top_parent),\n Fire(2 * ef, sq, ef, cam, top_parent=top_parent),\n Fire(2 * ef, sq + self.SQ_ADD, ef + self.EF_ADD, cam, top_parent=top_parent),\n Fire(2 * (ef + self.EF_ADD), sq + self.SQ_ADD, ef + self.EF_ADD, cam, top_parent=top_parent),\n )\n else:\n self.beg = nn.Sequential(\n Fire(input_channels, sq, ef, cam, top_parent=top_parent), Fire(2 * ef, sq, ef, cam, top_parent=top_parent)\n )\n self.rest = nn.Sequential(\n Pool(3, 2, 1, top_parent=top_parent),\n SqueezePart(2 * ef, sq + self.SQ_ADD, ef + self.EF_ADD, depth - 1, cam_depth - 1, top_parent=top_parent),\n DeFire(2 * (ef + self.EF_ADD * (2 if depth == 1 else 1)), 2 * sq, ef, top_parent=top_parent),\n )\n self.depth = depth\n\n def forward(self, x):\n if self.depth:\n pre_add = self.beg(x)\n insides = self.rest(pre_add)\n return pre_add + insides\n else:\n return self.net(x)\n\n\nclass CRF(nn.Module):\n SQ_VAR_BI = np.array([0.015, 0.015, 0.01]) ** 2\n SQ_VAR_ANG = np.array([0.9, 0.9, 0.6]) ** 2\n\n def __init__(\n self,\n num_iterations,\n bf_start_dim,\n bf_dims,\n mask_dim=-1,\n size_a=3,\n size_b=5,\n sq_var_bi=None,\n 
sq_var_ang=None,\n sq_var_bi_ang=None,\n ang_coef=0.02,\n bi_coef=0.1,\n ):\n super().__init__()\n if sq_var_ang is None:\n sq_var_ang = self.SQ_VAR_ANG\n if sq_var_bi is None:\n sq_var_bi = self.SQ_VAR_BI\n num_classes = len(sq_var_ang)\n init = (np.ones((num_classes, num_classes)) - np.eye(num_classes))[..., None, None].astype(np.float32)\n self.mask_dim = mask_dim\n self.bilateral = _BilateralWeights(size_a, size_b, bf_dims, sq_var_bi)\n self.local = _LocalPassing(size_a, size_b, num_classes, sq_var_ang, sq_var_bi_ang)\n self.ang_compat = nn.Conv2d(num_classes, num_classes, 1, bias=False)\n self.bi_ang_compat = nn.Conv2d(num_classes, num_classes, 1, bias=False)\n self.iterations = num_iterations\n self.bf_start_dim = bf_start_dim\n self.bf_dims = bf_dims\n self.ang_compat.weight = nn.Parameter(torch.from_numpy(init * ang_coef))\n self.bi_ang_compat.weight = nn.Parameter(torch.from_numpy(init * bi_coef))\n\n def forward(self, lidar_input, data):\n bf_weights = self.bilateral(lidar_input[:, self.bf_start_dim : self.bf_start_dim + self.bf_dims])\n mask = (lidar_input[:, self.mask_dim, None, ...] >= 0.5).float()\n for _ in range(self.iterations):\n unary = F.softmax(data, 1)\n ang, bi_ang = self.local(unary, mask, bf_weights)\n ang = self.ang_compat(ang)\n bi_ang = self.bi_ang_compat(bi_ang)\n outputs = unary + ang + bi_ang\n data = outputs\n return outputs\n\n\nclass DropoutNoise(nn.Module):\n def __init__(self, np_file=osp.join(osp.dirname(osp.abspath(__file__)), 'mask.npy')):\n super().__init__()\n self.mask = torch.from_numpy(np.load(np_file)).clamp(0, 1)[None, ...]\n\n def forward(self, data):\n bsize = data.shape[0]\n for i in range(bsize):\n mask = torch.bernoulli(self.mask).float()\n data[i] *= mask\n return data\n\n\n\nclass _LocalPassing(nn.Module):\n def __init__(self, size_a, size_b, in_channels, sq_var_ang, sq_var_bi=None):\n if sq_var_bi is None:\n sq_var_bi = sq_var_ang\n pad = (size_a // 2, size_b // 2)\n super().__init__()\n self.ang_conv = nn.Conv2d(in_channels, in_channels, (size_a, size_b), padding=pad, bias=False)\n self.bi_ang_conv = nn.Conv2d(in_channels, in_channels, (size_a, size_b), padding=pad, bias=False)\n self.condense_conv = nn.Conv2d(in_channels, (size_a * size_b - 1) * in_channels, (size_a, size_b), padding=pad, bias=False)\n\n self.ang_conv.weight = nn.Parameter(torch.from_numpy(_gauss_weights(size_a, size_b, in_channels, sq_var_ang)), requires_grad=False)\n self.bi_ang_conv.weight = nn.Parameter(\n torch.from_numpy(_gauss_weights(size_a, size_b, in_channels, sq_var_bi)), requires_grad=False\n )\n self.condense_conv.weight = nn.Parameter(torch.from_numpy(_condensing_weights(size_a, size_b, in_channels)), requires_grad=False)\n\n def forward(self, data, mask, bilateral):\n b, c, h, w = data.shape\n ang = self.ang_conv(data)\n bi_ang = self.bi_ang_conv(data)\n condense = self.condense_conv(data * mask).view(b, c, -1, h, w)\n bi_out = (condense * bilateral).sum(2) * mask * bi_ang\n return ang, bi_out\n\n\nclass _BilateralWeights(nn.Module):\n def __init__(self, size_a, size_b, in_channels, sq_var):\n super().__init__()\n pad = (size_a // 2, size_b // 2)\n self.in_channels = in_channels\n self.sq_var = sq_var\n self.condense_conv = nn.Conv2d(in_channels, (size_a * size_b - 1) * in_channels, (size_a, size_b), padding=pad, bias=False)\n self.condense_conv.weight = nn.Parameter(torch.from_numpy(_condensing_weights(size_a, size_b, in_channels)), requires_grad=False)\n\n def forward(self, data):\n condensed = self.condense_conv(data)\n diffs = [data[:, i, None, ...] 
- condensed[:, i :: self.in_channels, ...] for i in range(self.in_channels)]\n return torch.stack([torch.exp_(-sum([diff ** 2 for diff in diffs]) / (2 * self.sq_var[i])) for i in range(len(self.sq_var))], 1)\n\n\ndef _gauss_weights(size_a, size_b, num_classes, sq_var):\n kernel = np.zeros((num_classes, num_classes, size_a, size_b), dtype=np.float32)\n for k in range(num_classes):\n kernel_2d = np.zeros((size_a, size_b), dtype=np.float32)\n for i in range(size_a):\n for j in range(size_b):\n diff = np.sum((np.array([i - size_a // 2, j - size_b // 2])) ** 2)\n kernel_2d[i, j] = np.exp(-diff / 2 / sq_var[k])\n kernel_2d[size_a // 2, size_b // 2] = 0\n kernel[k, k] = kernel_2d\n return kernel\n\n\ndef _condensing_weights(size_a, size_b, in_channels):\n half_filter_dim = (size_a * size_b) // 2\n kernel = np.zeros((size_a * size_b * in_channels, in_channels, size_a, size_b), dtype=np.float32)\n for i in range(size_a):\n for j in range(size_b):\n for k in range(in_channels):\n kernel[i * (size_b * in_channels) + j * in_channels + k, k, i, j] = 1\n kernel = np.concatenate([kernel[: in_channels * half_filter_dim], kernel[in_channels * (half_filter_dim + 1) :]], axis=0)\n return kernel\n\n","sub_path":"detectors/squeezesegv2.py","file_name":"squeezesegv2.py","file_ext":"py","file_size_in_byte":13736,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"115686486","text":"import time\nimport math\nimport numpy as np\nimport random\nfrom math import sin, cos\nfrom random import randint\nimport os, inspect\ncurrentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))\nparentdir = os.path.dirname(os.path.dirname(currentdir))\nos.sys.path.insert(0, parentdir)\nimport pdb\nimport pybullet as p\nimport pybullet_data\nurdfRootPath=pybullet_data.getDataPath()\nmaxForce = 500\nmaxVelocity = 0.2\n\nfingerAForce = 1\nfingerBForce = 1.5\nfingerTipForce = 1\nserverMode = p.GUI # GUI/DIRECT\nphysicsClient = p.connect(serverMode)\np.setGravity(0,0,-10)\nobjects = p.loadSDF(os.path.join(urdfRootPath, \"kuka_iiwa/kuka_with_gripper2.sdf\"))\nrobotID = objects[0]\nplaneID = p.loadURDF(os.path.join(urdfRootPath, \"plane.urdf\"), [0, 0, 0])\n# cubeID = p.loadURDF(os.path.join(urdfRootPath, \"cube_small.urdf\"), [0.6, 0.05, 0])\nkukaEndEffectorIndex = 6\nkukaGripperIndex = 7\n\nnumJoints = p.getNumJoints(robotID)\nprint(\"Number of joints: {}\".format(numJoints))\njointTypeList = [\"REVOLUTE\", \"PRISMATIC\", \"SPHERICAL\", \"PLANAR\", \"FIXED\"]\nfor i in range(numJoints):\n jointInfo = p.getJointInfo(robotID, i)\n jointID = jointInfo[0]\n jointName = jointInfo[1].decode(\"utf-8\")\n jointType = jointTypeList[jointInfo[2]]\n jointLowerLimit = jointInfo[8]\n jointUpperLimit = jointInfo[9]\n print(\"\\tID: {}\".format(jointID))\n print(\"\\tname: {}\".format(jointName))\n print(\"\\ttype: {}\".format(jointType))\n print(\"\\tlower limit: {}\".format(jointLowerLimit))\n print(\"\\tupper limit: {}\".format(jointUpperLimit))\nprint(\"------------------------------------------\")\n\n\ndef reset():\n\n p.resetBasePositionAndOrientation(robotID, [-0.100000, 0.000000, 0.070000],\n [0.000000, 0.000000, 0.000000, 1.000000])\n jointPositions = [\n 0.006418, 0.413184, -0.011401, -1.589317, 0.005379, 1.137684, -0.006539, 0.000048,\n -0.299912, 0.000000, -0.000043, 0.299960, 0.000000, -0.000200\n ]\n numJoints = p.getNumJoints(robotID)\n for jointIndex in range(numJoints):\n p.resetJointState(robotID, jointIndex, jointPositions[jointIndex])\n 
p.setJointMotorControl2(robotID,\n jointIndex,\n p.POSITION_CONTROL,\n targetPosition=jointPositions[jointIndex],\n force=maxForce)\n\n# Control end effector position via POSITION_CONTROL\ndef endEffectControl(target):\n # use standard IK algorithm to calculate poses\n jointPoses = p.calculateInverseKinematics(robotID, kukaEndEffectorIndex, target,[0,1,0,0])\n for i in range(kukaEndEffectorIndex + 1):\n # print(i)\n p.setJointMotorControl2(bodyUniqueId=robotID,\n jointIndex=i,\n controlMode=p.POSITION_CONTROL,\n targetPosition=jointPoses[i],\n targetVelocity=0,\n force=maxForce,\n maxVelocity=maxVelocity,\n positionGain=0.3,\n velocityGain=1)\n\ndef gripperControl(fingerAngle):\n p.setJointMotorControl2(robotID,\n 8,\n p.POSITION_CONTROL,\n targetPosition=-fingerAngle,\n force=fingerAForce)\n p.setJointMotorControl2(robotID,\n 11,\n p.POSITION_CONTROL,\n targetPosition=fingerAngle,\n force=fingerBForce)\n\n p.setJointMotorControl2(robotID,\n 10,\n p.POSITION_CONTROL,\n targetPosition=0,\n force=fingerTipForce)\n p.setJointMotorControl2(robotID,\n 13,\n p.POSITION_CONTROL,\n targetPosition=0,\n force=fingerTipForce)\n\n\ndef circular_path(center, t):\n\n R = 0.5\n print([R*sin(t), 0, R*cos(t)])\n return [0.2 * cos(2*t), 0.5 , 0.2 * sin(2*t)+0.4 ]\n #return [x+y for x,y in zip(center, [R*sin(t), 0, R*cos(t)])]\n\nrandom_target_timer = 0\nt = 0.01\nprevPose = None\nreset()\ngripperControl(0.6)\nls = p.getLinkState(robotID, kukaEndEffectorIndex+1)\n\nendEffectControl((0.6, 0, 0.5))\n\ncube1ID = p.loadURDF(os.path.join(urdfRootPath, \"cube_small.urdf\"), [0.5, -0.25, 0])\n#cube2ID = p.loadURDF(os.path.join(urdfRootPath, \"cube_small.urdf\"), [0.52, -0.15, 0])\nsphereID = p.loadURDF(os.path.join(urdfRootPath, \"sphere_small.urdf\"), [0.52, -0.43, 0])\ntrayID = p.loadURDF(os.path.join(urdfRootPath, \"tray/traybox.urdf\"), [0.52, 0.5, 0])\np.setRealTimeSimulation(1)\nj = 0\ntargetPos = (([0.53, -0.25, 0.50], 0.6, 0.6),([0.53, -0.28, 0.30], 0.1, 0.6),([0.6, 0.4, 0.5], 0.1, 0.1),([0.6, 0.5, 0.5], 0.6, 0.1) ,([0.52, -0.45, 0.5],0.6,0.6),([0.52, -0.46, 0.29],0.1,0.6), ([0.6, 0.4, 0.5],0.6,0.1),([0.6, 0, 0.5],0.6,0.6))\n#endEffectControl(targetPos[j])\nmode = \"move\"\n\n\ndef closeTo(target, orientation):\n ls = p.getLinkState(robotID, kukaEndEffectorIndex)\n print(ls[4], ls[5])\n dis = sum(abs(np.array(ls[4]) - target))+sum(abs(np.array(ls[5]) - orientation))\n print(dis)\n if dis < 0.0005:\n return True\n else:\n return False\n\ndef moveto(target,j):\n endEffectControl(target)\n gripperControl(targetPos[j][2])\n if closeTo(target, [0,1,0,0]):\n t = 0\n return \"grab\",j\n else:\n return \"move\",j\ndef grab(angle, j,t):\n gripperControl(angle)\n if t > 1:\n return \"move\", j+1\n t = 0\n else:\n return \"grab\", j\n\n\ndef indirect(mode,j):\n switcher={\n \"move\":lambda:moveto(targetPos[j][0],j),\n \"grab\":lambda:grab(targetPos[j][1], j,t),\n }\n return switcher.get(mode,lambda :'Invalid')()\n#logid = p.startStateLogging(p.STATE_LOGGING_VIDEO_MP4, \"~/Desktop/vid1.mp4\")\np.resetDebugVisualizerCamera( cameraDistance=2, cameraYaw=60, cameraPitch=-30, cameraTargetPosition=[0,0,0])\n\nfor i in range(100000):\n mode,j = indirect(mode, j)\n if j > 7:\n break\n t += 1./240.\n time.sleep(1 / 240.)\n#p.stopStateLogging(logid)\n","sub_path":"pybullet_envs/examples/kuka_reach.py","file_name":"kuka_reach.py","file_ext":"py","file_size_in_byte":6254,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"38243423","text":"import unittest\nfrom 
functions.BISAccount import Account\nfrom functions.BISProfile import Profile\nfrom driver.browser import chrome_browser\nfrom functions.common import log\nfrom functions.common.Common import Common\nfrom functions.common.ExcelUtil_tool import ExcelUtil\nfrom ddt import ddt,data\nlogger = log.createlogger(\"MAIN\")\nlogindata = ExcelUtil.readExcel('C:\\\\MLP\\\\Automation\\\\Project\\\\data\\\\acc&env_info.xlsx','Sheet1')\n# logindata = ExcelUtil.readExcel('../data/acc&env_info.xlsx','Sheet1')\n@ddt\nclass ProfileTest(unittest.TestCase):\n def setUp(self):\n self.driver = chrome_browser()\n self.account = Account(self.driver)\n self.profile = Profile(self.driver)\n self.common = Common(self.driver)\n self.common.open_browser()\n\n @data(*logindata)\n def test_Dashboard(self,data):\n \"\"\"Check the profile-dashboard page\"\"\"\n try:\n self.account.login_with_facebook(data['face_email'], data['face_pw'])\n self.profile.enter_profile()\n self.profile.check_profile_dashboard_my_points()\n self.profile.check_profile_dashboard_earn_points()\n self.profile.check_reward_section()\n self.profile.check_upcoming()\n self.profile.check_mm_bottom()\n logger.info(\"Result Success Rate Is 100%\")\n except Exception as result:\n self.driver.get_screenshot_as_file(\"./screenshot/check_hotels_page_err.png\")\n logger.info(\"%s unknown error during checking profile-dashboard\" % result)\n raise\n\n def tearDown(self):\n self.driver.quit()\n\n\nif __name__ == '__main__':\n unittest.main()\n\n","sub_path":"testcase/test_M0603_check_profile_dashboard.py","file_name":"test_M0603_check_profile_dashboard.py","file_ext":"py","file_size_in_byte":1664,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"479883490","text":"def _parse_mpd_formats(self, mpd_doc, mpd_id=None, mpd_base_url='', formats_dict={}, mpd_url=None):\n '\\n Parse formats from MPD manifest.\\n References:\\n 1. MPEG-DASH Standard, ISO/IEC 23009-1:2014(E),\\n http://standards.iso.org/ittf/PubliclyAvailableStandards/c065274_ISO_IEC_23009-1_2014.zip\\n 2. 
https://en.wikipedia.org/wiki/Dynamic_Adaptive_Streaming_over_HTTP\\n '\n if (mpd_doc.get('type') == 'dynamic'):\n return []\n namespace = self._search_regex('(?i)^{([^}]+)?}MPD$', mpd_doc.tag, 'namespace', default=None)\n\n def _add_ns(path):\n return self._xpath_ns(path, namespace)\n\n def is_drm_protected(element):\n return (element.find(_add_ns('ContentProtection')) is not None)\n\n def extract_multisegment_info(element, ms_parent_info):\n ms_info = ms_parent_info.copy()\n\n def extract_common(source):\n segment_timeline = source.find(_add_ns('SegmentTimeline'))\n if (segment_timeline is not None):\n s_e = segment_timeline.findall(_add_ns('S'))\n if s_e:\n ms_info['total_number'] = 0\n ms_info['s'] = []\n for s in s_e:\n r = int(s.get('r', 0))\n ms_info['total_number'] += (1 + r)\n ms_info['s'].append({\n 't': int(s.get('t', 0)),\n 'd': int(s.attrib['d']),\n 'r': r,\n })\n start_number = source.get('startNumber')\n if start_number:\n ms_info['start_number'] = int(start_number)\n timescale = source.get('timescale')\n if timescale:\n ms_info['timescale'] = int(timescale)\n segment_duration = source.get('duration')\n if segment_duration:\n ms_info['segment_duration'] = float(segment_duration)\n\n def extract_Initialization(source):\n initialization = source.find(_add_ns('Initialization'))\n if (initialization is not None):\n ms_info['initialization_url'] = initialization.attrib['sourceURL']\n segment_list = element.find(_add_ns('SegmentList'))\n if (segment_list is not None):\n extract_common(segment_list)\n extract_Initialization(segment_list)\n segment_urls_e = segment_list.findall(_add_ns('SegmentURL'))\n if segment_urls_e:\n ms_info['segment_urls'] = [segment.attrib['media'] for segment in segment_urls_e]\n else:\n segment_template = element.find(_add_ns('SegmentTemplate'))\n if (segment_template is not None):\n extract_common(segment_template)\n media = segment_template.get('media')\n if media:\n ms_info['media'] = media\n initialization = segment_template.get('initialization')\n if initialization:\n ms_info['initialization'] = initialization\n else:\n extract_Initialization(segment_template)\n return ms_info\n mpd_duration = parse_duration(mpd_doc.get('mediaPresentationDuration'))\n formats = []\n for period in mpd_doc.findall(_add_ns('Period')):\n period_duration = (parse_duration(period.get('duration')) or mpd_duration)\n period_ms_info = extract_multisegment_info(period, {\n 'start_number': 1,\n 'timescale': 1,\n })\n for adaptation_set in period.findall(_add_ns('AdaptationSet')):\n if is_drm_protected(adaptation_set):\n continue\n adaption_set_ms_info = extract_multisegment_info(adaptation_set, period_ms_info)\n for representation in adaptation_set.findall(_add_ns('Representation')):\n if is_drm_protected(representation):\n continue\n representation_attrib = adaptation_set.attrib.copy()\n representation_attrib.update(representation.attrib)\n mime_type = representation_attrib['mimeType']\n content_type = mime_type.split('/')[0]\n if (content_type == 'text'):\n pass\n elif (content_type in ('video', 'audio')):\n base_url = ''\n for element in (representation, adaptation_set, period, mpd_doc):\n base_url_e = element.find(_add_ns('BaseURL'))\n if (base_url_e is not None):\n base_url = (base_url_e.text + base_url)\n if re.match('^https?://', base_url):\n break\n if (mpd_base_url and (not re.match('^https?://', base_url))):\n if ((not mpd_base_url.endswith('/')) and (not base_url.startswith('/'))):\n mpd_base_url += '/'\n base_url = (mpd_base_url + base_url)\n representation_id = 
representation_attrib.get('id')\n lang = representation_attrib.get('lang')\n url_el = representation.find(_add_ns('BaseURL'))\n filesize = int_or_none((url_el.attrib.get('{http://youtube.com/yt/2012/10/10}contentLength') if (url_el is not None) else None))\n bandwidth = int_or_none(representation_attrib.get('bandwidth'))\n f = {\n 'format_id': (('%s-%s' % (mpd_id, representation_id)) if mpd_id else representation_id),\n 'url': base_url,\n 'manifest_url': mpd_url,\n 'ext': mimetype2ext(mime_type),\n 'width': int_or_none(representation_attrib.get('width')),\n 'height': int_or_none(representation_attrib.get('height')),\n 'tbr': float_or_none(bandwidth, 1000),\n 'asr': int_or_none(representation_attrib.get('audioSamplingRate')),\n 'fps': int_or_none(representation_attrib.get('frameRate')),\n 'language': (lang if (lang not in ('mul', 'und', 'zxx', 'mis')) else None),\n 'format_note': ('DASH %s' % content_type),\n 'filesize': filesize,\n }\n f.update(parse_codecs(representation_attrib.get('codecs')))\n representation_ms_info = extract_multisegment_info(representation, adaption_set_ms_info)\n\n def prepare_template(template_name, identifiers):\n t = representation_ms_info[template_name]\n t = t.replace('$RepresentationID$', representation_id)\n t = re.sub(('\\\\$(%s)\\\\$' % '|'.join(identifiers)), '%(\\\\1)d', t)\n t = re.sub(('\\\\$(%s)%%([^$]+)\\\\$' % '|'.join(identifiers)), '%(\\\\1)\\\\2', t)\n t.replace('$$', '$')\n return t\n if ('initialization' in representation_ms_info):\n initialization_template = prepare_template('initialization', ('Bandwidth',))\n representation_ms_info['initialization_url'] = (initialization_template % {\n 'Bandwidth': bandwidth,\n })\n\n def location_key(location):\n return ('url' if re.match('^https?://', location) else 'path')\n if (('segment_urls' not in representation_ms_info) and ('media' in representation_ms_info)):\n media_template = prepare_template('media', ('Number', 'Bandwidth', 'Time'))\n media_location_key = location_key(media_template)\n if (('%(Number' in media_template) and ('s' not in representation_ms_info)):\n segment_duration = None\n if (('total_number' not in representation_ms_info) and ('segment_duration' in representation_ms_info)):\n segment_duration = float_or_none(representation_ms_info['segment_duration'], representation_ms_info['timescale'])\n representation_ms_info['total_number'] = int(math.ceil((float(period_duration) / segment_duration)))\n representation_ms_info['fragments'] = [{\n media_location_key: (media_template % {\n 'Number': segment_number,\n 'Bandwidth': bandwidth,\n }),\n 'duration': segment_duration,\n } for segment_number in range(representation_ms_info['start_number'], (representation_ms_info['total_number'] + representation_ms_info['start_number']))]\n else:\n representation_ms_info['fragments'] = []\n segment_time = 0\n segment_d = None\n segment_number = representation_ms_info['start_number']\n\n def add_segment_url():\n segment_url = (media_template % {\n 'Time': segment_time,\n 'Bandwidth': bandwidth,\n 'Number': segment_number,\n })\n representation_ms_info['fragments'].append({\n media_location_key: segment_url,\n 'duration': float_or_none(segment_d, representation_ms_info['timescale']),\n })\n for (num, s) in enumerate(representation_ms_info['s']):\n segment_time = (s.get('t') or segment_time)\n segment_d = s['d']\n add_segment_url()\n segment_number += 1\n for r in range(s.get('r', 0)):\n segment_time += segment_d\n add_segment_url()\n segment_number += 1\n segment_time += segment_d\n elif (('segment_urls' in 
representation_ms_info) and ('s' in representation_ms_info)):\n fragments = []\n segment_index = 0\n timescale = representation_ms_info['timescale']\n for s in representation_ms_info['s']:\n duration = float_or_none(s['d'], timescale)\n for r in range((s.get('r', 0) + 1)):\n segment_uri = representation_ms_info['segment_urls'][segment_index]\n fragments.append({\n location_key(segment_uri): segment_uri,\n 'duration': duration,\n })\n segment_index += 1\n representation_ms_info['fragments'] = fragments\n if ('fragments' in representation_ms_info):\n f.update({\n 'fragment_base_url': base_url,\n 'fragments': [],\n 'protocol': 'http_dash_segments',\n })\n if ('initialization_url' in representation_ms_info):\n initialization_url = representation_ms_info['initialization_url']\n if (not f.get('url')):\n f['url'] = initialization_url\n f['fragments'].append({\n location_key(initialization_url): initialization_url,\n })\n f['fragments'].extend(representation_ms_info['fragments'])\n try:\n existing_format = next((fo for fo in formats if (fo['format_id'] == representation_id)))\n except StopIteration:\n full_info = formats_dict.get(representation_id, {\n \n }).copy()\n full_info.update(f)\n formats.append(full_info)\n else:\n existing_format.update(f)\n else:\n self.report_warning(('Unknown MIME type %s in DASH manifest' % mime_type))\n return formats","sub_path":"Data Set/bug-fixing-5/c110944fa2f21af733b4f3168764e1b008e11514-<_parse_mpd_formats>-fix.py","file_name":"c110944fa2f21af733b4f3168764e1b008e11514-<_parse_mpd_formats>-fix.py","file_ext":"py","file_size_in_byte":12809,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"121455076","text":"#Count vowels and consonants in a string\nstring = str(input(\"Enter string: \"))\nvowels = \"aeiou\"\nconsonants = \"bcdfghjklmnpqrstvwxyz\"\nnum_of_v = 0\nnum_of_c = 0\nfor i in string:\n if(i in vowels):\n num_of_v = num_of_v +1\n elif(i in consonants):\n num_of_c = num_of_c +1\nprint(\"Vowels: \",num_of_v, \"Consonants: \",num_of_c)\n","sub_path":"9th.py","file_name":"9th.py","file_ext":"py","file_size_in_byte":338,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"39478363","text":"#!/usr/bin/env python\n# -*- coding: utf-8; -*-\n#\n# This file is part of Superdesk Video Server.\n#\n# Copyright 2013, 2014, 2015 Sourcefabric z.u. 
and contributors.\n#\n# For the full copyright and license information, please see the\n# AUTHORS and LICENSE files distributed with this source code, or\n# at https://www.sourcefabric.org/superdesk/license\n\nimport importlib\nimport os\n\nfrom flask import Flask\n\nimport settings\nfrom lib.logging import configure_logging\nfrom lib.ad_manager import LDAPADManager\n\nif os.environ.get('NEW_RELIC_LICENSE_KEY'):\n try:\n import newrelic.agent\n\n newrelic.agent.initialize(os.path.abspath(\n os.path.join(os.path.dirname(__file__), 'newrelic.ini')))\n except ImportError:\n pass\n\n\ndef get_app(config=None):\n \"\"\"App factory.\n\n :param config: configuration that can override config from `settings.py`\n :return: a new SuperdeskEve app instance\n \"\"\"\n app = Flask(__name__)\n\n if config is None:\n config = {}\n\n config['APP_ABSPATH'] = os.path.abspath(os.path.dirname(__file__))\n\n for key in dir(settings):\n if key.isupper():\n config.setdefault(key, getattr(settings, key))\n\n app.config.update(config)\n\n app.adm = LDAPADManager()\n app.adm.authenticate(app.config['LDAP_URL'])\n\n installed = set()\n\n def install_app(module_name):\n if module_name in installed:\n return\n installed.add(module_name)\n app_module = importlib.import_module(module_name)\n if hasattr(app_module, 'init_app'):\n app_module.init_app(app)\n\n for module_name in app.config.get('CORE_APPS', []):\n install_app(module_name)\n #: logging\n configure_logging(app.config['LOG_CONFIG_FILE'])\n return app\n\n\nif __name__ == '__main__':\n debug = True\n host = '0.0.0.0'\n port = int(os.environ.get('PORT', '5050'))\n app = get_app()\n app.run(host=host, port=port)\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1921,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"182368819","text":"import taichi as ti\nimport numpy as np\nimport random\nimport cv2\n\nreal = ti.f32\ndim = 2\nn_particles = 8192 * 4\nn_grid = 256\ndx = 1 / n_grid\ninv_dx = 1 / dx\ndt = 1e-4\np_mass = 1\np_vol = 1\nE = 100\n\nscalar = lambda: ti.var(dt=real)\nvec = lambda: ti.Vector(dim, dt=real)\nmat = lambda: ti.Matrix(dim, dim, dt=real)\n\nx, v = vec(), vec()\ngrid_v, grid_m = vec(), scalar()\nC, J = mat(), scalar()\n\n# ti.cfg.arch = ti.x86_64\nti.cfg.arch = ti.cuda\n\n@ti.layout\ndef place():\n ti.root.dense(ti.k, n_particles).place(x, v, J, C)\n ti.root.dense(ti.ij, n_grid).place(grid_v, grid_m)\n\n\n@ti.kernel\ndef clear_grid():\n for i, j in grid_m:\n grid_v[i, j] = [0, 0]\n grid_m[i, j] = 0\n\n\n@ti.kernel\ndef p2g():\n for p in x:\n base = ti.cast(x[p] * inv_dx - 0.5, ti.i32)\n fx = x[p] * inv_dx - ti.cast(base, ti.f32)\n w = [0.5 * ti.sqr(1.5 - fx), 0.75 - ti.sqr(fx - 1),\n 0.5 * ti.sqr(fx - 0.5)]\n stress = -dt * p_vol * (J[p] - 1) * 4 * inv_dx * inv_dx * E\n affine = ti.Matrix([[stress, 0], [0, stress]]) + p_mass * C[p]\n for i in ti.static(range(3)):\n for j in ti.static(range(3)):\n offset = ti.Vector([i, j])\n dpos = (ti.cast(ti.Vector([i, j]), ti.f32) - fx) * dx\n weight = w[i](0) * w[j](1)\n grid_v[base + offset].atomic_add(weight * (p_mass * v[p] + affine @ dpos))\n grid_m[base + offset].atomic_add(weight * p_mass)\n\n\nbound = 3\n\n\n@ti.kernel\ndef grid_op():\n for i, j in grid_m:\n if grid_m[i, j] > 0:\n inv_m = 1 / grid_m[i, j]\n grid_v[i, j] = inv_m * grid_v[i, j]\n grid_v(1)[i, j] -= dt * 9.8\n if i < bound and grid_v(0)[i, j] < 0:\n grid_v(0)[i, j] = 0\n if i > n_grid - bound and grid_v(0)[i, j] > 0:\n grid_v(0)[i, j] = 0\n if j < bound and 
grid_v(1)[i, j] < 0:\n                grid_v(1)[i, j] = 0\n            if j > n_grid - bound and grid_v(1)[i, j] > 0:\n                grid_v(1)[i, j] = 0\n\n\n@ti.kernel\ndef g2p():\n    for p in x:\n        base = ti.cast(x[p] * inv_dx - 0.5, ti.i32)\n        fx = x[p] * inv_dx - ti.cast(base, ti.f32)\n        w = [0.5 * ti.sqr(1.5 - fx), 0.75 - ti.sqr(fx - 1.0),\n             0.5 * ti.sqr(fx - 0.5)]\n        new_v = ti.Vector([0.0, 0.0])\n        new_C = ti.Matrix([[0.0, 0.0], [0.0, 0.0]])\n\n        for i in ti.static(range(3)):\n            for j in ti.static(range(3)):\n                dpos = ti.cast(ti.Vector([i, j]), ti.f32) - fx\n                g_v = grid_v[base(0) + i, base(1) + j]\n                weight = w[i](0) * w[j](1)\n                new_v += weight * g_v\n                new_C += 4 * weight * ti.outer_product(g_v, dpos) * inv_dx\n\n        v[p] = new_v\n        x[p] += dt * v[p]\n        J[p] *= 1 + dt * new_C.trace()\n        C[p] = new_C\n\n\ndef main():\n    for i in range(n_particles):\n        x[i] = [random.random() * 0.4 + 0.2, random.random() * 0.4 + 0.2]\n        v[i] = [0, -1]\n        J[i] = 1\n\n    for f in range(200):\n        for s in range(150):\n            clear_grid()\n            p2g()\n            grid_op()\n            g2p()\n\n        ti.profiler_print()\n\n        scale = 2\n        img = np.zeros(shape=(scale * n_grid, scale * n_grid)) + 0.3\n        for i in range(n_particles):\n            p_x = int(scale * x(0)[i] / dx)\n            p_y = int(scale * x(1)[i] / dx)\n            img[p_x, p_y] = 1\n        img = img.swapaxes(0, 1)[::-1]\n        cv2.imshow('MPM', img)\n        cv2.waitKey(1)\n        # cv2.imwrite('frame{:03d}.png'.format(f), img * 255)\n\nif __name__ == '__main__':\n    main()\n","sub_path":"examples/mpm.py","file_name":"mpm.py","file_ext":"py","file_size_in_byte":3234,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"97102088","text":"\nfrom Spacecraft.Spacecraft import Spacecraft\nfrom Dynamics.SimTime import SimTime\nfrom initial_config import initial_config\nfrom Dynamics.SpacecraftOrbit.MainOrbit import MainOrbit\nfrom Dynamics.CelestialBody.Ephemeris import Ephemeris\nfrom Dynamics.SpacecraftAttitude.MainAttitude import MainAttitude\n\nimport numpy as np\nimport pandas as pd\nimport datetime\n\ntwopi = 2.0 * np.pi\ndeg2rad = np.pi / 180.0\nrad2deg = 1 / deg2rad\n\n\nclass MainSimulation(MainOrbit, MainAttitude, SimTime):\n    def __init__(self, initial_properties = initial_config()):\n\n        self.main_spacecraft = Spacecraft(initial_properties[1], None)\n\n        SimTime.__init__(self, initial_properties[0])\n        MainAttitude.__init__(self, self.main_spacecraft.attitude_dynamics)\n        MainOrbit.__init__(self, initial_properties[2], self.main_spacecraft.orbit_dynamics)\n        self.earth = Ephemeris()\n\n        # Auxiliary variables\n        date = datetime.datetime.now()\n        self.filename = date.strftime('%Y-%m-%d %H-%M-%S')\n        self.pos = [0, 0, 0]\n        self.vel = [0, 0, 0]\n        self.quat = [1, 0, 0, 0]\n        self.omega = [0, 0, 0]\n        self.long = 0\n        self.lat = 0\n        self.alt = 0\n\n    def run_simulation(self):\n        self.set_propagator()\n        # Loop\n        self.reset_countTime()\n        while self.maincountTime <= self.endsimTime:\n            self.progressionsimTime()\n            array_time, str_time = self.get_array_time()\n            if self.orbit_update_flag:\n                self.pos, self.vel = self.update_orbit(array_time)\n                self.lat, self.long, self.alt = self.orbit_propagate.TransECItoGeo()\n                self.orbit_update_flag = False\n\n            if self.attitude_update_flag:\n                self.quat, self.omega = self.update_attitude()\n                inputs_parameters = [self.quat, self.omega]\n                # Add the force and torque generated by the disturbance for the next dynamics propagation\n                #self.add_ext_force()\n                #self.add_ext_torque()\n                # Add the force and torque generated by the satellite for the next dynamics propagation\n                
self.add_int_force(self.main_spacecraft.generate_force_b(inputs_parameters))\n self.add_int_torque(self.main_spacecraft.generate_torque_b(inputs_parameters))\n\n self.attitude_update_flag = False\n\n if self.log_flag:\n self.main_spacecraft.update_spacecraft_dynamics(self.pos,\n self.vel,\n self.quat,\n self.omega,\n self.lat,\n self.long,\n self.alt)\n self.main_spacecraft.update_spacecraft_state(str_time, self.maincountTime)\n self.main_spacecraft.update_control_history()\n self.earth.gst_Update(self.orbit_propagate.current_side)\n self.log_flag = False\n\n # update time\n self.updateSimtime()\n\n # Data report to create dictionary\n self.main_spacecraft.create_data()\n\n # Save Dataframe pandas in csv file\n self.save_data()\n print('Finished')\n\n def save_data(self):\n master_data = {**self.main_spacecraft.master_data_satellite, **self.earth.gst}\n database = pd.DataFrame(master_data, columns=master_data.keys())\n print(database)\n\n database.to_csv(\"./Data/logs/\"+self.filename+\".csv\", index=False, header=True)\n print(\"Data created\")\n\n","sub_path":"MainSimulation/MainSimulation.py","file_name":"MainSimulation.py","file_ext":"py","file_size_in_byte":3797,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"472874603","text":"# Mochamad A. Prananda, Chenshan Yuan \n# CSE 415 HW 3\n# Breadth First Search Algorithm Implementation\n\n# ItrBFS.py\n# Iterative Breadth-First Search of a problem space.\n# The Problem should be given in a separate Python\n# file using the \"QUIET\" file format.\n# See the TowersOfHanoi.py example file for details.\n# Examples of Usage:\n# python3 ItrBFS.py TowersOfHanoi\n# python3 ItrBFS.py EightPuzzle\n\nimport sys\n\nif sys.argv==[''] or len(sys.argv)<2:\n import EightPuzzle as Problem\nelse:\n import importlib\n Problem = importlib.import_module(sys.argv[1])\n\n\nprint(\"\\nWelcome to ItrBFS\")\nCOUNT = None\nBACKLINKS = {}\npath = []\n\ndef runDFS():\n initial_state = Problem.CREATE_INITIAL_STATE()\n #print(\"Initial State:\")\n #print(Problem.DESCRIBE_STATE(initial_state))\n global COUNT, BACKLINKS\n COUNT = 0\n BACKLINKS = {}\n IterativeBFS(initial_state)\n print(str(COUNT)+\" states examined.\")\n print(Problem.maze)\n\ndef IterativeBFS(initial_state):\n global COUNT, BACKLINKS\n\n OPEN = [initial_state]\n CLOSED = []\n BACKLINKS[Problem.HASHCODE(initial_state)] = -1\n\n while OPEN != []:\n S = OPEN[0]\n\n del OPEN[0]\n CLOSED.append(S);\n\n if Problem.GOAL_TEST(S):\n if (S == Problem.EXIT):\n backtrace(S)\n Problem.printMaze()\n del OPEN[:]\n del CLOSED[:]\n OPEN.append(S)\n BACKLINKS[Problem.HASHCODE(Problem.EXIT)] = -1\n Problem.EXIT = Problem.getPelletIndex()\n S = OPEN[0]\n del OPEN[0]\n CLOSED.append(S)\n if (Problem.EXIT == -1):\n print(Problem.GOAL_MESSAGE_FUNCTION(S))\n Problem.runPath(path)\n\n\n COUNT += 1\n if (COUNT % 32)==0:\n print(\"\",end=\"\")\n if (COUNT % 128)==0:\n print(\"COUNT = \"+str(COUNT))\n print(\"len(OPEN)=\"+str(len(OPEN)))\n print(\"len(CLOSED)=\"+str(len(CLOSED)))\n L = []\n for op in Problem.OPERATORS:\n #Optionally uncomment the following when debugging\n #a new problem formulation.\n #print(\"Trying operator: \"+op.name)\n \n \n if op.precond(S):\n new_state = op.state_transf(S)\n \n if not occurs_in(new_state, CLOSED) and not occurs_in(new_state, OPEN):\n L.append(new_state)\n BACKLINKS[Problem.HASHCODE(new_state)] = S\n #Uncomment for debugging:\n #print(Problem.DESCRIBE_STATE(new_state))\n\n for s2 in L:\n for i in range(len(OPEN)):\n if 
Problem.DEEP_EQUALS(s2, OPEN[i]):\n del OPEN[i]; break\n\n OPEN = OPEN + L\n # print('OPEN is: ' + str(OPEN[0]))\n\n\ndef backtrace(S):\n global BACKLINKS, path\n\n tempPath = []\n while not S == -1:\n [i, j] = Problem.coordinate(S)\n Problem.maze[i][j] = ' '\n tempPath.append(S)\n S = BACKLINKS[Problem.HASHCODE(S)]\n tempPath.reverse()\n print(\"Solution path: \")\n #for s in path:\n # print(Problem.DESCRIBE_STATE(s))\n print(str(len(tempPath)) + \" solution paths\")\n print(tempPath)\n # Problem.runPath(path)\n for i in range(len(tempPath)):\n path.append(tempPath[i])\n # path.append(tempPath)\n return tempPath \n \n\ndef occurs_in(s1, lst):\n for s2 in lst:\n if Problem.DEEP_EQUALS(s1, s2): return True\n return False\n\nif __name__=='__main__':\n runDFS()\n\n","sub_path":"ItrBFS.py","file_name":"ItrBFS.py","file_ext":"py","file_size_in_byte":3182,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"268458915","text":"#from sqlalchemy import create_engine, event\n#from sqlalchemy.engine import Engine\n#from sqlalchemy.orm import scoped_session, sessionmaker\n#from sqlalchemy.ext.declarative import declarative_base\nfrom flask_sqlalchemy import SQLAlchemy\n\nfrom tubeserv import app\n\n#engine = create_engine(app.config[\"DATABASE_URL\"], echo=False)\n#db_session = scoped_session(sessionmaker(autocommit=False,\n# autoflush=False, bind=engine))\n\ndb = SQLAlchemy(app)\n\n@db.event.listens_for(db.engine, \"connect\")\ndef set_sqlite_pragma(dbapi_connection, connection_record):\n cursor = dbapi_connection.cursor()\n cursor.execute(\"PRAGMA foreign_keys=ON\")\n cursor.close()\n\ndef init_db():\n import tubeserv.database.models\n\n db.create_all()\n","sub_path":"tubeserv/database/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":727,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"588308042","text":"'''\nВ файле https://stepik.org/media/attachments/lesson/209723/3.html находится одна таблица.\nПросуммируйте все числа в ней и введите в качестве ответа одно число - эту сумму.\nДля доступа к ячейкам используйте возможности BeautifulSoup.\n'''\nurl=\"https://stepik.org/media/attachments/lesson/209723/3.html\"\n#url='input14.html'\nfrom urllib.request import urlopen\nfrom bs4 import BeautifulSoup as BS\n\nresponse=urlopen(url)\nhtml=response.read().decode('utf-8')\nsoup=BS(html,'lxml')\nlinks=[]\nsum=0\nfor link in soup.find_all('td'):\n links.append(int(link.text.strip()))\nfor i in links:\n sum+=i\nprint(sum)\n\n","sub_path":"lection7/task14.py","file_name":"task14.py","file_ext":"py","file_size_in_byte":736,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"329612510","text":"import re\nimport subprocess\nfrom subprocess import CalledProcessError\n\nfrom . import base\n\n\nclass KeyboardLayout(base.InLoopPollText):\n \"\"\"\n Widget for changing and displaying the current keyboard layout.\n It requires setxkbmap to be available in the sytem.\n \"\"\"\n defaults = [\n (\"update_interval\", 1, \"Update time in seconds.\"),\n (\"configured_keyboards\", \"us\", \"A list of predefined keyboard layouts \"\n \"represented as strings. 
For example: \"\n \"['us', 'us colemak', 'es', 'fr'].\"),\n ]\n\n def __init__(self, **config):\n base.InLoopPollText.__init__(self, **config)\n self.add_defaults(KeyboardLayout.defaults)\n\n def button_press(self, x, y, button):\n if button == 1:\n self.next_keyboard()\n\n def next_keyboard(self):\n \"\"\"\n Set the next layout in the list of configured keyboard layouts as\n new current layout in use.\n If the current keyboard layout is not in the list, it will set as\n new layout the first one in the list.\n \"\"\"\n\n current_keyboard = self.poll()\n if current_keyboard in self.configured_keyboards:\n # iterate the list circularly\n next_keyboard = self.configured_keyboards[\n (self.configured_keyboards.index(current_keyboard) + 1) %\n len(self.configured_keyboards)]\n else:\n next_keyboard = self.configured_keyboards[0]\n self._set_keyboard(next_keyboard)\n\n def poll(self):\n \"\"\"\n Return the currently used keyboard layout as a string.\n Examples: \"us\", \"us dvorak\".\n In case of error returns \"unknown\".\n \"\"\"\n try:\n xset_output = self.call_process([\"xset\", \"-q\"])\n keyboard = _Keyboard(self.configured_keyboards).get_keyboard_layout(xset_output).upper()\n return str(keyboard)\n except CalledProcessError as e:\n self.log.error('Can not change the keyboard layout: {0}'\n .format(e))\n except OSError as e:\n self.log.error('Please, check that setxkbmap is available: {0}'\n .format(e))\n return \"unknown\"\n\n def _set_keyboard(self, keyboard):\n command = ['setxkbmap']\n command.extend(keyboard.split(\" \"))\n try:\n subprocess.check_call(command)\n except CalledProcessError as e:\n self.log.error('Can not change the keyboard layout: {0}'\n .format(e))\n except OSError as e:\n self.log.error('Please, check that setxkbmap is available: {0}'\n .format(e))\n\n\nclass _Keyboard(object):\n\n def __init__(self, configured_keyboards):\n if len(configured_keyboards) == 1:\n self.languages = {\n 'first': configured_keyboards[0],\n 'second': 'None',\n }\n else:\n self.languages = {\n 'first': configured_keyboards[0],\n 'second': configured_keyboards[1],\n }\n self.regular_strings = {\n 'hexadecimal': {\n 'first': \"\"\"\\w{4}e\\w{3}\"\"\",\n 'second': \"\"\"\\w{4}f\\w{3}\"\"\",\n },\n 'binary': {\n 'first': \"\"\"\\w{4}0\\w{3}\"\"\",\n 'second': \"\"\"\\w{4}1\\w{3}\"\"\",\n },\n \"inetger\": \"\\d{8}\",\n \"led_mask\": \"\"\"LED mask:\\s\\s\\w{8}\"\"\",\n }\n\n def get_keyboard_layout(self, xset_output):\n raw_list = []\n\n for item in xset_output.strip().splitlines():\n if re.search(self.regular_strings['led_mask'], item):\n raw_led_mask = re.search(self.regular_strings['led_mask'], item).group()\n raw_list = raw_led_mask.split(':')\n led_mask = raw_list[1].strip()\n break\n\n if not re.search(self.regular_strings['inetger'], led_mask):\n cur_regular_strings = self.regular_strings['hexadecimal']\n else:\n cur_regular_strings = self.regular_strings['binary']\n\n if re.search(cur_regular_strings['first'], led_mask):\n result = self.languages['first']\n elif re.search(cur_regular_strings['second'], led_mask):\n result = self.languages['second']\n else:\n result = \"ERR\"\n return result\n","sub_path":"libqtile/widget/keyboardlayout.py","file_name":"keyboardlayout.py","file_ext":"py","file_size_in_byte":4398,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"559521012","text":"# -*- coding: utf-8 -*-\n\nfrom django.http import HttpResponse\nfrom django.views.generic import View\n\nfrom imio_survey.models import 
SurveyType\n\nimport json\n\nclass SurveyTypeView(View):\n def get(self, request, *args, **kwargs):\n \"\"\" Get List of available surveys \"\"\"\n result = []\n for st in SurveyType.objects.all():\n result.append({'key': st.key, 'desc': st.description})\n return HttpResponse(json.dumps(result),content_type=\"application/json\")\n\nclass SurveyTypeLayersView(View):\n def get(self, request, *args, **kwargs):\n \"\"\" Get List of available layer for a given survey \"\"\"\n survey_type_param = request.GET.get(\"st\", None) #SurveyType\n survey_type_obj = SurveyType.objects.get(pk= survey_type_param)\n result = []\n for layer in survey_type_obj.survey_layers.all():\n result.append({\n 'l': layer.id,\n 'desc': layer.description,\n 'geom': layer.geometry_field_name\n })\n return HttpResponse(json.dumps(result),content_type=\"application/json\")\n","sub_path":"imio_survey/views/surveytype.py","file_name":"surveytype.py","file_ext":"py","file_size_in_byte":1095,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"216162413","text":"# -*- coding: utf-8 -*-\r\n\r\n\"\"\"\r\nClustering Selection/Validation functions (Average Silhouette, Elbow Method)\r\n\"\"\"\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\n\r\n#import functions from file k-Means.py\r\nfrom kMeans_algorithm import * \r\n\r\n\"\"\"\r\nAverage silhouette measures the quality of clustering.\r\nWe calculate the average silhouette for a range of number \r\nof clusters, and pick the the number of clusters which has\r\nthe highest average silhouette score.\r\n\"\"\"\r\n\r\nfrom scipy.spatial import distance\r\n\r\ndef SilhouetteScore(x_i, X, idx, K):\r\n '''\r\n For given training example (with index x_i), calculates and returns its silhouette score.\r\n \r\n First, the function calculates the average distance, a_i, between the given training example\r\n and all other points in the cluster it belongs to. Then, the function calculates the average distance\r\n between the training example and all other points not in its own cluster, and picks the cluster with \r\n the smallest average distance. 
Using a_i and b_i, it calculates the silhouette score of the given \r\n training example.\r\n '''\r\n #calculate average distance between x_i and all points in its cluster\r\n \r\n #training example \r\n point = X[x_i]\r\n #cluster index of training example\r\n idx_point = idx[x_i]\r\n \r\n #list of distances between point and other points in own cluster\r\n own_cluster_distances = np.empty(0)\r\n #loop over training examples' assigned cluster index, find points in \r\n #own cluster and calculate euclidean distance\r\n for i in range(idx.shape[0]):\r\n if idx[i] == idx_point:\r\n own_cluster_distances = np.append(own_cluster_distances, distance.euclidean(point, X[i]))\r\n \r\n #average distance between point and all other points in own cluster\r\n a_i = np.sum(own_cluster_distances)/(own_cluster_distances.shape[0])\r\n \r\n #for each k in range K, calculate average distance between point and all other points in cluster k\r\n avg_cluster_distances = np.empty(0)\r\n #range of K without given trainig example's own cluster\r\n other_clusters = [r for r in range(K) if r != idx_point]\r\n \r\n for k in other_clusters:\r\n #distances between point and all points in cluster k\r\n k_distances = np.empty(0)\r\n #all points in cluster k\r\n k_cluster = X[idx==k]\r\n #number of points in cluster k\r\n k_len = k_cluster.shape[0]\r\n for n in range(k_len):\r\n k_distances = np.append(k_distances, distance.euclidean(point, k_cluster[n]))\r\n #average distance between point and all points in cluster k appended to\r\n #avg_cluster_distances array\r\n if k_len != 0:\r\n avg_cluster_distances = np.append(avg_cluster_distances, np.sum(k_distances)/k_len)\r\n else:\r\n avg_cluster_distances = np.append(avg_cluster_distances, 0)\r\n \r\n \r\n #find closest cluster in avg_cluster_distances\r\n b_i = np.min(avg_cluster_distances)\r\n \r\n silhouette_score = (b_i - a_i)/np.max([a_i, b_i])\r\n \r\n \r\n return silhouette_score \r\n\r\n\r\ndef AverageSilhouette(X, idx, K):\r\n '''\r\n Calculates and returns the average silhoutte for given number of clusters, K.\r\n \r\n Average silhouette is the average of the silhouette scores of all the training examples.\r\n '''\r\n silhouette_scores = np.empty(0)\r\n #loop over all training examples and calculate their silhouette score\r\n for i in range(X.shape[0]):\r\n silhouette_i = SilhouetteScore(i, X, idx, K)\r\n silhouette_scores = np.append(silhouette_scores, silhouette_i)\r\n \r\n #calculate average of all scores\r\n avg_silhouette = np.sum(silhouette_scores)/len(silhouette_scores)\r\n \r\n return avg_silhouette\r\n \r\n\"\"\"\r\nThe following function runs k-Means for each K in the range 2 - K_range, \r\ncalculates the average silhouette, and plots a graph of the average \r\nsilhouette score for each K.\r\n\"\"\"\r\n\r\ndef PlotAvgSilhouettes(X, K_range, max_iters, init_runs):\r\n \r\n '''\r\n Runs kMeans and plots the average silhouette scores for \r\n each number of clusters in K_range.\r\n \r\n '''\r\n clusters_avg_sil = np.empty(0)\r\n #minimum of K_range must be 2\r\n for K in range(2, K_range+1):\r\n centroids, idx, distortion_lowest = kMeansRuns(X, K, max_iters, init_runs)\r\n k_avg_sil = AverageSilhouette(X, idx, K)\r\n clusters_avg_sil = np.append(clusters_avg_sil, k_avg_sil)\r\n \r\n #plot graph of avg silhouettes of each cluster of size in range 2 - K_range\r\n plt.figure(figsize = (12.8, 9.6))\r\n plt.plot(np.arange(2,K_range+1,1), clusters_avg_sil)\r\n plt.title('Average Silhouette for number of clusters K')\r\n plt.xlabel('K')\r\n plt.ylabel('Average 
Silhouette')\r\n    plt.show()\r\n    \r\n    return None\r\n\r\n\r\n\"\"\"\r\nThe Elbow method can provide a quick snapshot of how clusters vary in terms of their distortion.\r\nSometimes useful for selecting number of clusters.\r\n\"\"\"\r\n\r\ndef PlotElbow(X, K_range, max_iters, init_runs):\r\n    \r\n    '''\r\n    Plots distortion for clusters of size in 2 - K_range.\r\n    '''\r\n    cluster_distortions = np.empty(0)\r\n    for K in range(2, K_range+1):\r\n        centroids, idx, distortion_lowest = kMeansRuns(X, K, max_iters, init_runs)\r\n        cluster_distortions = np.append(cluster_distortions, distortion_lowest)\r\n    \r\n    #plot graph of distortion of each cluster of size in range 2 - K_range\r\n    plt.figure(figsize = (12.8, 9.6))\r\n    plt.plot(np.arange(2,K_range+1,1), cluster_distortions)\r\n    plt.title('Distortion for number of clusters K')\r\n    plt.xlabel('K')\r\n    plt.ylabel('Distortion')\r\n    plt.show()\r\n    \r\n    return None\r\n","sub_path":"clustering_validation_functions.py","file_name":"clustering_validation_functions.py","file_ext":"py","file_size_in_byte":5650,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"529290507","text":"\n\nfrom xai.brain.wordbase.verbs._coo import _COO\n\n#class header\nclass _COOED(_COO, ):\n\tdef __init__(self,): \n\t\t_COO.__init__(self)\n\t\tself.name = \"COOED\"\n\t\tself.specie = 'verbs'\n\t\tself.basic = \"coo\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/verbs/_cooed.py","file_name":"_cooed.py","file_ext":"py","file_size_in_byte":219,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"571624766","text":"\"\"\"\nExample of two Arduino boards (UNO/Nano) communicating with a PC (Win10/Ubuntu) over pyserial.\nUpload arduinoCharInput.ino to both Arduino boards first.\n\n\"\"\"\nimport serial\nimport time\n\n# Ubuntu: establish connection on a specific port:\nser5 = serial.Serial('/dev/ttyUSB0', baudrate=9600, timeout=1)\nser6 = serial.Serial('/dev/ttyUSB1', baudrate=9600, timeout=1)\n\n# Win10: establish connection on a specific port:\n# ser5 = serial.Serial('COM5', baudrate=9600, timeout=1)\n# ser6 = serial.Serial('COM6', baudrate=9600, timeout=1)\ntime.sleep(2)\n\ndef getValues5():\n    ser5.write(b'z')\n    arduinoData5 = ser5.readline().decode('ascii')\n    return arduinoData5\n\ndef getValues6():\n    ser6.write(b'z')\n    arduinoData6 = ser6.readline().decode('ascii')\n    return arduinoData6\n\nwhile True:\n\n    userInput = input('Get data point?: ')\n\n    if userInput == 'y':\n        print(getValues5() + getValues6())\n","sub_path":"PySerial-python/arduSerialCom01.py","file_name":"arduSerialCom01.py","file_ext":"py","file_size_in_byte":893,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"564745505","text":"from datetime import datetime, timedelta\nimport os\n\nfrom airflow import DAG\nfrom airflow.operators import TheELOperator, BashStreamOperator, BashOperator\nfrom airflow.operators import SlackNotificationOperator\n\ndefault_args = {\n    'owner': 'airflow',\n    'on_failure_callback': SlackNotificationOperator.failed(),\n}\n\ndag = DAG('etl_bev_tax_accounts_v1',\n    start_date=datetime.now() - timedelta(days=1),\n    schedule_interval='@hourly',\n    default_args=default_args\n)\n\nschema_file = 's3://\"$S3_STAGING_BUCKET\"/schemas/etl_bev_tax_accounts.json'\ndata_file = 's3://\"$S3_STAGING_BUCKET\"/etl_bev_tax_accounts_v1/{{run_id}}/etl_bev_tax_accounts.csv'\n\n## TESTING\n# transform_accounts = BashStreamOperator(\n# 
task_id='transform_accounts_data',\n# dag=dag,\n# bash_command='VALUE=$(cat); echo \"$VALUE\";',\n# # bash_command='echo \"foo\"',\n# input_file='s3://phl-etl-staging-dev/schemas/etl_bev_tax_accounts.json',\n# output_file='s3://phl-etl-staging-dev/etl_bev_tax_accounts_v1/test.txt',\n# params={\n# 'test_param': 'foo'\n# }\n# )\n\ntransform_accounts = BashOperator(\n task_id='transform_accounts_data',\n dag=dag,\n bash_command='echo \"Boooo\";',\n params={\n 'test_param': 'foo'\n }\n)\n\n# extract_accounts_data = TheELOperator(\n# task_id='extract_accounts_data',\n# dag=dag,\n# el_command='read',\n# table_name='VW_AccountDetails',\n# connection_string='\"$BEV_TAX_MSSQL_CONN_STRING\"',\n# output_file=data_file\n# )\n\n# create_temp_table_accounts_data = TheELOperator(\n# task_id='create_temp_table_accounts_data',\n# dag=dag,\n# el_command='create_table',\n# db_schema='phl',\n# table_name='sbt_accounts_{{run_id.lower()}}',\n# table_schema_path=schema_file,\n# connection_string='\"$CARTO_CONN_STRING\"'\n# )\n\n# load_accounts_data = TheELOperator(\n# task_id='load_accounts_data',\n# dag=dag,\n# el_command='write',\n# db_schema='phl',\n# table_name='sbt_accounts_{{run_id.lower()}}',\n# skip_headers=True,\n# table_schema_path=schema_file,\n# connection_string='\"$CARTO_CONN_STRING\"',\n# input_file=data_file\n# )\n\n# swap_accounts_data = TheELOperator(\n# task_id='swap_accounts_data',\n# dag=dag,\n# el_command='swap_table',\n# db_schema='phl',\n# new_table_name='sbt_accounts_{{run_id.lower()}}',\n# old_table_name='sbt_accounts',\n# connection_string='\"$CARTO_CONN_STRING\"'\n# )\n\n# extract_accounts_data >> create_temp_table_accounts_data >> load_accounts_data >> swap_accounts_data\n","sub_path":"dags/etl_bev_tax_accounts.py","file_name":"etl_bev_tax_accounts.py","file_ext":"py","file_size_in_byte":2514,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"201870833","text":"#!/usr/bin/env python\n\nimport tensorflow as tf\nimport h5py\nimport numpy as np\nimport argparse\nimport cv2\nfrom datetime import datetime\n\n\ndef ball_pos_and_scores_to_hdf5(games, PATH_TO_CKPT):\n\n detection_graph = tf.Graph()\n with detection_graph.as_default():\n od_graph_def = tf.GraphDef()\n with tf.gfile.GFile(PATH_TO_CKPT, 'rb') as fid:\n serialized_graph = fid.read()\n od_graph_def.ParseFromString(serialized_graph)\n tf.import_graph_def(od_graph_def, name='')\n\n with detection_graph.as_default():\n config = tf.ConfigProto()\n config.gpu_options.allow_growth = True\n with tf.Session(graph=detection_graph, config=config) as sess:\n\n for game_name in games:\n print(datetime.utcnow(), ' Processing ', game_name)\n game = games[game_name]\n\n detected_pos = []\n ball_scores = []\n\n if 'ball_pos' in game:\n del game['ball_pos']\n if 'ball_scores' in game:\n del game['ball_scores']\n\n if 'table_frames' in game or 'table_frames_encoded' in game:\n for frame_number in range(100):\n\n if 'table_frames' in game:\n frame = game['table_frames'][frame_number]\n else:\n encoded_frame = game['table_frames_encoded'][frame_number]\n frame = cv2.imdecode(np.asarray(\n bytearray(encoded_frame)), 1)\n\n # Definite input and output Tensors for detection_graph\n image_tensor = detection_graph.get_tensor_by_name(\n 'image_tensor:0')\n # Each box (y_min,x_min,y_max,x_max) represents a part\n # of the image where a particular object was detected.\n detection_boxes = detection_graph.get_tensor_by_name(\n 'detection_boxes:0')\n # Each score represent how level of confidence for each 
of the objects.\n # Score is shown on the result image, together with the\n # class label.\n detection_scores = detection_graph.get_tensor_by_name(\n 'detection_scores:0')\n detection_classes = detection_graph.get_tensor_by_name(\n 'detection_classes:0')\n num_detections = detection_graph.get_tensor_by_name(\n 'num_detections:0')\n\n image_np_expanded = np.expand_dims(frame, axis=0)\n\n (boxes, scores, classes, num) = sess.run(\n [detection_boxes, detection_scores,\n detection_classes, num_detections],\n feed_dict={image_tensor: image_np_expanded})\n\n x_middle = int(\n round(0.5 * (boxes[0][0][1] * 480 + boxes[0][0][3] * 480)))\n y_middle = int(\n round(0.5 * (boxes[0][0][0] * 320 + boxes[0][0][2] * 320)))\n\n detected_pos.append(\n np.array([x_middle - 8, y_middle - 8]))\n ball_scores.append(scores[0][0])\n\n games.create_dataset(\n game_name + '/ball_pos', data=detected_pos)\n games.create_dataset(\n game_name + '/ball_scores', data=ball_scores)\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument('-i', '--input_file', help='HDF5 File the neural net shall detect the ball '\n 'and save the corresponding scores in')\n parser.add_argument('-f', '--frozen_interference_graph',\n help='Path to frozen inference graph')\n args = parser.parse_args()\n if args.input_file and args.frozen_interference_graph:\n games = h5py.File(args.input_file)\n ball_pos_and_scores_to_hdf5(games, args.frozen_interference_graph)\n else:\n print('Please specify a frozen interference graph and the HDF5 File where the ball shall be detected')\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"tools/neural_net_detection.py","file_name":"neural_net_detection.py","file_ext":"py","file_size_in_byte":4401,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"461951868","text":"#!/usr/bin/env python\n\nimport exception\nimport orbitalobject\n\n# Tables -----------------------------------------------------------------------\n# SWN tables\nTABLE_ALT_ATMOSPHERE = {\n 2: 'Corrosive',\n 3: 'Breathable',\n 4: 'Breathable',\n 5: 'Breathable',\n 6: 'Breathable',\n 7: 'Breathable',\n 8: 'Thick',\n 9: 'Invasive, toxic',\n 10: 'Corrosive and invasive'\n}\n\nTABLE_ATMOSPHERE = {\n 2: 'Corrosive',\n 3: 'Inert gas',\n 4: 'Airless or thin',\n 5: 'Breathable',\n 6: 'Breathable',\n 7: 'Breathable',\n 8: 'Breathable',\n 9: 'Breathable',\n 10: 'Thick',\n 11: 'Invasive, toxic',\n 12: 'Corrosive and invasive'\n}\n\nTABLE_BIOSPHERE = {\n 2: 'Remnants',\n 3: 'Microbial',\n 4: 'No native',\n 5: 'No native',\n 6: 'Human-miscible',\n 7: 'Human-miscible',\n 8: 'Human-miscible',\n 9: 'Immiscible',\n 10: 'Immiscible',\n 11: 'Hybrid',\n 12: 'Engineered' \n}\n\nTABLE_POPULATION = {\n 2: 'Failed colony',\n 3: 'Outpost',\n 4: 'Tens of thousands',\n 5: 'Tens of thousands',\n 6: 'Hundreds of thousands',\n 7: 'Hundreds of thousands',\n 8: 'Hundreds of thousands',\n 9: 'Millions',\n 10: 'Millions',\n 11: 'Billions',\n 12: 'Alien civilization' \n}\n\nTABLE_POPULATION_ALT = {\n 2: [ 0, 999],\n 3: [ 1000, 9999],\n 4: [ 10000, 99999],\n 5: [ 10000, 99999],\n 6: [ 100000, 999999],\n 7: [ 100000, 999999],\n 8: [ 100000, 999999],\n 9: [ 1000000, 999999999],\n 10: [ 1000000, 999999999],\n 11: [1000000000,10000000000],\n 12: [ 100000, 50000000] \n}\n\nTABLE_TAGS = {\n 1: {\n 1: 'Abandoned Colony',\n 2: 'Alien Ruins',\n 3: 'Altered Humanity',\n 4: 'Area 51',\n 5: 'Badlands World',\n 6: 'Bubble Cities',\n 7: 'Civil War',\n 8: 'Cold War',\n 9: 'Colonized Population',\n 10: 'Desert World'\n },\n 
2: {\n 1: 'Eugenic Cult',\n 2: 'Exchange Consulate',\n 3: 'Feral World',\n 4: 'Flying Cities',\n 5: 'Forbidden Tech',\n 6: 'Freak Geology',\n 7: 'Freak Weather',\n 8: 'Friendly Foe',\n 9: 'Gold Rush',\n 10: 'Hatred'\n },\n 3: {\n 1: 'Heavy Industry',\n 2: 'Heavy Mining',\n 3: 'Hostile Biosphere',\n 4: 'Hostile Space',\n 5: 'Local Specialty',\n 6: 'Local Tech',\n 7: 'Major Spaceyard',\n 8: 'Minimal Contact',\n 9: 'Misandry/Misogyny',\n 10: 'Oceanic World'\n },\n 4: {\n 1: 'Out of Contact',\n 2: 'Outpost World',\n 3: 'Perimeter Agency',\n 4: 'Pilgrimage Site',\n 5: 'Police State',\n 6: 'Preceptor Archive',\n 7: 'Pretech Cultists',\n 8: 'Primitive Aliens',\n 9: 'Psionics Fear',\n 10: 'Psionics Worship'\n },\n 5: {\n 1: 'Psionics Academy',\n 2: 'Quarantined World',\n 3: 'Radioactive World',\n 4: 'Regional Hegemon',\n 5: 'Restrictive Laws',\n 6: 'Rigid Culture',\n 7: 'Seagoing Cities',\n 8: 'Sealed Menace',\n 9: 'Sectarians',\n 10: 'Seismic Instability'\n },\n 6: {\n 1: 'Secret Masters',\n 2: 'Theocracy',\n 3: 'Tomb World',\n 4: 'Trade Hub',\n 5: 'Tyranny',\n 6: 'Unbraked AI',\n 7: 'Warlords',\n 8: 'Xenophiles',\n 9: 'Xenophobes',\n 10: 'Zombies'\n }\n\n}\n\nTABLE_TECH_LEVEL = {\n 2: '0',\n 3: '1',\n 4: '2',\n 5: '3',\n 6: '3',\n 7: '4',\n 8: '4',\n 9: '4',\n 10: '4',\n 11: '4+',\n 12: '5' \n}\n\nTABLE_TECH_LEVEL_REVERSE = {\n '0': 0,\n '1': 1,\n '2': 2,\n '3': 3,\n '4': 4,\n '4+': 5,\n '5': 6\n}\n\nTABLE_TEMPERATURE = {\n 2: 'Frozen',\n 3: 'Cold-to-temperate',\n 4: 'Cold',\n 5: 'Cold',\n 6: 'Temperate',\n 7: 'Temperate',\n 8: 'Temperate',\n 9: 'Warm',\n 10: 'Warm',\n 11: 'Temperate-to-warm',\n 12: 'Burning'\n}\n\n# One Roll Star System tables\nTABLE_MAIN_WORLD_ORBIT_TEMP_MOD = {\n 'Frozen': 2,\n 'Cold-to-temperate': 1,\n 'Cold': 1,\n 'Cold': 1,\n 'Temperate': 0,\n 'Temperate': 0,\n 'Temperate': 0,\n 'Warm': -1,\n 'Warm': -1,\n 'Temperate-to-warm': 0,\n 'Burning': -2\n}\n\n# World class ------------------------------------------------------------------\nclass World(object):\n def __init__(self,\n name = '',\n atmosphere = '',\n biosphere = '',\n population = '',\n populationAlt = 0,\n tags = ['',''],\n temperature = '',\n techLevel = '0'):\n # General information\n self.name = exception.arg_check(name,str,'')\n\n # Roll information\n self.atmosphere = exception.arg_check(atmosphere,str,'')\n self.biosphere = exception.arg_check(biosphere,str,'')\n self.population = exception.arg_check(population,str,'')\n self.tags = exception.arg_check(tags,list,['',''])\n for tag in tags:\n if not (isinstance(tag,str)):\n raise exception.InvalidListItemType(tag,str)\n self.temperature = exception.arg_check(temperature,str,'')\n self.techLevel = exception.arg_check(techLevel,str,'0')\n\n # Alternate roll information\n self.populationAlt = exception.arg_check(populationAlt,int,0)\n\n def population_alt_text(self):\n # Floor to 3 significant figures\n if ( self.populationAlt > 99999 ):\n return('{:,}'.format(int(str(self.populationAlt)[0:3]+'0'*(len(str(self.populationAlt))-3))))\n # Floor to nearest 1000\n elif ( self.populationAlt > 9999 ):\n return('{:,}'.format(int(str(self.populationAlt)[0:2]+'0'*(len(str(self.populationAlt))-2))))\n # Floor to nearest 100\n elif ( self.populationAlt > 999 ):\n return('{:,}'.format(int(str(self.populationAlt)[0:2]+'0'*(len(str(self.populationAlt))-2))))\n # Floor to nearest 10\n elif ( self.populationAlt > 99 ):\n return(str(self.populationAlt)[0:1]+'0'*(len(str(self.populationAlt))-1))\n # Else just return value\n else:\n 
return(str(self.populationAlt))\n","sub_path":"swn/world.py","file_name":"world.py","file_ext":"py","file_size_in_byte":6413,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"551044567","text":"# -*- coding: utf-8 -*-\n\ndictionary = \\\n{\n 'lang': u'fr',\n#admin fields\n 'admin': u'Administrateur',\n 'catalogs': u'Catalogues',\n 'journals': u'Journaux',\n 'tables': u'Tables',\n 'reports': u'Rapport',\n 'details': u'Détails',\n 'id': u'N°Enregistrement',\n 'deleted_flag': u'Indicateur de supression',\n 'caption': u'Intitulé',\n 'name': u'Nom',\n 'table_name': u'Table',\n 'template': u'Modèle de rapport',\n 'report_module': u'Module de rapport',\n 'params_template': u'Params IU',\n 'view_template': u'Modèle de rapport',\n 'visible': u'Visible',\n 'client_module': u'Module client',\n 'web_client_module': u'Module webClient ',\n 'server_module': u'Module Serveur',\n 'report_module': u'Module de rapport',\n 'project': u'Projet',\n 'users': u'Utilisateur',\n 'roles': u'Rôles',\n 'privileges': u'Privilèges',\n 'tasks': u'Tache',\n 'safe_mode': u'Mode protégé',\n 'language': u'Langage',\n 'author': u'Auteur',\n 'interface': u'Interface',\n 'db_type': u'type BD',\n 'db_name': u'Nom de la base',\n 'alias': u'Base de données',\n 'data_type': u'Type',\n 'filter_type': u'Type de filtre',\n 'size': u'Taille',\n 'object': u'Item à rechercher',\n 'object_field': u'Champ de recherche',\n 'master_field': u'Champ maître',\n 'required': u'Obligatoire',\n 'calculated': u'Calc.',\n 'default': u'Défaut',\n 'read_only': u'Lecture seule',\n 'alignment': u'Align.',\n 'active': u'Activé',\n 'date': u'Date',\n 'role': u'Rôle',\n 'info': u'Information',\n 'item': u'Item',\n 'can_view': u'Peut voir',\n 'can_create': u'Peut créer',\n 'can_edit': u'Peut modifier',\n 'can_delete': u'Peut supprimer',\n 'fields': u'Champs',\n 'field': u'Champ',\n 'filter': u'Filtre',\n 'filters': u'Filtres',\n 'index': u'Index',\n 'index_name': u\"Nom de l'index\",\n 'report_params': u'Params du Rapport',\n 'error': u'Erreur',\n#admin interface\n 'db': u'Base de données',\n 'export': u'Export',\n 'import': u'Import',\n 'viewing': u'Affichage',\n 'editing': u'Edition',\n 'filters': u'Filtres',\n 'order': u'Ordre',\n 'indices': u'Indices',\n 'foreign_keys': u'clés étrangères',\n 'select_all': u'Select. tout',\n 'unselect_all': u'Déselect. 
tout',\n 'project_params': u'Paramètres du projet',\n 'project_locale': u'Param.locaux du projet',\n 'reserved_word': u'Le nom est un mot réservé',\n#editor\n 'case_sensitive': u'Sensible à la casse',\n 'whole_words': u'Chercher le mots entiers',\n 'in_task': u'Dans la tâche',\n 'text_not_found': u'Texte non trouvé.\\nModifiez et chercher à nouveau ?',\n 'text_changed': u'Le module a été changé.\\nSauver avant de fermer?',\n 'go_to_line': u'Aller à la ligne',\n 'go_to': u'Aller à',\n 'line': u'Ligne',\n#admin editors\n 'caption_name': u'Nom',\n 'caption_word_wrap': u'Wrap',\n 'caption_expand': u'Exp.',\n 'caption_edit': u'Edit',\n 'caption_descening': u'Desc.',\n#admin messages\n 'fill_task_attrubutes': u'Fill in the caption, name and database type attributes.',\n 'can_not_connect': u\"Impossible de se connecter à la base %s\",\n 'field_used_in_filters': u\"Impossible de supprimer le champ %s.\\n utilisé dans la définition du filtre:\\n%s\",\n 'field_used_in_fields': u\"Impossible de supprimer le champ %s.\\n Utilisé dans la définition du champ:\\n%s\",\n 'field_used_in_indices': u\"Impossible de supprimer le champ %s.\\n Utilisé dans la définition d'un index:\\n%s\",\n 'field_is_system': u\"Impossible de supprimer un champ systeme..\",\n 'detail_mess': u'%s - détail %s',\n 'item_used_in_items': u\"Impossibe de supprimer l'item %s.\\n Utilisé dans la définition de l'item:\\n%s\",\n 'field_mess': u'%s - champ %s',\n 'item_used_in_fields': u\"Impossibe de supprimer l'item %s.\\n Utilisé dans la définition du champ:\\n%s\",\n 'param_mess': u'%s - parametre %s',\n 'item_used_in_params': u\"Impossible de supprimer l'item %s.\\n Est utilisé dans le paramètre :\\n%s\",\n 'invalid_name': u'Nom incorrect.',\n 'invalid_field_name': u'Nom de champ incorrect.',\n 'type_is_required': u'Type de champ obligatoire.',\n 'index_name_required': u\"Nom de l'index obligatoire.\",\n 'index_fields_required': u\"Les champs de l'index sont obligatoire.\",\n 'cant_delete_group': u\"Impossible de supprimer un groupe\",\n 'object_field_required': u'Un champ item est obligatoire.',\n 'no_tasks_ptoject': u\"Il n'y a pas de tâches dans le projet.\",\n 'stop_server': u'Arrêter le serveur.',\n#interface buttons and labels\n 'yes': u'Oui',\n 'no': u'Non',\n 'ok': u'OK',\n 'cancel': u'Annuler',\n 'delete': u'Supprimer',\n 'new': u'Nouveau',\n 'edit': u'Editer',\n 'copy': u'Copier',\n 'print': u'Imprimer',\n 'save': u'Sauvegarder',\n 'open': u'Ouvrir',\n 'close': u'Fermer',\n 'select': u'Selectionner',\n 'filter': u'Filtrer',\n 'apply': u'Appliquer',\n 'find': u'Chercher',\n 'replace': u'Remplacer',\n 'view': u'Afficher',\n 'log_in': u'Se connecter',\n 'login': u'Connexion',\n 'password': u'Mot de passe',\n 'log_out': u'Se déconnecter',\n#runtime messages\n 'invalid_int': u'%s valeur incorrecte - doit être un integer',\n 'invalid_float': u'%s valeur incorrecte - doit être un float',\n 'invalid_cur': u'%s valeur incorrecte - doit être de type monnaie',\n 'invalid_date': u'%s valeur incorrecte - doit être une date',\n 'invalid_bool': u'%s valeur incorrecte - doit être un boolean',\n 'invalid_value': u'%s valeur incorrecte',\n 'value_required': u'Une valeur est obligatoire',\n 'invalid_length': u'Taille du texte supérieure à la taille maximum - %d',\n 'save_changes': u'Les données ont changé. Voulez-vous les sauvegarder ?',\n 'apply_changes': u\"Les modifications des données n'ont pas été soumises au serveur. 
Voulez-vous soumettre ces modifications ?\",\n    'delete_record': u\"Supprimer l'enregistrement ?\",\n    'server_request_error': u'Erreur dans la requête au serveur',\n    'cant_delete_used_record': u\"Impossible de supprimer l'enregistrement. Il est en cours d'utilisation.\",\n    'website_maintenance': u\"Le site web est actuellement en maintenance.\",\n    'items_selected': u\"sélectionné: %d\",\n#rights messages\n    'cant_view': u\"%s: Vous n'êtes pas autorisés à afficher\",\n    'cant_create': u\"%s: Vous n'êtes pas autorisés à créer\",\n    'cant_edit': u\"%s: Vous n'êtes pas autorisés à éditer\",\n    'cant_delete': u\"%s: Vous n'êtes pas autorisés à supprimer\",\n#calendar\n    'week_start': 0,\n    'days_min': [u'Di', u'Lu', u'Ma', u'Me', u'Je', u'Ve', u'Sa', u'Di'],\n    'months': [u'Janvier', u'Février', u'Mars', u'Avril', u'Mai', u'Juin', u'Juillet', u'Août', u'Septembre', u'Octobre', u'Novembre', u'Décembre'],\n    'months_short': [u'Jan', u'Fév', u'Mar', u'Avr', u'Mai', u'Jui', u'Jul', u'Aoû', u'Sep', u'Oct', u'Nov', u'Déc'],\n#grid\n    'page': u'Page',\n    'of': u'de'\n}\n","sub_path":"jam/lang/french.py","file_name":"french.py","file_ext":"py","file_size_in_byte":10438,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"580273139","text":"import sys\nimport os\nimport trace\nimport __main__\n\nfrom selenium import webdriver\n\nto_execute = \" \".join(sys.argv[1:])\n# print(to_execute)\nx = exec(sys.argv[2])\nprint(x)\n\n\nclass CustomTrace(trace.Trace):\n    pass\n\n\n\ndef _runscript(filename):\n    # The script has to run in __main__ namespace (or imports from\n    # __main__ will break).\n    #\n    # So we clear up the __main__ and set several special variables\n    # (this gets rid of pdb's globals and cleans old variables on restarts).\n    import __main__\n    __main__.__dict__.clear()\n    __main__.__dict__.update({\"__name__\" : \"__main__\",\n                              \"__file__\" : filename,\n                              \"__builtins__\": __builtins__,\n                              })\n\n    # When bdb sets tracing, a number of call and line events happens\n    # BEFORE debugger even reaches user's code (and the exact sequence of\n    # events depends on python version). 
So we take special measures to\n # avoid stopping before we reach the main script (see user_line and\n # user_call for details).\n # self._wait_for_mainpyfile = True\n # self.mainpyfile = self.canonic(filename)\n # self._user_requested_quit = False\n with open(filename, \"rb\") as fp:\n statement = \"exec(compile(%r, %r, 'exec'))\" % \\\n (fp.read(), self.mainpyfile)\n self.run(statement)\n\n","sub_path":"python/trace/mod/custom_trace.py","file_name":"custom_trace.py","file_ext":"py","file_size_in_byte":1358,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"492969132","text":"# encoding:utf-8\n\nfrom src.read_data import read_data\nfrom src.imgtools.pre_visualization import *\n\nfrom collections import Counter\nimport pandas as pd\n\n\n\ntrain_path=r'../data/dsjtzs_txfz_traning.txt'\ntest_path=r'../data/dsjtzs_txfz_test_sample.txt'\n\n\n\nevents=read_data(train_path)\npositive_events=[event for event in events if event.is_human==1]\nnegative_events=[event for event in events if event.is_human==0]\n\ndef describe_events(events):\n traces_length=[]\n delta_times=[]\n delta_x=[]\n distances=[]\n for event in events:\n traces_length.append(len(event.traces))\n delta_times.append(event.traces[-1].time-event.traces[0].time)\n delta_x.append(event.traces[-1].x-event.traces[0].x)\n distance=0\n for i in range(len(event.traces)-1):\n distance+=abs(event.traces[i+1].x-event.traces[i].x)\n distances.append(distance)\n\n traces_length=pd.DataFrame(traces_length)\n delta_times=pd.DataFrame(delta_times)\n delta_x=pd.DataFrame(delta_x)\n distances=pd.DataFrame(distances)\n print(traces_length.describe())\n print(delta_times.describe())\n print(delta_x.describe())\n print(distances.describe())\n\n\n# describe_events(positive_events)\n# describe_events(negative_events)\n\n\ndef analy_single_event(event):\n delta_x=event.traces[-1].x-event.traces[0].x\n delta_t=event.traces[-1].time-event.traces[0].time\n\n distance_x=0\n ymin=event.traces[-1].y\n ymax=event.traces[-1].y\n\n splited_speed=[]\n for i in range(len(event.traces)-1):\n cur_distance=abs(event.traces[i+1].x-event.traces[i].x)\n distance_x+=cur_distance\n ymin=min(ymin,event.traces[i].y)\n ymax=max(ymax,event.traces[i].y)\n cur_speed=cur_distance/abs(event.traces[i+1].time-event.traces[i].time+1)\n splited_speed.append(cur_speed)\n speed_kind=len(Counter(splited_speed))\n splited_speed=pd.DataFrame(splited_speed)\n print('delta_x:%d, dis_x:%d, delta_y:%d, delta_t:%d, speed_kind:%.2f'%(delta_x,distance_x,ymax-ymin,delta_t,speed_kind/len(splited_speed)))\n print(splited_speed.describe())\n\n\ncount=0\nfor event in events:\n count+=1\n if count%6==0:\n if event.is_human==0:\n print(event.is_human)\n analy_single_event(event)\n draw_traces(event.traces,event.target)\n\n","sub_path":"src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2309,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"503674895","text":"import openmdao.api as om \nimport numpy as np\nfrom openconcept.utilities.math.integrals import Integrator\nimport warnings \n\n# OpenConcept PhaseGroup will be used to hold analysis phases with time integration\ndef find_integrators_in_model(system, abs_namespace, timevars, states):\n durationvar = system._problem_meta['oc_time_var']\n\n # check if we are a group or not\n if isinstance(system, om.Group):\n for subsys in system._subsystems_allprocs:\n if not abs_namespace:\n next_namespace = subsys.name\n else:\n 
next_namespace = abs_namespace + '.' + subsys.name\n find_integrators_in_model(subsys, next_namespace, timevars, states)\n else:\n # if the duration variable shows up we need to add its absolute path to timevars\n if isinstance(system, Integrator):\n for varname in system._var_rel_names['input']:\n if varname == durationvar:\n timevars.append(abs_namespace + '.' + varname)\n for state in system._state_vars.keys():\n state_options = system._state_vars[state]\n state_tuple = (abs_namespace + '.' + state_options['name'], \n abs_namespace + '.' + state_options['start_name'], \n abs_namespace + '.' + state_options['end_name'])\n states.append(state_tuple)\n\nclass PhaseGroup(om.Group):\n def __init__(self, **kwargs):\n # BB what if user isn't passing num_nodes to the phases?\n num_nodes = kwargs.get('num_nodes', 1)\n super(PhaseGroup, self).__init__(**kwargs)\n self._oc_time_var_name = 'duration'\n self._oc_num_nodes = num_nodes\n\n def initialize(self):\n self.options.declare('num_nodes', default=1, types=int, lower=0)\n\n def _setup_procs(self, pathname, comm, mode, prob_meta):\n # need to pass down the name of the duration variable via prob_meta\n prob_meta.update({'oc_time_var': self._oc_time_var_name})\n prob_meta.update({'oc_num_nodes': self._oc_num_nodes})\n super(PhaseGroup, self)._setup_procs(pathname, comm, mode, prob_meta)\n \n def _configure(self):\n super(PhaseGroup, self)._configure()\n # check child subsys for variables to be integrated and add them all\n timevars = []\n states = []\n # TODO revisit this approach once var data in configure is officially supported\n find_integrators_in_model(self, '', timevars, states)\n self._setup_var_data()\n\n # make connections from duration to integrated vars automatically\n time_prom_addresses_already_connected = []\n for var_abs_address in timevars:\n if self.pathname:\n var_abs_address = self.pathname + '.' 
+ var_abs_address\n var_prom_address = self._var_abs2prom['input'][var_abs_address]\n if var_prom_address != self._oc_time_var_name and var_prom_address not in time_prom_addresses_already_connected:\n self.connect(self._oc_time_var_name, var_prom_address)\n time_prom_addresses_already_connected.append(var_prom_address)\n self._oc_states_list = states\n\nclass IntegratorGroup(om.Group):\n def __init__(self, **kwargs):\n # BB what if user isn't passing num_nodes to the phases?\n time_units = kwargs.pop('time_units', 's')\n super(IntegratorGroup, self).__init__(**kwargs)\n self._oc_time_units = time_units\n\n def _setup_procs(self, pathname, comm, mode, prob_meta):\n time_units = self._oc_time_units\n try:\n num_nodes = prob_meta['oc_num_nodes']\n except KeyError:\n raise NameError('Integrator group must be created within an OpenConcept phase')\n self.add_subsystem('ode_integ', Integrator(time_setup='duration', method='simpson',diff_units=time_units, num_nodes=num_nodes))\n super(IntegratorGroup, self)._setup_procs(pathname, comm, mode, prob_meta)\n\n def _configure(self):\n super(IntegratorGroup, self)._configure()\n # TODO revisit this when variable data available by default in configure\n self._setup_var_data()\n for subsys in self._subsystems_allprocs:\n for var in subsys._var_rel_names['output']:\n # check if there are any variables to integrate\n tags = subsys._var_rel2meta[var]['tags']\n if 'integrate' in tags:\n state_name = None\n state_units = None\n state_val = 0.0\n state_lower = -1e30\n state_upper = 1e30\n state_promotes = False\n # TODO Check for duplicates otherwise generic Openmdao duplicate output/input error raised\n\n for tag in tags:\n split_tag = tag.split(':')\n if split_tag[0] == 'state_name':\n state_name = split_tag[-1]\n elif split_tag[0] == 'state_units':\n state_units = split_tag[-1]\n elif split_tag[0] == 'state_val':\n state_val = eval(split_tag[-1])\n elif split_tag[0] == 'state_lower':\n state_lower = float(split_tag[-1])\n elif split_tag[0] == 'state_upper':\n state_upper = float(split_tag[-1])\n elif split_tag[0] == 'state_promotes':\n state_promotes = eval(split_tag[-1])\n if state_name is None:\n raise ValueError('Must provide a state_name tag for integrated variable '+subsys.name+'.'+var)\n if state_units is None:\n warnings.warn('OpenConcept integration variable '+subsys.name+'.'+var+' '+'has no units specified. This can be dangerous.')\n self.ode_integ.add_integrand(state_name, rate_name=var, val=state_val,\n units=state_units, lower=state_lower, upper=state_upper)\n # make the rate connection\n rate_var_abs_address = subsys.name+'.'+var\n if self.pathname:\n rate_var_abs_address = self.pathname + '.' 
+ rate_var_abs_address\n                    rate_var_prom_address = self._var_abs2prom['output'][rate_var_abs_address]\n                    self.connect(rate_var_prom_address, 'ode_integ'+'.'+var)\n                    if state_promotes:\n                        self.ode_integ._var_promotes['output'].append(state_name)\n                        self.ode_integ._var_promotes['output'].append(state_name+'_final')\n                        self.ode_integ._var_promotes['input'].append(state_name+'_initial')\n\nclass TrajectoryGroup(om.Group):\n    def __init__(self, **kwargs):\n        super(TrajectoryGroup, self).__init__(**kwargs)\n        self._oc_phases_to_link = []\n\n    def _configure(self):\n        super(TrajectoryGroup, self)._configure()\n        for linkage in self._oc_phases_to_link:\n            self._link_phases(linkage[0], linkage[1], linkage[2])\n    \n    def _link_phases(self, phase1, phase2, states_to_skip=[]):\n        # find all the states in each phase\n        # if they appear in both phase1 and phase2, connect them\n        # unless the state is in states_to_skip\n        # if they do not appear in both, do nothing or maybe raise an error message\n        # print a report of states linked\n        phase1_states = phase1._oc_states_list\n        phase2_states = phase2._oc_states_list\n        self._setup_var_data()\n        for state_tuple in phase1_states:\n            if state_tuple[0] in [state_tuple_2[0] for state_tuple_2 in phase2_states]:\n                \n                phase1_abs_name = phase1.name + '.' + state_tuple[0]\n                phase1_end_abs_name = phase1.name + '.' + state_tuple[2] # final \n                phase2_start_abs_name = phase2.name + '.' + state_tuple[1] # initial\n                if self.pathname:\n                    phase1_abs_name = self.pathname + '.' + phase1_abs_name\n                    phase1_end_abs_name = self.pathname + '.' + phase1_end_abs_name\n                    phase2_start_abs_name = self.pathname + '.' + phase2_start_abs_name\n                \n                phase1_prom_name = self._var_abs2prom['output'][phase1_abs_name]\n                if phase1_prom_name.startswith(phase1.name): # only modify the text if it starts with the prefix\n                    state_prom_name = phase1_prom_name.replace(phase1.name+'.', \"\", 1)\n                else:\n                    state_prom_name = phase1_prom_name\n                phase1_end_prom_name = self._var_abs2prom['output'][phase1_end_abs_name]\n                phase2_start_prom_name = self._var_abs2prom['input'][phase2_start_abs_name]\n                if not (state_tuple[0] in states_to_skip):\n                    if not (state_prom_name in states_to_skip):\n                        self.connect(phase1_end_prom_name, phase2_start_prom_name)\n\n    def link_phases(self, phase1, phase2, states_to_skip=[]):\n        # need to cache this because the data we need isn't ready yet\n        if not isinstance(phase1, PhaseGroup) or not isinstance(phase2, PhaseGroup):\n            raise ValueError('link_phases phase arguments must be OpenConcept PhaseGroup objects')\n        self._oc_phases_to_link.append((phase1, phase2, states_to_skip))","sub_path":"openconcept/analysis/trajectories.py","file_name":"trajectories.py","file_ext":"py","file_size_in_byte":9494,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"589779255","text":"# has-a object composition\n# Model a real-world situation using the technique called object composition.\n\nclass Gun:\n    def __init__(self, kind):\n        self.kind = kind\n\n    def bang(self):\n        print(\"bang bang\")\n\n\n# object composition\nclass Police():\n    def __init__(self, gun_kind = ''):\n        if gun_kind:\n            self.gun = Gun(gun_kind)\n            # create an instance of the Gun class and assign it as an instance member of Police\n        else:\n            self.gun = None \n            # the gun instance member exists, but it holds no value yet\n\n    def get_gun(self, gun_kind):\n        self.gun = Gun(gun_kind)\n\n    def shoot(self):\n        if self.gun:\n            self.gun.bang()\n        else:\n            print(\"You do not have a gun.\")\n\nif __name__ == \"__main__\":\n    p1 = Police('revolver')\n    print(p1.gun.kind)\n    p1.shoot()\n\n    p2 = Police()\n    p2.shoot()\n    p2.get_gun('machine gun')\n    
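# composition in action: the same Police instance now has a Gun,\n    # so this second shoot() call fires instead of the no-gun message\n    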
p2.shoot()\n","sub_path":"OOP/has_a.py","file_name":"has_a.py","file_ext":"py","file_size_in_byte":961,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"134381546","text":"from selenium import webdriver\nimport time\nfrom Utilities import xlutils\nfrom Utilities.CustomLogger import Logsetup\nfrom selenium.webdriver.support.ui import Select\nimport re\nfrom selenium.webdriver import ActionChains\nfrom selenium.webdriver.common.keys import Keys\n\nlogger = Logsetup.getlogdemoqa()\n\n\nOpenchrome = webdriver.Chrome()\nOpenchrome.get(\"https://demoqa.com/\")\nOpenchrome.maximize_window()\n\nOpenchrome.find_element_by_xpath(\"//a[contains(text(),'Automation practice form')]\").click()\ntime.sleep(5)\nOpenchrome.find_element_by_id(\"continents\")\ncontinent =Openchrome.find_element_by_id(\"continents\")\nselect = Select(continent)\nselect.select_by_visible_text(\"South America\")\nOpenchrome.save_screenshot(\"C:\\\\Users\\Ramya\\\\PycharmProjects\\\\demoqa practice\\\\Screenshots\\dropdown1.png\")\ntime.sleep(3)\nselect.select_by_visible_text(\"Australia\")\nOpenchrome.save_screenshot(\"C:\\\\Users\\Ramya\\\\PycharmProjects\\\\demoqa practice\\\\Screenshots\\dropdown2.png\")\n\n# Method1 ------- with ActionChains-----------------\n\naction = ActionChains(Openchrome)\nAsia = Openchrome.find_element_by_xpath(\"//*[@id='continentsmultiple']/option[1]\")\nNorthAmerica = Openchrome.find_element_by_xpath(\"//*[@id='continentsmultiple']/option[6]\")\nAfrica = Openchrome.find_element_by_xpath(\"//*[@id='continentsmultiple']/option[3]\")\nActionChains(Openchrome).key_down(Keys.CONTROL).click(Asia).key_up(Keys.CONTROL).perform()\nActionChains(Openchrome).key_down(Keys.CONTROL).click(Africa).key_up(Keys.CONTROL).perform()\nActionChains(Openchrome).key_down(Keys.CONTROL).click(NorthAmerica).key_up(Keys.CONTROL).perform()\nOpenchrome.save_screenshot(\"C:\\\\Users\\Ramya\\\\PycharmProjects\\\\demoqa practice\\\\Screenshots\\mutipleselect.png\")\n\n\n","sub_path":"testcases/AutomationPracticeForm- dropdown & select.py","file_name":"AutomationPracticeForm- dropdown & select.py","file_ext":"py","file_size_in_byte":1699,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"406194843","text":"from django.http import HttpResponse\n\nfrom nose.tools import eq_\nfrom test_utils import RequestFactory\n\nimport amo.tests\nfrom mkt.api.middleware import CORSMiddleware\n\n\nclass TestCORS(amo.tests.TestCase):\n\n def setUp(self):\n self.mware = CORSMiddleware()\n self.req = RequestFactory().get('/')\n\n def test_not_cors(self):\n res = self.mware.process_response(self.req, HttpResponse())\n assert not res.has_header('Access-Control-Allow-Methods')\n\n def test_cors(self):\n self.req.CORS = ['get']\n res = self.mware.process_response(self.req, HttpResponse())\n eq_(res['Access-Control-Allow-Origin'], '*')\n eq_(res['Access-Control-Allow-Methods'], 'GET, OPTIONS')\n\n def test_post(self):\n self.req.CORS = ['get', 'post']\n res = self.mware.process_response(self.req, HttpResponse())\n eq_(res['Access-Control-Allow-Methods'], 'GET, POST, OPTIONS')\n eq_(res['Access-Control-Allow-Headers'], 'Content-Type')\n","sub_path":"mkt/api/tests/test_middleware.py","file_name":"test_middleware.py","file_ext":"py","file_size_in_byte":988,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"225425120","text":"import 
sys,site\nsite.addsitedir('/home/abhishek/.virtualenvs/travelibibo/lib/python2.7/site-packages')\nROOT_DIR = '/home/abhishek/dev/gandalf/'\nsys.stdout = sys.stderr # sys.stdout access restricted by mod_wsgi\npath = ROOT_DIR # import pico from this dir\npath1 = ROOT_DIR + \"pico/\" # import pico from this dir\npath2 = ROOT_DIR + \"dashboard_engine/\" # the modules you want to be usable by Pico\nsys.path.insert(0, path)\nsys.path.insert(0, path1)\nsys.path.insert(0, path2)\n\nimport pico.server\n\n# Set the WSGI application handler\napplication = pico.server.wsgi_app\n","sub_path":"pico/pico.wsgi","file_name":"pico.wsgi","file_ext":"wsgi","file_size_in_byte":561,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"269486275","text":"from PyQt5.QtWidgets import QWidget, QPushButton, QVBoxLayout, QLabel, QLineEdit, QHBoxLayout, \\\n    QFrame, QMainWindow, QTableWidget, QTableWidgetItem, QGridLayout, QHeaderView\n\n\nclass HomeScreen(QMainWindow):\n    def __init__(self):\n        super().__init__()\n        self.setWindowTitle(\"Dashboard\")\n        self.resize(500, 400)\n        widget = QWidget()\n        widget.setStyleSheet(\"background:#000\")\n        layout_horizontal = QHBoxLayout()\n        menu_vertical_layout = QVBoxLayout()\n\n        self.btn_home = QPushButton(\"Home\")\n        self.btn_vehicles = QPushButton(\"Vehicles\")\n        self.btn_users = QPushButton(\"Users\")\n        self.btn_parking_history = QPushButton(\"Parking History\")\n\n        menu_vertical_layout.setContentsMargins(0, 0, 0, 0)\n        menu_vertical_layout.setSpacing(0)\n        self.btn_home.setStyleSheet(\n            \"width:200px;height:160px;font-size:20px;background:blue;color:#fff;font-weight:bold;border:1px solid white\")\n        self.btn_vehicles.setStyleSheet(\n            \"width:200px;height:160px;font-size:20px;background:orange;color:#fff;font-weight:bold;border:1px solid white\")\n        self.btn_users.setStyleSheet(\n            \"width:200px;height:160px;font-size:20px;background:orange;color:#fff;font-weight:bold;border:1px solid white\")\n        self.btn_parking_history.setStyleSheet(\n            \"width:200px;height:160px;font-size:20px;background:orange;color:#fff;font-weight:bold;border:1px solid white\")\n\n        self.btn_home.clicked.connect(self.showHome)\n        self.btn_vehicles.clicked.connect(self.showVehicles)\n        self.btn_users.clicked.connect(self.showUsers)\n        self.btn_parking_history.clicked.connect(self.showHistory)\n\n        menu_frame = QFrame()\n        menu_vertical_layout.addWidget(self.btn_home)\n        menu_vertical_layout.addWidget(self.btn_vehicles)\n        menu_vertical_layout.addWidget(self.btn_users)\n        menu_vertical_layout.addWidget(self.btn_parking_history)\n        menu_vertical_layout.addStretch()\n        menu_frame.setLayout(menu_vertical_layout)\n\n        parent_vertical = QVBoxLayout()\n        parent_vertical.setContentsMargins(0, 0, 0, 0)\n        self.vertical_1 = QVBoxLayout()\n        self.addHomePageData()\n\n        self.vertical_2 = QVBoxLayout()\n        self.vertical_2.setContentsMargins(0, 0, 0, 0)\n        self.addVehiclesPage()\n\n        self.vertical_3 = QVBoxLayout()\n        self.vertical_3.setContentsMargins(0, 0, 0, 0)\n        self.addUsersPage()\n\n        self.vertical_4 = QVBoxLayout()\n        self.addHistoryPage()\n\n        self.frame_1 = QFrame()\n        self.frame_1.setMinimumWidth(self.width())\n        self.frame_1.setMaximumWidth(self.width())\n        self.frame_1.setMinimumHeight(self.height())\n        self.frame_1.setMaximumHeight(self.height())\n\n        self.frame_1.setLayout(self.vertical_1)\n        self.frame_2 = QFrame()\n        self.frame_2.setLayout(self.vertical_2)\n        self.frame_3 = QFrame()\n        self.frame_3.setLayout(self.vertical_3)\n        self.frame_4 = QFrame()\n        
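# each page lives in its own frame; only one frame is visible at a time\n        # and the show*() slots below toggle visibility to switch pages\n        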
self.frame_4.setLayout(self.vertical_4)\n\n parent_vertical.addWidget(self.frame_1)\n parent_vertical.addWidget(self.frame_2)\n parent_vertical.addWidget(self.frame_3)\n parent_vertical.addWidget(self.frame_4)\n\n layout_horizontal.addWidget(menu_frame)\n layout_horizontal.addLayout(parent_vertical)\n layout_horizontal.setContentsMargins(0, 0, 0, 0)\n parent_vertical.setContentsMargins(0, 0, 0, 0)\n parent_vertical.addStretch()\n # menu_vertical_layout.addStretch()\n layout_horizontal.addStretch()\n widget.setLayout(layout_horizontal)\n\n self.frame_1.show()\n self.frame_2.hide()\n self.frame_3.hide()\n self.frame_4.hide()\n\n self.setCentralWidget(widget)\n\n def showHome(self):\n self.btn_home.setStyleSheet(\n \"width:200px;height:160px;font-size:20px;background:blue;color:#fff;font-weight:bold;border:1px solid white\")\n self.btn_vehicles.setStyleSheet(\n \"width:200px;height:160px;font-size:20px;background:orange;color:#fff;font-weight:bold;border:1px solid white\")\n self.btn_users.setStyleSheet(\n \"width:200px;height:160px;font-size:20px;background:orange;color:#fff;font-weight:bold;border:1px solid white\")\n self.btn_parking_history.setStyleSheet(\n \"width:200px;height:160px;font-size:20px;background:orange;color:#fff;font-weight:bold;border:1px solid white\")\n\n self.frame_1.show()\n self.frame_2.hide()\n self.frame_3.hide()\n self.frame_4.hide()\n\n def showVehicles(self):\n self.btn_home.setStyleSheet(\n \"width:200px;height:160px;font-size:20px;background:orange;color:#fff;font-weight:bold;border:1px solid \"\n \"white\")\n self.btn_vehicles.setStyleSheet(\n \"width:200px;height:160px;font-size:20px;background:blue;color:#fff;font-weight:bold;border:1px solid white\")\n self.btn_users.setStyleSheet(\n \"width:200px;height:160px;font-size:20px;background:orange;color:#fff;font-weight:bold;border:1px solid \"\n \"white\")\n self.btn_parking_history.setStyleSheet(\n \"width:200px;height:160px;font-size:20px;background:orange;color:#fff;font-weight:bold;border:1px solid \"\n \"white\")\n\n self.frame_1.hide()\n self.frame_2.show()\n self.frame_3.hide()\n self.frame_4.hide()\n\n def showUsers(self):\n self.btn_home.setStyleSheet(\n \"width:200px;height:160px;font-size:20px;background:orange;color:#fff;font-weight:bold;border:1px solid \"\n \"white\")\n self.btn_vehicles.setStyleSheet(\n \"width:200px;height:160px;font-size:20px;background:orange;color:#fff;font-weight:bold;border:1px solid \"\n \"white\")\n self.btn_users.setStyleSheet(\n \"width:200px;height:160px;font-size:20px;background:blue;color:#fff;font-weight:bold;border:1px solid white\")\n self.btn_parking_history.setStyleSheet(\n \"width:200px;height:160px;font-size:20px;background:orange;color:#fff;font-weight:bold;border:1px solid \"\n \"white\")\n\n self.frame_1.hide()\n self.frame_2.hide()\n self.frame_3.show()\n self.frame_4.hide()\n\n def showHistory(self):\n self.btn_home.setStyleSheet(\"width:200px;height:160px;font-size:20px;background:orange;color:#fff;font-weight\"\n \":bold;border:1px solid white\")\n self.btn_vehicles.setStyleSheet(\"width:200px;height:160px;font-size:20px;background:orange;color:#fff;font\"\n \"-weight:bold;border:1px solid white\")\n self.btn_users.setStyleSheet(\"width:200px;height:160px;font-size:20px;background:orange;color:#fff;font\"\n \"-weight:bold;border:1px solid white\")\n self.btn_parking_history.setStyleSheet(\"width:200px;height:160px;font-size:20px;background:blue;color:#fff\"\n \";font-weight:bold;border:1px solid white\")\n\n self.frame_1.hide()\n self.frame_2.hide()\n 
self.frame_3.hide()\n self.frame_4.show()\n\n\n def addHomePageData(self):\n self.vertical_1.setContentsMargins(0, 0, 0, 0)\n button = QPushButton(\"Refresh\")\n\n button.clicked.connect(self.refreshHome)\n vertical_layout = QVBoxLayout()\n vertical_layout.setContentsMargins(0, 0, 0, 0)\n frame = QFrame()\n\n horizontal = QHBoxLayout()\n horizontal.setContentsMargins(0, 0, 0, 0)\n vertical_layout.addLayout(horizontal)\n\n alldata = self.dbOperation.getSlotSpace()\n self.gridLayout = QGridLayout()\n self.gridLayout.setContentsMargins(0, 0, 0, 0)\n self.gridLayout.setHorizontalSpacing(0)\n self.gridLayout.setVerticalSpacing(0)\n vertical_layout.addWidget(button)\n vertical_layout.addLayout(self.gridLayout)\n\n row = 0\n i = 0\n for data in alldata:\n label = QPushButton(\"Slot \" + str(data[0]) + \" \\n \" + str(data[1]))\n\n if data[3] == 1:\n label.setStyleSheet(\n \"background-color:green;color:white;padding:5px;width:100px;height:100px;border:1px solid white;text-align:center;font-weight:bold\")\n else:\n label.setStyleSheet(\n \"background-color:red;color:white;padding:5px;width:100px;height:100px;border:1px solid white;text-align:center;font-weight:bold\")\n\n if i % 5 == 0:\n i = 0\n row = row + 1\n\n self.gridLayout.addWidget(label, row, i)\n i = i + 1\n\n frame.setLayout(vertical_layout)\n self.vertical_1.addWidget(frame)\n self.vertical_1.addStretch()\n\n def addVehiclesPage(self, error_label=None):\n layout = QVBoxLayout()\n frame = QFrame()\n\n number_plate_label = QLabel(\"Number Plate : \")\n number_plate_label.setStyleSheet(\"color:#fff;padding:8px 0px;font-size:20px\")\n owner_name_label = QLabel(\"Owners Name : \")\n owner_name_label.setStyleSheet(\"color:#fff;padding:8px 0px;font-size:20px\")\n owner_phone_label = QLabel(\"Owner's Phone Number : \")\n owner_phone_label.setStyleSheet(\"color:#fff;padding:8px 0px;font-size:20px\")\n\n number_plate_input = QLineEdit()\n number_plate_input.setStyleSheet(\"color:#fff;padding:8px 0px;font-size:20px\")\n owner_name_input = QLineEdit()\n owner_name_input.setStyleSheet(\"color:#fff;padding:8px 0px;font-size:20px\")\n owner_phone_input = QLineEdit()\n owner_phone_input.setStyleSheet(\"color:#fff;padding:8px 0px;font-size:20px\")\n\n button = QPushButton(\"Add Vehicle\")\n button.setStyleSheet(\"color:#fff;padding:8px 0px;font-size:20px;background:green;border:1px solid white\")\n\n layout.addWidget(number_plate_label)\n layout.addWidget(number_plate_input)\n layout.addWidget(owner_name_label)\n layout.addWidget(owner_name_input)\n layout.addWidget(owner_phone_label)\n layout.addWidget(owner_phone_input)\n\n layout.addWidget(button)\n layout.addWidget(error_label)\n\n layout.setContentsMargins(0, 0, 0, 0)\n frame.setMinimumHeight(self.height())\n frame.setMinimumWidth(self.width())\n frame.setMaximumHeight(self.width())\n frame.setMaximumWidth(self.width())\n\n layout.addStretch()\n frame.setLayout(layout)\n button.clicked.connect(\n lambda: self.addVehicles(number_plate_input.text(), owner_name_input.text(), owner_phone_input.text()))\n self.vertical_2.addWidget(frame)\n\n\n def addUsersPage(self):\n data = self.dbOperation.getCurrentVehicle()\n self.table = QTableWidget()\n self.table.setStyleSheet(\"background:#fff\")\n self.table.resize(self.width(), self.height())\n self.table.setRowCount(len(data))\n self.table.setColumnCount(7)\n\n self.table.horizontalHeader().setSectionResizeMode(QHeaderView.ResizeToContents)\n self.table.setHorizontalHeaderItem(0, QTableWidgetItem(\"ID\"))\n self.table.setHorizontalHeaderItem(1, 
QTableWidgetItem(\"OWNER NAME\"))\n self.table.setHorizontalHeaderItem(2, QTableWidgetItem(\"NUMBER PLATE\"))\n self.table.setHorizontalHeaderItem(3, QTableWidgetItem(\"OWNER PHONE NO.\"))\n self.table.setHorizontalHeaderItem(4, QTableWidgetItem(\"ENTRY TIME\"))\n self.table.setHorizontalHeaderItem(5, QTableWidgetItem(\"ACTION\"))\n\n loop = 0\n for smalldata in data:\n self.table.setItem(loop, 0, QTableWidgetItem(str(smalldata[0])))\n self.table.setItem(loop, 1, QTableWidgetItem(str(smalldata[1])))\n self.table.setItem(loop, 2, QTableWidgetItem(str(smalldata[6])))\n self.table.setItem(loop, 3, QTableWidgetItem(str(smalldata[2])))\n self.table.setItem(loop, 4, QTableWidgetItem(str(smalldata[7])))\n self.table.setItem(loop, 5, QTableWidgetItem(str(smalldata[3])))\n self.button_exit = QPushButton(\"Exit\")\n self.button_exit.setStyleSheet(\n \"color:#fff;padding:8px 0px;font-size:20px;background:green;border:1px solid white\")\n self.table.setCellWidget(loop, 6, self.button_exit)\n self.button_exit.clicked.connect(self.exitCall)\n loop = loop + 1\n\n frame = QFrame()\n layout = QVBoxLayout()\n button = QPushButton(\"Refresh\")\n button.setStyleSheet(\"color:#fff;padding:8px 0px;font-size:20px;background:green;border:1px solid white\")\n button.clicked.connect(self.refreshManage)\n layout.setContentsMargins(0, 0, 0, 0)\n layout.setSpacing(0)\n layout.addWidget(button)\n layout.addWidget(self.table)\n frame.setLayout(layout)\n frame.setContentsMargins(0, 0, 0, 0)\n frame.setMaximumWidth(self.width())\n frame.setMinimumWidth(self.width())\n frame.setMaximumHeight(self.height())\n frame.setMinimumHeight(self.height())\n self.vertical_3.addWidget(frame)\n self.vertical_3.addStretch()\n\n def refreshUsers(self):\n data=self.dbOperation.getCurrentVehicle()\n self.table.setRowCount(len(data))\n self.table.setColumnCount(7)\n loop=0\n for smalldata in data:\n self.table.setItem(loop,0,QTableWidgetItem(str(smalldata[0])))\n self.table.setItem(loop,1,QTableWidgetItem(str(smalldata[1])))\n self.table.setItem(loop,2,QTableWidgetItem(str(smalldata[6])))\n self.table.setItem(loop,3,QTableWidgetItem(str(smalldata[2])))\n self.table.setItem(loop,4,QTableWidgetItem(str(smalldata[7])))\n self.table.setItem(loop,5,QTableWidgetItem(str(smalldata[3])))\n self.button_exit=QPushButton(\"Exit\")\n self.table.setCellWidget(loop,6,self.button_exit)\n self.button_exit.clicked.connect(self.exitCall)\n loop=loop+1\n\n\n def refreshHistory(self):\n self.table1.clearContents()\n data=self.dbOperation.getAllVehicle()\n loop=0\n self.table1.setRowCount(len(data))\n self.table1.setColumnCount(7)\n for smalldata in data:\n self.table1.setItem(loop,0,QTableWidgetItem(str(smalldata[0])))\n self.table1.setItem(loop,1,QTableWidgetItem(str(smalldata[1])))\n self.table1.setItem(loop,2,QTableWidgetItem(str(smalldata[6])))\n self.table1.setItem(loop,3,QTableWidgetItem(str(smalldata[2])))\n self.table1.setItem(loop,4,QTableWidgetItem(str(smalldata[7])))\n self.table1.setItem(loop,5,QTableWidgetItem(str(smalldata[3])))\n loop=loop+1\n\n\n def addHistoryPage(self):\n data=self.dbOperation.getAllVehicle()\n self.table1=QTableWidget()\n self.table1.resize(self.width(),self.height())\n self.table1.setRowCount(len(data))\n self.table1.setStyleSheet(\"background:#fff\")\n self.table1.setColumnCount(7)\n\n button=QPushButton(\"Refresh\")\n button.setStyleSheet(\"color:#fff;padding:8px 0px;font-size:20px;background:green;border:1px solid white\")\n button.clicked.connect(self.refreshHistory)\n\n\n 
self.table1.horizontalHeader().setSectionResizeMode(QHeaderView.ResizeToContents)\n self.table1.setHorizontalHeaderItem(0,QTableWidgetItem(\"ID\"))\n self.table1.setHorizontalHeaderItem(1,QTableWidgetItem(\"OWNER NAME\"))\n self.table1.setHorizontalHeaderItem(2,QTableWidgetItem(\"NUMBER PLATE\"))\n self.table1.setHorizontalHeaderItem(3,QTableWidgetItem(\"OWNER PHONE NO\"))\n self.table1.setHorizontalHeaderItem(4,QTableWidgetItem(\"ENTRY TIME\"))\n self.table1.setHorizontalHeaderItem(5,QTableWidgetItem(\"EXIT TIME\"))\n\n loop=0\n for smalldata in data:\n self.table1.setItem(loop,0,QTableWidgetItem(str(smalldata[0])))\n self.table1.setItem(loop,1,QTableWidgetItem(str(smalldata[1])))\n self.table1.setItem(loop,2,QTableWidgetItem(str(smalldata[6])))\n self.table1.setItem(loop,3,QTableWidgetItem(str(smalldata[2])))\n self.table1.setItem(loop,4,QTableWidgetItem(str(smalldata[7])))\n self.table1.setItem(loop,5,QTableWidgetItem(str(smalldata[3])))\n loop=loop+1\n\n self.frame5 = QFrame()\n self.layout1 = QVBoxLayout()\n self.layout1.setContentsMargins(0,0,0,0)\n self.layout1.setSpacing(0)\n self.layout1.addWidget(button)\n self.layout1.addWidget(self.table1)\n self.frame5.setLayout(self.layout1)\n self.frame5.setContentsMargins(0,0,0,0)\n self.frame5.setMaximumWidth(self.width())\n self.frame5.setMinimumWidth(self.width())\n self.frame5.setMaximumHeight(self.height())\n self.frame5.setMinimumHeight(self.height())\n self.vertical_4.addWidget(self.frame5)\n self.vertical_4.addStretch()\n\n\n def exitCall(self):\n button = self.sender()\n if button:\n row=self.table.indexAt(button.pos()).row()\n id =str(self.table.item(row,0).text())\n self.dbOperation.exitVehicle(id)\n self.table.removeRow(row)\n","sub_path":"HomeWindow.py","file_name":"HomeWindow.py","file_ext":"py","file_size_in_byte":17381,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"178105466","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\n@author:Wen\r\n\"\"\"\r\nfrom DataProcess.GetTrainData import LaneDataLoader\r\nfrom model.MainModel import Deeplabv3\r\nimport torch as t\r\nfrom torch import optim\r\nfrom tensorboardX import SummaryWriter\r\nfrom utils.PrepareSTN import PrepareSTN\r\nimport numpy as np\r\nimport torch.nn.functional as F\r\nloss = 0\r\nepoch = 3\r\ndevice = 3\r\nWriter = SummaryWriter(\"./run\"+str(device)+\"/exp\")\r\nDeeplabv3_Net = Deeplabv3(2).cuda(device)\r\noptimizer = optim.SGD(params = Deeplabv3_Net.parameters(),lr = 0.003,momentum = 0.9)\r\noptimizer.zero_grad()\r\ncrossentroy = t.nn.CrossEntropyLoss(weight=t.tensor([1,30]).float()).cuda(device)\r\nstep = 44550\r\nSavePath = \"/home/fuyongkun/pitch_estimation/model/modeldata\"+str(device)+\"/DeepLabv3.pth\"\r\nDeeplabv3_Net.load_state_dict(t.load(SavePath))\r\ndata , label = next(iter(LaneDataLoader))\r\nsize_data = data.size()\r\nK = np.matrix([\r\n [2304.5494/2.115 , 0 , 1686.2379/2.115],\r\n [0 , 2305.8757/2.1172, 1354.9849/2.1172],\r\n [0 , 0 , 1]\r\n])\r\nSTNPARAM = PrepareSTN(size_data,K,-10,1.72,0,-0.04)\r\nGrid = STNPARAM.MakeGrid().cuda(device)\r\nprint(\"All is ok \")\r\nfor i in range(epoch):\r\n print(\"当前epoch\", i)\r\n for data,label in LaneDataLoader:\r\n step = step + 1\r\n data = data.cuda(device)\r\n label = label.cuda(device)\r\n data = F.grid_sample(data,Grid,mode = 'bilinear')\r\n label = label.unsqueeze(1).float()\r\n label = F.grid_sample(label,Grid,mode = 'nearest')\r\n label = label.squeeze(1).long()\r\n output = Deeplabv3_Net(data)\r\n loss_step = crossentroy(output,label)\r\n loss 
= loss + loss_step.item()\r\n loss_step.backward()\r\n optimizer.step()\r\n optimizer.zero_grad()\r\n if (step+1)%10 == 0 :\r\n print(\"loss is :\",loss,\"当前step:\",step+1)\r\n Writer.add_scalar(\"Loss\", loss, global_step = step)\r\n loss = 0\r\n if (step+1)%1000 == 0 :\r\n t.save(Deeplabv3_Net.state_dict(),SavePath)\r\n t.cuda.empty_cache()\r\nt.save(Deeplabv3_Net.state_dict(),SavePath)\r\nWriter.close()\r\nprint(\"Finished\")\r\n\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"Train/Train.py","file_name":"Train.py","file_ext":"py","file_size_in_byte":2195,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"480436861","text":"from contextlib import contextmanager\nfrom os.path import basename\nfrom urllib.request import urlopen, Request\n\nimport click\n\n\ndef _secho(template=None, **skwargs):\n def func(text, *args, **kwargs):\n text = text.format(*args, **kwargs)\n if template:\n text = template.format(text)\n click.secho(text, **skwargs)\n return func\n\n\ntitle = _secho('>> {0}', fg='cyan', bold=True)\nsection = _secho('> {0}', bold=True)\ninfo = _secho()\nsuccess = _secho(fg='green')\nerror = _secho(fg='red', bold=True)\nwarning = _secho(fg='yellow')\n\n\n@contextmanager\ndef ok(text):\n try:\n click.secho('{0} ...... '.format(text), nl=False)\n yield\n except:\n error('ko')\n raise\n else:\n success('ok')\n\n\ndef unicodify(string):\n '''Ensure a string is unicode and serializable'''\n return string.decode('unicode_escape') if isinstance(string, bytes) else string\n\n\ndef extract_meta_from_headers(url):\n \"\"\"Given a `url`, perform a HEAD request and return metadata.\"\"\"\n req = Request(url, method='HEAD')\n req.add_header('Accept-Encoding', 'identity')\n response = urlopen(req)\n content_disposition = response.headers.get('Content-Disposition', '')\n if 'filename' in content_disposition:\n # Retrieve the filename and remove the last \".\n filename = content_disposition.split('filename=\"')[-1][:-1]\n else:\n filename = basename(url).strip()\n\n content_length = response.headers.get('Content-Length')\n if content_length:\n size = int(content_length)\n else:\n size = 1 # Fake for progress bar.\n\n return filename, size\n","sub_path":"tools.py","file_name":"tools.py","file_ext":"py","file_size_in_byte":1624,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"97680646","text":"'''\nInput: a List of integers where every int except one shows up twice\nReturns: an integer\n'''\n# def single_number(arr):\n# # get length of arr\n# n = len(arr)\n# # iterate of range of n\n# for i in range(0, n):\n# # create an index variable to compare with i\n# j = 0\n# # while j < length of arr\n# while j -1 :\n\t\t\treturn ['0']\n\t\telse:\n\t\t\tsys.exit(1)\n\nclass Job():\n\tdef __init__(self , script, key , name ,max_cycle ):\n\t\tself.script = script\n\t\tself.key = key\n\t\tself.name = name \n\t\tself.status = 'waiting'\n\t\tself.cputime_last = 0 \n\t\tself.max_cycle = max_cycle\n\t\tself.qsub_time = 0\n\t\tself.qhold_time = 0 \n\t\tself.qrls_time = 0\n\t\tself.io = []\n\t\tself.vmem = []\n\t\tself.maxvmem = 0 \n\t\tself.node = 'unknown'\n\t\tself.counter_qstat = 0 \n\tdef add_atribute(self, queue, max_cycle, resource):\n\t\tself.queue = queue\n\t\tself.max_cycle = max_cycle\n\t\tself.resource = resource\n\tdef qsub(self):\n\t\tcmd = 'cd {1} && qsub -cwd -S /bin/sh -q {0.queue} -l {0.resource} {0.script}'.format(self , os.path.abspath(os.path.dirname(self.script)))\n\t\t#print(cmd)\n\t\tqsub = 
\"\".join(popen(cmd))\n\t\tif qsub.startswith('Your job'):\n\t\t\tself.jobid = qsub.split()[2]\n\t\t\tself.status = 'qsub'\n\t\t\tself.time_qsub = time.time()\n\t\t\tself.qsub_time += 1 \n\t\telse:\n\t\t\tpass\n\tdef qsub_to_run(self):\n\t\tself.time_last = time.time()\n\t\tself.status = 'running'\n\n\tdef qhold(self):\n\t\tcmd = 'qhold {0.jobid}'.format(self)\n\t\tif os.system(cmd):\n\t\t\tpass\n\t\telse:\n\t\t\tself.status = 'hold'\n\t\t\tself.qhold_time = time.time()\n\tdef qrlease(self):\n\t\tcmd = 'qrls {0.jobid}'.format(self)\n\t\tif os.system(cmd):\n\t\t\tpass\n\t\telse:\n\t\t\tself.status = 'running'\n\t\t\tself.qrls_time = time.time()\n\tdef qdel(self):\n\t\tcmd = 'qdel {0.jobid}'.format(self)\n\t\tif os.system(cmd):\n\t\t\tpass\n\t\telse:\n\t\t\tself.status = 'break'\n\tdef qstat(self):\n\t\tcmd = 'qstat -j {0.jobid}'.format(self)\n\t\tfor i in popen(cmd):\n\t\t\tif i.startswith('usage'):\n\t\t\t\ttmp = i.rstrip().split(':' , 1 )[1]\n\t\t\t\ttmp = tmp.split(',')\n\t\t\t\tself.cputime_current = self.to_second(tmp[0].split('=')[1])\n\t\t\t\tself.io.append(tmp[2].split('=')[1])\n\t\t\t\tself.vmem.append( self.transfer(tmp[3].split('=')[1]) )\n\t\t\t\tself.maxmem = self.get_maxmem( self.transfer( tmp[4].split('=')[1] ))\n\t\n\tdef to_second(self, cputime):\n\t\ttmp = [ int(i) for i in cputime.split(':') ] \n\t\tif len(tmp) == 3 :\n\t\t\treturn tmp[0]*3600 + tmp[1]*60 + tmp[2]\n\t\telif len(tmp) == 4 :\n\t\t\treturn tmp[0]*3600*24 + tmp[1]*3600 + tmp[2]*60 + tmp[3]\n\n\tdef transfer(self , count):\n\t\tc_maxvmem = 0\n\t\tif count.endswith('G'):\n\t\t\tc_maxvmem = float(count.replace('G', '')) * 1e9\n\t\telif count.endswith('M'):\n\t\t\tc_maxvmem = float(count.replace('M', '')) * 1e6\n\t\telif count.endswith('K'):\n\t\t\tc_maxvmem = float(count.replace('K', '')) * 1e3\n\t\telif count == 'N/A':\n\t\t\tc_maxvmem = 0 \n\t\telse:\n\t\t\tc_maxvmem = float(count)\n\t\treturn c_maxvmem\n\n\tdef get_maxmem(self , current_maxvmem):\n\t\tif self.maxvmem < current_maxvmem:\n\t\t\tself.maxvmem = current_maxvmem\n\n\tdef slow_node(self):\n\t\tself.counter_qstat += 1 \n\t\tif self.counter_qstat > 1 and self.counter_qstat % 3 != 0 : \n\t\t\treturn False\n\t\tself.time_current = time.time()\n\t\tself.qstat()\n\t\thold_time = int(self.qrls_time - self.qhold_time)\n\t\tif self.status == 'running' :\n\t\t\tif self.time_last == '':\n\t\t\t\tself.time_last = self.time_current\n\t\t\t\tself.cputime_current = self.cputime_last \n\t\t\tif int(self.time_current - self.time_last) - hold_time > 3600 : \n\t\t\t\tif self.cputime_current - self.cputime_last - hold_time < 10 :\n\t\t\t\t\treturn True\n\t\t\t\telse : \n\t\t\t\t\tself.cputime_last = self.cputime_current \n\t\t\t\t\tself.time_last = self.time_current\n\t\telse:\n\t\t\treturn False\n\tdef __str__(self):\n\t\tdebug_log.info('{0.name}\\t{1}'.format(self , \"|\".join(self.io)))\n\t\t#print(self.vmem)\n\t\ttry:\n\t\t\tv_io = sum([float(i) for i in self.io]) / len(self.io)\n\t\t\tprint(self.io , io)\n\t\texcept :\n\t\t\tv_io = '--'\n\t\ttry:\n\t\t\tm_io = max([float(i) for i in self.io])\n\t\texcept:\n\t\t\tm_io = '--'\n\t\ttry:\n\t\t\tvmem = sum([i for i in self.vmem]) / len(self.vmem)/1e9\n\t\texcept :\n\t\t\tvmem = '--' \n\t\tself.maxvmem /= 1e9 \n\t\treturn \"{0.name}\\t{0.jobid}\\t{1}G\\t{0.maxvmem}G\\t{2}\\t{3}\\t{0.node}\\n\".format(self , str(vmem) , str(v_io) ,str(m_io))\n\ndef makedir(dir):\n\tif os.system('mkdir -p {0}'.format(dir)):\n\t\tsys.exit(\"cannot mkdir -p {0}\".format(dir))\n\ndef generate_split_shell(shell , line_interval , job_prefix, 
max_cycle):\n\tpat = re.compile(';\\s*;')\n\tqsub_dir = '{0}.{1}.qsub'.format(os.path.abspath(shell) , os.getpid())\n\tmakedir(qsub_dir)\n\tjob_count = 0 \n\tshell_out = '' \n\tjob_dict = {}\n\twith open(shell) as f_in:\n\t\tfor line_count , content in enumerate(f_in):\n\t\t\tcontent = content.lstrip().rstrip().replace('&' , ';')\n\t\t\tif line_count % line_interval == 0 :\n\t\t\t\tif not shell_out == '' :\n\t\t\t\t\tshell_out.write('echo This-Work-is-Completed!\\n')\n\t\t\t\t\tshell_out.close()\n\t\t\t\t\tdebug_log.info('{0} close'.format(script_file))\n\t\t\t\tjob_count += 1\n\t\t\t\tjob_count_str = '{0:0>4}'.format(job_count)\n\t\t\t\tscript_file = '{0}/{1}_{2}.sh'.format(qsub_dir , job_prefix, job_count_str) \n\t\t\t\tshell_out = open(script_file , 'w')\n\t\t\t\tdebug_log.info('{0} open'.format(script_file))\n\t\t\t\tjob_dict[job_count_str] = Job(script_file , job_count_str , '{0}_{1}'.format(job_prefix, job_count_str) , max_cycle)\n\t\t\tif not bool(content) : continue\n\t\t\tif content.startswith('#'):continue\n\t\t\tcontent = content.rstrip(';')\n\t\t\twhile(pat.search(content)):\n\t\t\t\tcontent = pat.sub(';',content)\n\t\t\tshell_out.write('{0} && '.format(content))\n\t\telse:\n\t\t\tshell_out.write('echo This-Work-is-Completed!\\n')\n\t\t\tshell_out.close()\n\treturn qsub_dir, job_dict\n\ndef modify_job_object( obj_dict , args):\n\tfor i in obj_dict:\n\t\ta_job = obj_dict[i]\n\t\t#a_job.memeroy = args.memeory\n\t\t#a_job.p =args.thread\n\t\ta_job.add_atribute(args.queue , args.max_cycle, args.resource)\n\ndef parse_log_file(infile):\n\tfinish_list = []\n\twith open(infile) as f_in:\n\t\tfor i in f_in:\n\t\t\ttmp=i.rstrip().split('\\t')\n\t\t\tif tmp[0].startswith(r'[Finished]:'):\n\t\t\t\tfinish_list += tmp[1:]\n\t\treturn set(finish_list)\n\ndef check_obj_status(obj_dict , script_file):\n\tfor a_file in glob.glob('{0}.*.log'.format(script_file)):\n\t\tfinish_job = parse_log_file(a_file)\n\t\t#print(finish_job)\n\t\tfor i in finish_job:\n\t\t\tobj_dict[i].status = 'done'\n\ndef is_dir_full(quota , dir):\n\tsize = \"\".join(popen('du -s {0}'.format(dir)))\n\tsize = int(size.split()[0])\n\tqq = 0\n\tif quota.endswith('G') : \n\t\tqq = int(quota.replace('G','')) * 1e9\n\telif quota.endswith('M') : \n\t\tqq = int(quota.replace('M','')) * 1e6\n\telif quota.endswith('K') : \n\t\tqq = int(quota.replace('K','')) * 1e3\n\telse:\n\t\tqq = quota\n\tif size >0.95*qq:\n\t\treturn True\n\telse:\n\t\treturn False\n\ndef check_die_node():\n\tglobal qhost_counter\n\tqhost_counter += 1 \n\tif qhost_counter > 1 and qhost_counter % 10 != 0 : return [] \n\tdie_node = []\n\tqhost = popen('qhost')\n\tfor i,j in enumerate(qhost):\n\t\tif i > 2 :\n\t\t\ttmp = j.split()\n\t\t\tnode_name , memory_use = tmp[0] ,tmp[5]\n\t\t\tfor cc in tmp[3:]:\n\t\t\t\tif cc.find(r'-') > -1 :\n\t\t\t\t\tdie_node.append(node_name)\n\treturn list(set(die_node))\n\ndef check_o_file(a_job):\n\tif os.path.isfile('{0.script}.o{0.jobid}'.format(a_job)):\n\t\twith open('{0.script}.o{0.jobid}'.format(a_job)) as f_in:\n\t\t\tcontent = \"\".join(f_in.readlines()).rstrip()\n\t\t\tif content.endswith('This-Work-is-Completed!'):\n\t\t\t\treturn True\n\t\t\telse:\n\t\t\t\treturn False\n\telse:\n\t\treturn False\n\ndef check_running_job(all_job_list , bool_dir_full ):\n\tdie_node = check_die_node()\n\tqstat = popen('qstat')\n\trunning_stat = {}\n\tnode_dict = {}\n\tfor i,j in enumerate(qstat):\n\t\tif i > 1 :\n\t\t\ttmp = j.split()\n\t\t\trunning_stat[tmp[0]] = tmp[4]\n\t\t\tif tmp[4] == 'r' or tmp[4] == 'hr':\n\t\t\t\tnode = 
tmp[7].split(r'@')[1]\n\t\t\t\tnode = node.split(r'.')[0]\n\t\t\t\tnode_dict[tmp[0]] = node\n\t\n\t#finish_job_list = []\n\tcount_running_jobs = 0 \n\tfor a_job in all_job_list:\n\t\tif a_job.status == 'waiting' : continue\n\t\tif a_job.jobid in running_stat:\n\t\t\tif a_job.jobid in node_dict:\n\t\t\t\ta_job.node = node_dict[a_job.jobid]\n\t\t\t\tif a_job.node in die_node or a_job.slow_node(): \n\t\t\t\t\ta_job.qdel()\n\t\t\t\t\tcontinue\n\t\t\tif running_stat[a_job.jobid] == 'hqw' or running_stat[a_job.jobid] == 'hr' or running_stat[a_job.jobid] == 'ht':\n\t\t\t\tcount_running_jobs += 1 \n\t\t\telif running_stat[a_job.jobid] == 'qw' or running_stat[a_job.jobid] == 't':\n\t\t\t\tcount_running_jobs += 1 \n\t\t\telif running_stat[a_job.jobid] == 'r' :\n\t\t\t\tif a_job.status == 'qsub':\n\t\t\t\t\ta_job.qsub_to_run()\n\t\t\t\tcount_running_jobs += 1 \n\t\t\telse :\n\t\t\t\ta_job.qdel()\n\t\t\tif bool_dir_full :\n\t\t\t\tif not a_job.status == 'hold' :\n\t\t\t\t\ta_job.hold()\n\t\t\t\telse:\n\t\t\t\t\tpass\n\t\t\telse:\n\t\t\t\tif a_job.status == 'hold' :\n\t\t\t\t\ta_job.release()\n\t\t\t\telse:\n\t\t\t\t\tpass\n\t\telse:\n\t\t\tif check_o_file(a_job):\n\t\t\t\ta_job.status = 'finish'\n\t\t\telse:\n\t\t\t\tif a_job.status == 'failed':\n\t\t\t\t\tpass\n\t\t\t\telse:\n\t\t\t\t\ta_job.status = 'break'\n\treturn count_running_jobs \n\ndef update_job_list(all ):\n\tunfinish = []\n\tfor i in all:\n\t\tdebug_log.info(i.status)\n\t\tif i.status in [ 'done' , 'finish' , 'failed']:\n\t\t\tpass\n\t\telse:\n\t\t\tunfinish.append(i)\n\treturn unfinish\n\ndef output_log(logfile , done_job_list , obj_dict ):\n\t#print(\"done in\" , [i.key for i in done_job_list])\n\tfinish_job_list = []\n\twith open(logfile , 'a') as f_out:\n\t\ttotal , finish = 0 ,0 \n\t\tfor i in obj_dict:\n\t\t\ttotal += 1 \n\t\t\tif obj_dict[i].status == 'done' or obj_dict[i].status == 'finish':\n\t\t\t\tfinish += 1\n\t\t\t\tfinish_job_list.append(obj_dict[i])\n\t\t\n\t\tif finish > len(done_job_list):\n\t\t\tf_out.write('[Process]:\\t{0}/{1} finished\\n'.format(finish , total))\n\t\t\tf_out.write('[Finished]:\\t{0}\\n'.format(\"\\t\".join([i.key for i in finish_job_list])))\n\t\t\tdone_job_list = finish_job_list\n\t#print(\"done out \" , [i.key for i in done_job_list])\n\treturn done_job_list\n\ndef update_parameters(logfile):\n\tquota , maxjob = '',''\n\twith open(logfile) as f_in:\n\t\tfor i in f_in:\n\t\t\ttmp = i.split()\n\t\t\tif i.startswith('DISK_QUOTA'):\n\t\t\t\tquota = tmp[1]\n\t\t\telif i.startswith('Max_Jobs'):\n\t\t\t\tmaxjob = int(tmp[1])\n\treturn quota , maxjob\n\ndef guard_objs(obj_dict , args ,logfile):\n\tbool_dir_full = False \n\tall_job_list = []\n\tif args.nodu:\n\t\tbool_dir_full = is_dir_full(args.quota , args.analysis_dir )\n\tfor i in sorted(obj_dict):\n\t\tif not obj_dict[i].status == 'done':\n\t\t\tall_job_list.append(obj_dict[i])\n\tdone_job_list = []\n\twhile(all_job_list):\n\t\tfor a_job in all_job_list:\n\t\t\tquota , maxjob = update_parameters(logfile)\n\t\t\t#print(quota , maxjob)\n\t\t\tif args.nodu:\n\t\t\t\tbool_dir_full = is_dir_full(quota , args.analysis_dir )\n\t\t\twhile bool_dir_full:\n\t\t\t\tcount_running_job , finish_job_list = check_running_job(all_job_list , bool_dir_full )\n\t\t\t\tdone_job_list = output_log(logfile , done_job_list , obj_dict)\n\t\t\t\ttime.sleep(args.interval)\n\t\t\t\tquota , maxjob = update_parameters(logfile)\n\t\t\t\tif args.nodu:\n\t\t\t\t\tbool_dir_full = is_dir_full(quota , args.analysis_dir )\n\t\t\telse:\n\t\t\t\tdebug_log.info('{0.name} 
{0.status}'.format(a_job))\n\t\t\t\tcount_running_job = check_running_job(all_job_list , bool_dir_full )\n\t\t\t\tdone_job_list = output_log(logfile , done_job_list , obj_dict)\n\t\t\t\twhile count_running_job >= maxjob :\n\t\t\t\t\ttime.sleep(args.interval)\n\t\t\t\t\tprint('job number reach max , waiting for finish job : {0.name}'.format(a_job))\n\t\t\t\t\tquota , maxjob = update_parameters(logfile)\n\t\t\t\t\tif args.nodu:\n\t\t\t\t\t\tbool_dir_full = is_dir_full(quota , args.analysis_dir )\n\t\t\t\t\tdebug_log.info('{0.name} {0.status}'.format(a_job))\n\t\t\t\t\tcount_running_job = check_running_job(all_job_list , bool_dir_full )\n\t\t\t\t\tdone_job_list = output_log(logfile , done_job_list , obj_dict)\n\t\t\t\telse:\n\t\t\t\t\tif a_job.status == 'waiting':\n\t\t\t\t\t\tdebug_log.info('{0.name} {0.status}'.format(a_job))\n\t\t\t\t\t\ta_job.qsub()\n\t\t\t\t\t\tdebug_log.info('{0.name} {0.status}'.format(a_job))\n\t\t\t\t\tif a_job.status == 'break':\n\t\t\t\t\t\twith open(logfile, 'a') as f_out:\n\t\t\t\t\t\t\tf_out.write('{0.name} is not finish , it is break down , '.format(a_job))\n\t\t\t\t\t\t\tif not args.noreqsub:\n\t\t\t\t\t\t\t\tif a_job.qsub_time < a_job.max_cycle:\n\t\t\t\t\t\t\t\t\ta_job.qsub()\n\t\t\t\t\t\t\t\t\tf_out.write('reqsub it at the {0.qsub_time} time\\n'.format(a_job))\n\t\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\t\ta_job.status = 'failed'\n\t\t\t\t\t\t\t\t\tf_out.write('reqsub {0.qsub_time} time exceed max cycle, drop it\\n'.format(a_job))\n\t\t\t\t\t\t\t\tdebug_log.info('{0.name} {0.status}'.format(a_job))\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tf_out.write('pls reqsub it by yourself\\n')\n\t\t\t\t\telse:\n\t\t\t\t\t\tprint('{0.name} job is running , waiting....'.format(a_job))\n\t\ttime.sleep(args.interval)\n\t\tdebug_log.info('##in {0.name} {0.status}'.format(a_job))\n\t\tcount_running_job = check_running_job(all_job_list , bool_dir_full)\n\t\tdebug_log.info('##in2 {0.name} {0.status}'.format(a_job))\n\t\tdone_job_list = output_log(logfile , done_job_list , obj_dict)\n\t\tdebug_log.info('##out {0.name} {0.status}'.format(a_job))\n\t\tif not args.noreqsub: \n\t\t\tall_job_list = update_job_list(all_job_list )\n\t\t\tdebug_log.info( \"job list : \" + '\\t'.join( [ i.name for i in all_job_list]))\n\t\telse:\n\t\t\twhile( not check_all_finish(all_job_list)):\n\t\t\t\ttime.sleep(args.interval)\n\t\t\t\tcount_running_job = check_running_job(all_job_list , bool_dir_full)\n\t\t\t\tdone_job_list = output_log(logfile , done_job_list , obj_dict)\n\t\t\tall_job_list = []\n\ndef check_all_finish(all):\n\tfor i in all:\n\t\t#print(i.status)\n\t\tif i.status in ['waiting' , 'qsub', 'running', 'hold']:\n\t\t\treturn False\n\telse:\n\t\treturn True\n\ndef output_finish_log(obj_dict , logfile):\n\twith open(logfile ,'a') as f_out:\n\t\tfailed_list = []\n\t\tfor i in obj_dict:\n\t\t\ta_job = obj_dict[i]\n\t\t\tif a_job.status == 'finish':\n\t\t\t\tf_out.write(a_job.__str__())\n\t\t\telif a_job.status == 'failed':\n\t\t\t\tfailed_list.append(a_job.name)\n\t\t\telse:\n\t\t\t\tdebug_log.info(\"finish {0.name} {0.status}\".format(a_job))\n\t\tif len(failed_list) > 0:\n\t\t\tf_out.write('{0} is not finish\\n'.format(\"\\t\".join(failed_list)))\n\t\t\tsys.exit(1)\n\t\telse:\n\t\t\tf_out.write('All jobs finished!')\n\t\t\tsys.exit(0)\n\ndef main():\n\tparser=argparse.ArgumentParser(description=__doc__,\n\t\t\tformatter_class=argparse.RawDescriptionHelpFormatter,\n\t\t\tepilog='author:\\t{0}\\nmail:\\t{1}'.format(__author__,__mail__))\n\tparser.add_argument(help='input 
file',dest='input')\n\t#parser.add_argument('-o','--output',help='output log file',dest='output',type=argparse.FileType('w'),required=True)\n\tparser.add_argument('-l','--lines', help='line number , default is [1] ',dest='line', type=int , default=1)\n\tparser.add_argument('-m','--maxjob',help='max job number , default is [4]', dest='maxjob', type=int ,default=4)\n\tparser.add_argument('-i','--interval',help='interval check time , default is [300]',dest='interval', type=int , default=300)\n\tparser.add_argument('-q','--queue',help='job queue , default is [sci.q]' , dest='queue', default='sci.q')\n\tparser.add_argument('-nr','--noreqsub',help='do not reqsub failed job ,default is reqsub', dest='noreqsub', action='store_true')\n\tparser.add_argument('-nc','--nocontinue',help='do not continue with unfinish log , default is continue',dest='nocontinue',action='store_false')\n\tparser.add_argument('-re','--resource',help='resouce list ,default is [ \"vf=1G -l p=1\" ] ', dest = 'resource',default=' vf=1G -l p=1 ')\n\tparser.add_argument('-prefix','--jobprefix', help='job prefix ,default is [work]',dest='prefix' ,default='work')\n\tparser.add_argument('-maxcycle','--maxcycle',help='max cycle , ,default is [5]', dest='max_cycle',default=5,type=int)\n\tparser.add_argument('-quota', '--quota', help='quota,default is [100000000000000000G]', dest='quota' , default='100000000000000000G')\n\tparser.add_argument('-analysis_dir', '--analysis_dir', help='analysis dir,default is [shell/../..]',dest='analysis_dir' )\n\tparser.add_argument('-nodu','--nodu',help='do not check disk,default is [du]',dest='nodu',action='store_false')\n\targs=parser.parse_args()\n\t\n\t#print(args.noreqsub)\n\t#sys.exit()\n\twork_dir = os.path.abspath(os.path.dirname(args.input))\n\tif args.analysis_dir == None:\n\t\targs.analysis_dir = '{0}/../../'.format(os.path.abspath(work_dir))\n\tglobal debug_log\n\tdebug_log = mylogger(args.input)\n\tlogfile = '{0}.{1}.log'.format(args.input , os.getpid())\n\twith open(logfile,'w') as f_out:\n\t\tf_out.write('DISK_QUOTA\\t{0}\\nMax_Jobs\\t{1}\\n'.format(args.quota, args.maxjob))\n\tshell_dir , obj_dict = generate_split_shell( args.input , args.line , args.prefix , args.max_cycle)\n\tdebug_log.info('generate shell done')\n\tmodify_job_object( obj_dict , args )\n\t#print( obj_dict )\n\tif args.nocontinue:\n\t\tcheck_obj_status(obj_dict , args.input)\n\tdebug_log.info('modify object status done')\n\n\tguard_objs(obj_dict , args , logfile)\n\toutput_finish_log(obj_dict , logfile)\n\n\nif __name__ == '__main__':\n\tmain()\n","sub_path":"bin/src/qsub_sge.py","file_name":"qsub_sge.py","file_ext":"py","file_size_in_byte":16902,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"194336085","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Jun 22 12:59:20 2016\n@author: Vijay Yevatkar\n\"\"\"\n\nimport pandas as pd\nimport nltk\n\ndef word_list(p):\n#p = \"C:/Users/u505123/Desktop/Final_Achuth.csv\"\n df = pd.read_csv(p)\n \n all_words2 = [[]]\n ind = 0\n for i in df[\"Phrase\"]:\n if df[\"Indicator\"].ix[ind]==\"Yes\": \n x=[]\n x = nltk.word_tokenize(i) \n \n if len(x):\n #print \"yo\",\n for word in x:\n if word in nltk.corpus.stopwords.words('english'): \n x.remove(word)\n all_words2.append(x)\n ind+=1\n return all_words2","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":669,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} 
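# Note on the word_list sample above: calling x.remove(word) while iterating
# over x skips the token right after each removal, so consecutive stopwords
# slip through. A minimal sketch of the usual fix builds a new list instead
# (drop_stopwords is a hypothetical helper name; it assumes the NLTK stopword
# corpus has been downloaded):
import nltk

def drop_stopwords(tokens):
    # filter in one pass instead of mutating the list being iterated
    stop = set(nltk.corpus.stopwords.words('english'))
    return [w for w in tokens if w not in stop]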
+{"seq_id":"393146166","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n\"\"\"\nWave dispaly\n@author: Jiannan Hua\nlast edited: Oct.24 2018\n\"\"\"\n\n\nimport matplotlib.pyplot as plt\nfrom matplotlib.patches import Circle\nimport numpy as np\nimport math\n\ndef fun(x,t):\n y = 2 * np.sin(0.5 * x - 10 * t) + 2 * np.sin(0.6 * x - 60 * t)\n return y\n\nclass WavaDisplay(object):\n def __init__(self, wave_fun):\n self.wave_fun = wave_fun\n\n def Spread(self, xmin, xmax, tmin, tmax, delta_x=0.02, delta_t=0.02):\n self.x = np.linspace(xmin, xmax, (xmax - xmin) / delta_x)\n self.t = np.linspace(tmin, tmax, (tmax - tmin) / delta_t)\n \n fig, ax = plt.subplots(nrows=1, ncols=1)\n \n plt.ion() #interactive mode on\n \n try:\n for t in self.t:\n self.y = self.wave_fun(self.x, t)\n plt.grid(True) #添加网格\n ax.plot(wd.x, wd.y, color ='k', ls ='-')\n plt.pause(delta_t)\n plt.cla()\n except Exception as err:\n print(err)\n \nwd = WavaDisplay(fun)\nwd.Spread(xmin=0, xmax=100, tmin=0, tmax=1)\n\n","sub_path":"wave.py","file_name":"wave.py","file_ext":"py","file_size_in_byte":1115,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"264057089","text":"#!/usr/bin/python3\n\"\"\"script that starts a Flask web application\"\"\"\nfrom models import storage\nfrom flask import Flask, escape, render_template\nfrom models.state import State\napp = Flask(__name__)\n\n\n@app.teardown_appcontext\ndef closedb(stor):\n \"\"\"Closes db session\"\"\"\n storage.close()\n\n\n@app.route('/cities_by_states', strict_slashes=False)\ndef states_template():\n \"\"\"function that route /states_list\"\"\"\n states = list(storage.all(State).values())\n states.sort(key=lambda state: state.name)\n return render_template('8-cities_by_states.html', states=states)\n\nif __name__ == '__main__':\n storage.reload()\n app.run(\"0.0.0.0\", 5000)\n","sub_path":"web_flask/8-cities_by_states.py","file_name":"8-cities_by_states.py","file_ext":"py","file_size_in_byte":653,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"124271393","text":"from experiments.constants import GRID_RESULTS_FILE\nfrom experiments.util import set_random_seed, load_checkpoint\nfrom experiments.real.bike_sharing.dataset import *\nfrom experiments.grid_search import grid_search, config_validation, get_grid_search_space\nfrom experiments.real.bike_sharing.grid_eval import plot_and_evaluate_model_gll, plot_dataset_and_net\nfrom experiments.grid_train import train_and_evaluate_gll\nfrom experiments.real.models import get_model\nfrom experiments.util import get_device\n\n\"\"\"Constants\"\"\"\nROOT_GLL = 'experiments/real/bike_sharing/tobit_based/reparam_fixed_std/gll'\nCHECKPOINT_GLL = 'gausian log likelihood model'\n\n\"\"\"Reproducible experiments\"\"\"\n\nset_random_seed()\n\n\"\"\"# PDF Log-Likelihood\"\"\"\n\nclass GausianLogLikelihoodLoss(t.nn.Module):\n\n def __init__(self, gamma):\n super(GausianLogLikelihoodLoss, self).__init__()\n self.gamma = gamma\n self.epsilon = t.tensor(1e-40, dtype = t.float32, requires_grad = False, device = get_device())\n\n def forward(self, y_pred: t.Tensor, y_true: t.Tensor):\n gamma = t.abs(self.gamma)\n return -t.sum(t.log(gamma + self.epsilon) - ((gamma * y_true - y_pred) ** 2) / 2)\n\n\"\"\"### Grid Search\"\"\"\n\ntrain_and_evaluate_net = train_and_evaluate_gll(ROOT_GLL + '/' + CHECKPOINT_GLL, GausianLogLikelihoodLoss,\n plot = False, log = False, model_fn = lambda: get_model(INPUT_SIZE))\n\n\"\"\"Train once with default 
settings\"\"\"\ndef train_once_gll():\n conf = {\n 'max_lr': 1e-4,\n 'epochs': 10,\n 'batch': 100,\n 'pct_start': 0.3,\n 'anneal_strategy': 'linear',\n 'base_momentum': 0.85,\n 'max_momentum': 0.95,\n 'div_factor': 5,\n 'final_div_factor': 1e4,\n 'weight_decay': 0\n }\n train_and_evaluate_net(dataset_train, dataset_val, bound_min, bound_max, conf)\n plot_and_evaluate_model_gll(bound_min, bound_max, test_df(df), dataset_val, dataset_test,\n ROOT_GLL, CHECKPOINT_GLL, GausianLogLikelihoodLoss, isGrid = False)\n\ndef grid_search_gll():\n grid_config = get_grid_search_space()\n grid_best = grid_search(ROOT_GLL, dataset_train, dataset_val, bound_min, bound_max,\n grid_config, train_and_evaluate_net, CHECKPOINT_GLL, conf_validation = config_validation)\n return grid_best\n\ndef eval_gll_reparam():\n plot_and_evaluate_model_gll(bound_min, bound_max, test_df(df), dataset_val, dataset_test,\n ROOT_GLL, CHECKPOINT_GLL, GausianLogLikelihoodLoss, isGrid = True)\n grid_results = t.load(ROOT_GLL + '/' + GRID_RESULTS_FILE)\n best_config = grid_results['best']\n best_metrics = grid_results[str(best_config)]\n print(best_config)\n print(best_metrics)\n\ndef plot_gll_reparam():\n checkpoint = load_checkpoint(f'{ROOT_GLL}/grid {CHECKPOINT_GLL}.tar')\n plot_dataset_and_net(checkpoint, get_model(INPUT_SIZE), test_df(df))","sub_path":"experiments/real/bike_sharing/tobit_based/reparam_fixed_std/reparam_gll_bike.py","file_name":"reparam_gll_bike.py","file_ext":"py","file_size_in_byte":2919,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"350944758","text":"import random\n\nrandom_num = random.Random()\n\nnumber = random_num.randrange(1, 1000)\n\nguesses = 0\nmessage = \"\"\n\nwhile True:\n\tguess = int(input(message + \"\\nGuess my number between 1 and 1000: \"))\n\n\tguesses += 1\n\n\tif guess > number:\n\t\tmessage += str(guess) + \" is too high.\\n\"\n\telif guess < number:\n\t\tmessage += str(guess) + \" is too low.\\n\"\n\telse:\n\t\tbreak\n\ninput(\"\\n\\nGreat, you got it in \" + str(guesses)+ \" gusses!\\n\\n\")\n","sub_path":"guessing_game.py","file_name":"guessing_game.py","file_ext":"py","file_size_in_byte":422,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"249769054","text":"import re\n\nx = 4\ny = 7\nt = \"True\"\nf = \"False\"\nr = re.compile('[aeiou]')\nr\n\nif x + y >= 11:\n print(t)\nelse:\n print(f)\n\na = 0\nb = 25\n\nwhile a < b:\n print(a)\n a += 1\n\nif a == b:\n print(\"Loop Finished\")\n\nanimals = [\"dogs\", \"cats\", \"gophers\", \"turkeys\", \"newts\", \"monkeys\"]\ndisease = \"Tuberculosis\"\n\nfor w in animals:\n print(w, \"are cute\")\n\nfor l in disease:\n check = r.match(l)\n if not check:\n print(l)\n\nnum1 = int(input(\"Give me a number\"))\nnum2 = int(input(\"give me another number\"))\n\nprint(\"Number 1 is\", num1)\nprint(\"Number 2 is\", num2)\nprint(\"Their sum is\", num1 + num2)\nif num1 > num2:\n print(\"Their difference is\", num1 - num2)\nelse:\n print(\"Their difference is\", num2 - num1)\n\nprint(\"Their product is\", num1 * num2)\nif num1 > num2:\n print(\"Their quotient is\", num1 / num2)\nelse:\n print(\"Their quotient is\", num2 / num1)\n\n\n\n\n","sub_path":"ComputationLoopsConditionals.py","file_name":"ComputationLoopsConditionals.py","file_ext":"py","file_size_in_byte":871,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"51984229","text":"import pandas as pd\nimport numpy as np\nfrom threading 
import Thread, RLock\n\nimport time\n\nclass Count (Thread):\n\tdef __init__(self,chunk,values,col):\n\t\tThread.__init__(self)\n\t\tself.chunk = chunk\n\t\tself.values = values\n\t\tself.col = col\n\t\t\n\t\t\n\tdef run(self):\n\t\tself.values = self.values.append(self.chunk).drop_duplicates(self.col)\n\t\t\n\ndef count_values(file, col, nb_lignes, nb_threads):\n start_time = time.time()\n \n print(\"Lecture du fichier...\")\n\n ch_size = 100000\n\n chunks = pd.read_csv(file, iterator=True, chunksize=ch_size, usecols=[col])\n values = pd.read_csv(file, nrows=1, usecols=[col])\n\n current_chunk = 0\n chunk_nb = np.ceil(nb_lignes/ch_size)\n\n print(\"Début du compte du nombre de valeurs différentes\")\n \n threads = []\n current_thread = 0;\n \n for chunk in chunks:\n \tthreads.append(Count(chunk,values,col))\n \tthreads[current_thread].start()\n \t\t\n \tcurrent_chunk += 1;\n \tcurrent_thread = (current_thread + 1);\n \tprint(\"thread {} {} %\".format(current_thread,np.round(current_chunk*100/chunk_nb, 2)))\n \n \n\n print(\"Temps d'exécution : {} seconds\".format(np.round(time.time()-start_time,2)))\n print(\"Nombre de valeurs : {}\".format(len(values)))\n return len(values),values\n\n","sub_path":"expedia/script/include/count_items_threads.py","file_name":"count_items_threads.py","file_ext":"py","file_size_in_byte":1355,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"216280491","text":"'''\ninput\n5\nweird\noutput\nwerd\ninput\n4\nword\noutput\nword\ninput\n5\naaeaa\noutput\na\n'''\nnol = int(input())\nans = []\nins = input()\nfor i in ins:\n if len(ans)> 0 and ans[-1] in \"aeiouy\" and i in \"aeiouy\":continue\n else:\n ans.append(i)\nprint(\"\".join(ans))\n\n\n","sub_path":"CodeForces/vowels.py","file_name":"vowels.py","file_ext":"py","file_size_in_byte":262,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"2855232","text":"#PROGRAM TO FACE DETECTION USING OPEN CV IN A PICTURE\r\n\r\nimport cv2\r\n\r\n# Load the cascade\r\n\r\nfcas = cv2.CascadeClassifier(cv2.data.haarcascades + \"haarcascade_frontalface_default.xml\")\r\n\r\n# Read the input image\r\n\r\npic = cv2.imread(\"test.jpg\")\r\n\r\n# Convert into grayscale\r\n\r\ngray_img = cv2.cvtColor(pic, cv2.COLOR_BGR2GRAY)\r\n\r\n# Detect faces\r\n\r\nfaces = fcas.detectMultiScale(gray_img, scaleFactor=1.05, minNeighbors=5)\r\n\r\n# Draw rectangle around the faces\r\n\r\nfor (x, y, w, h) in faces:\r\n cv2.rectangle(pic, (x, y), (x + w, y + h), (255, 0, 0), 2)\r\n\r\n# Display the output\r\n\r\ncv2.imshow('img', pic)\r\ncv2.waitKey()\r\n","sub_path":"detectioncode.py","file_name":"detectioncode.py","file_ext":"py","file_size_in_byte":615,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"540927729","text":"# _*_ coding:utf-8 _*_\r\nimport numpy as np\r\n\r\ndef HilbertMatrix(n):\r\n H=np.mat(np.zeros((n,n)))\r\n for i in range(n):\r\n for j in range(n):\r\n H[i,j]=1/(i+j+1)\r\n return H\r\ndef cond(A):\r\n def RowSumNorm(A):\r\n A=np.abs(A) \r\n B=A.sum(axis=1)\r\n return np.max(B)\r\n return RowSumNorm(A)*RowSumNorm(A.I)\r\nfor i in range(2,7):\r\n print('%d阶希尔伯特矩阵的条件数为%d'%(i,cond(HilbertMatrix(i))))\r\ndef b(n):\r\n x=np.mat([1 for i in range(n)])\r\n return x*HilbertMatrix(n)\r\n\r\ndef LUSolve(A,b):\r\n A=A.astype(float)\r\n b=b.astype(float)\r\n b=b.T\r\n n=len(A)\r\n p=[i for i in range(n)] #排列阵\r\n for i in range(n-1):\r\n a=A[i,i]\r\n for j in range(i+1,n):\r\n if 
abs(a) < abs(A[j,i]):\r\n a=A[j,i]\r\n p[i]=j\r\n for k in range(n):\r\n a=A[p[i],k]\r\n A[p[i],k]=A[i,k]\r\n A[i,k]=a\r\n for j in range(i+1,n):\r\n A[j,i]=A[j,i]/A[i,i]\r\n for j in range(i+1,n):\r\n for k in range(i+1,n):\r\n A[j,k]=A[j,k]-A[j,i]*A[i,k]\r\n #LU分解完成\r\n for i in range(n-1):\r\n a=b[i]\r\n b[i]=b[p[i]]\r\n b[p[i]]=a\r\n for i in range (1,n):\r\n for k in range(i):\r\n b[i]=b[i]-A[i,k]*b[k]\r\n for i in range(n):\r\n b[n-1-i]=b[n-1-i]/A[n-1-i,n-1-i] \r\n for k in range(i):\r\n b[n-1-i]=b[n-1-i]-A[n-1-i,n-1-k]*b[n-1-k]/A[n-1-i,n-1-i] \r\n return b\r\n \r\n \r\nfor s in range(2,5):\r\n print('r_%d='%(s),b(s).T-HilbertMatrix(s)*LUSolve(HilbertMatrix(s), b(s)))\r\n print('Δx_%d='%(s),LUSolve(HilbertMatrix(s),b(s))-[[1] for i in range(s)])\r\nprint('n=3时x^bar就一位有效数字都没有了')","sub_path":"NumericalAnalysis/HilbertMatrix.py","file_name":"HilbertMatrix.py","file_ext":"py","file_size_in_byte":1757,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"237280509","text":"from django.shortcuts import render\r\nfrom appPro.models import Sucursal, Marca, Categoria, Producto\r\n#from gestorDeProducto.models import *\r\n\r\n# Create your views here.\r\ndef plantilla(request):\r\n\treturn render(request, 'plantillaBase.html', {} )\r\n\t\r\ndef registro(request):\r\n\tnombre = \"\"\r\n\t\r\n\tif request.method == \"POST\":\r\n\t\tnombre = request.POST[\"txtNombre\"]\r\n\t\t\r\n\r\n\tcontexto = {'nombre': nombre, 'valor1' : 135234, 'valor2': 6546}\r\n\treturn render(request, 'registro.html', contexto )\r\n\t\r\ndef productos(request):\r\n\treturn render(request, 'productos.html', {} )\r\n\t\r\n\t\r\ndef sucursal(request):\r\n\tmensaje = \"\"\r\n\tlista \t= {}\r\n\titem \t= {}\r\n\tif request.method == \"POST\": # verifica si existe una solicitud\r\n\t\t#validar los datos antes guardar\r\n\t\tid\t \t\t= int(\"0\" + request.POST[\"txtId\"])\r\n\t\tnombre \t\t= request.POST[\"txtNombre\"]\r\n\t\tdireccion \t= request.POST[\"txtDireccion\"]\r\n\t\ttelefono\t= request.POST[\"txtTelefono\"]\r\n\t\tencargado\t= request.POST[\"txtEncargado\"]\r\n\r\n\t\tif 'btnGrabar' in request.POST:\r\n\t\t\tif id < 1:\r\n\t\t\t\tSucursal.objects.create(nombre=nombre, direccion=direccion, telefono=telefono, encargado=encargado)\r\n\t\t\telse:\r\n\t\t\t\titem = Sucursal.objects.get(pk = id)\r\n\t\t\t\titem.nombre \t= nombre\r\n\t\t\t\titem.direccion \t= direccion\r\n\t\t\t\titem.telefono \t= telefono\r\n\t\t\t\titem.encargado \t= encargado\r\n\t\t\t\titem.save()\r\n\t\t\t\titem = {}\r\n\t\t\t\r\n\t\t\tmensaje = \"La operación realizada con éxito\"\r\n\t\telif 'btnListar' in request.POST:\r\n\t\t\t# lista = Sucursal.objects.all()\t# como filtrar los datos de un modelo (ORM)\t\r\n\t\t\tlista = Sucursal.objects.filter(nombre__contains = nombre)\r\n\t\telif 'btnBuscar' in request.POST:\r\n\t\t\titem = Sucursal.objects.get(pk = id)\r\n\t\telif 'btnEliminar' in request.POST:\r\n\t\t\titem = Sucursal.objects.get(pk = id)\r\n\t\t\titem.delete()\r\n\t\t\tmensaje = \"El registro \" + item.nombre +\" fue eliminado\"\t\t\t\r\n\t\t\titem = {}\r\n\r\n\tcontexto = {'mensaje' : mensaje, 'lista' : lista, 'item' : item}\r\n\treturn render(request, 'sucursal.html', contexto )\r\n\t\r\n\t\r\ndef producto(request):\r\n\tmensaje \t= \"\"\r\n\tlista \t\t= {}\r\n\titem \t\t= {}\r\n\tcmbMarca \t= Marca.objects.filter(activo = True)\r\n\tcmbCategoria= Categoria.objects.filter(activo = True)\r\n\terrores\t\t= {}\r\n\t\r\n\tif request.method == \"POST\": # verifica si existe una 
\r\n\t\t# validate the data before saving\r\n\t\tid\t \t\t= int(\"0\" + request.POST[\"txtId\"])\r\n\t\tidMarca\t\t= int(\"0\" + request.POST[\"cmbMarca\"])\r\n\t\tidCategoria\t= int(\"0\" + request.POST[\"cmbCategoria\"])\r\n\t\tcodigo \t\t= int(\"0\" + request.POST[\"txtCodigo\"])\r\n\t\tdescripcion = request.POST[\"txtDescripcion\"]\r\n\t\tstock \t\t= int(\"0\" + request.POST[\"txtStock\"])\r\n\t\tprecioCosto = int(\"0\" + request.POST[\"txtPrecioCosto\"])\r\n\t\tprecioVenta = int(\"0\" + request.POST[\"txtPrecioVenta\"])\r\n\r\n\t\tif 'btnGrabar' in request.POST:\r\n\t\t\tmarca \t\t= buscarPorId(Marca, idMarca)\r\n\t\t\tcategoria \t= buscarPorId(Categoria, idCategoria)\t\r\n\t\t\t\r\n\t\t\tif marca is None:\r\n\t\t\t\terrores['cmbMarca'] = \"No seleccionó la marca\"\r\n\t\t\telif categoria is None:\r\n\t\t\t\terrores['cmbCategoria'] = \"No seleccionó la categoria\"\t\t\t\r\n\t\t\telse:\r\n\t\t\t\tif id < 1:\t\t\r\n\t\t\t\t\tProducto.objects.create(marca=marca, categoria=categoria, codigo=codigo,descripcion=descripcion,stock=stock,precioCosto=precioCosto,precioVenta=precioVenta)\r\n\t\t\t\t\tmensaje = \"El producto fue guardado con éxito\"\r\n\t\t\t\telse:\r\n\t\t\t\t\titem \t\t\t= Producto.objects.get(pk = id)\r\n\t\t\t\t\titem.marca \t\t= marca\r\n\t\t\t\t\titem.categoria \t= categoria\r\n\t\t\t\t\titem.codigo \t= codigo\r\n\t\t\t\t\titem.descripcion= descripcion\r\n\t\t\t\t\titem.stock \t\t= stock\r\n\t\t\t\t\titem.precioCosto= precioCosto\r\n\t\t\t\t\titem.precioVenta= precioVenta\r\n\t\t\t\t\titem.save()\r\n\t\t\t\t\titem = {}\t\t\t\r\n\t\t\t\t\tmensaje = \"La operación realizada con éxito\"\r\n\t\telif 'btnListar' in request.POST:\r\n\t\t\t# lista = Producto.objects.all()\t# how to filter a model's rows (ORM)\t\r\n\t\t\tlista = Producto.objects.filter(descripcion__contains = descripcion)\r\n\t\telif 'btnBuscar' in request.POST:\r\n\t\t\titem = Producto.objects.get(pk = id)\r\n\t\telif 'btnEliminar' in request.POST:\r\n\t\t\titem = Producto.objects.get(pk = id)\r\n\t\t\titem.delete()\r\n\t\t\tmensaje = \"El registro \" + item.descripcion +\" fue eliminado\"\t\t\t\r\n\t\t\titem = {}\r\n\r\n\tcontexto = {'mensaje' : mensaje, 'lista' : lista, 'item' : item, 'cmbMarca':cmbMarca, 'cmbCategoria':cmbCategoria, 'errores':errores}\r\n\treturn render(request, 'producto.html', contexto )\r\n\r\ndef buscarPorId(modelo, pk):\r\n\ttry:\r\n\t\tobjeto = modelo.objects.get(pk = pk)\r\n\texcept Exception:\r\n\t\t# return None when no row matches the given pk\r\n\t\tobjeto = None\r\n\treturn objeto","sub_path":"views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4293,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"25527803","text":"#!/usr/bin/python\n# encoding: utf-8\n\nimport os\nimport datetime\nimport time\n\nSRC = \"gbkuni30.txt\"\nDST = \"gbkuni30_gen1.h\"\nARRAY = \"gbkuni30\"\n\nbuffer = [0]*65535 # initialize a table of 65535 zero entries\nmax_num = 0\n\ntry:\n    f = open(SRC, 'r')\n    while True:\n        l = f.readline()\n        if l == '':\n            break\n        s = l.strip().split(':') # split on ':' into a list of fields\n        if len(s) == 2:\n            x1 = int(s[0], 16) # parse the hex string as an integer\n            x2 = int(s[1], 16)\n            buffer[x2] = x1 # store the value at its index\n            if x2 > max_num:\n                max_num = x2\n            #print(\"%04x %04x\" % (x2, x1))\n    print(\"max num %d %x len: %d\" % (max_num, max_num, len(buffer)))\nexcept:\n    raise\n\nf = open(DST, \"w\")\n\ntest = \"/**********************************************************************************/\\n\"\ntest += \"/* GBK(GB18030) to UNICODE table, powered by Late Lee                            */\\n\"\ntest += \"/* http://www.latelee.org                                                        */\\n\"\ntest += \"/* %s                                                 */\\n\" % (datetime.datetime.now())\ntest += \"/* The source file comes from:                                          */\\n\"\ntest += \"/* http://icu-project.org/repos/icu/data/trunk/charset/source/gb18030/gbkuni30.txt*/\\n\"\n\ntest += \"/**********************************************************************************/\\n\"\n\ntest += \"#ifndef __GBK2UNICODE__H\\n\"\ntest += \"#define __GBK2UNICODE__H\\n\\n\"\n\ntest += \"static unsigned short %s[] = \\n{\\n\" % (ARRAY)\n\nf.write(test) # write text to file\n####\ncnt=0\nfor i in range(0x8140, max_num+1):\n    #print(\"%x -- 0x%x\" % (i, buffer[i]))\n    ch = \"0x%04x, \" % (buffer[i])\n    f.write(ch)\n    cnt += 1\n    if cnt % 10 == 0:\n        tmp = \" // line num %d \\n\" % (cnt // 10 - 1)\n        f.write(tmp)\n\n\n########\ntest= \"\\n\"\ntest+= \"};\\n\\n\"\ntest+= \"#endif //__GBK2UNICODE__H\\n\"\n \nf.write(test) # write text to file\nf.close()\n","sub_path":"project/gbk/gen_gbk.py","file_name":"gen_gbk.py","file_ext":"py","file_size_in_byte":1797,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"651780224","text":"import map\nimport csv\n\ntmp = csv.reader (open (\"iou.txt\", newline = ''), delimiter = ' ')\nindex = list (tmp)\ntmp = csv.reader (open (\"count.txt\", newline = ''), delimiter = ' ')\ncount = list (tmp)\n\ntotal_count = 0\nfor row in count:\n    # each csv row is a list of strings; accumulate the numeric count in its first column\n    total_count += int(row[0])\n\nap = map.meanAveragePrecision (index, total_count)\nprint (ap)","sub_path":"project/total_ap.py","file_name":"total_ap.py","file_ext":"py","file_size_in_byte":313,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"255296002","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*- #\nfrom __future__ import unicode_literals\n\nAUTHOR = 'Dirk Colbry'\nSITENAME = \"CyberAmbassadors\"\nSITEURL = 'https://colbrydi.github.io/cyberambassadors'\nPATH = 'content'\n\n#PLUGIN_PATHS=[\"./plugins\"]\n#PLUGINS=['ipynb.markup']\n#MARKUP = { 'md', 'ipynb'}\n\nTIMEZONE = 'America/Detroit'\n\n# Following items are often useful when publishing\nDISQUS_SITENAME = \"DirkColbry\"\nGOOGLE_ANALYTICS = \"UA-121560886-1\"\n\nDEFAULT_LANG = 'en'\n\n# Feed generation is usually not desired when developing\nFEED_ALL_ATOM = None\nCATEGORY_FEED_ATOM = None\nTRANSLATION_FEED_ATOM = None\nAUTHOR_FEED_ATOM = None\nAUTHOR_FEED_RSS = None\n\nDISPLAY_HOME = True\n\n# Blogroll\n#LINKS = (('Pelican', 'http://getpelican.com/'),\n#         ('Python.org', 'http://python.org/'),\n#         ('Jinja2', 'http://jinja.pocoo.org/'),\n#         ('You can modify those links in your config file', '#'),)\n\n# Social widget\n#SOCIAL = (('You can add links in your config file', '#'),\n#          ('Another social link', '#'),)\n\nDEFAULT_PAGINATION = 10\n\n# Uncomment following line if you want document-relative URLs when developing\nRELATIVE_URLS = True\n\n# Comment following line if you want the default theme\nTHEME = 'themes/tuxlite_cmse'\n\nDISPLAY_PAGES_ON_MENU = False\nDISPLAY_CATEGORIES_ON_MENU = True\n\n#INDEX_SAVE_AS = 'about.html'\n#PAGE_SAVE_AS = 'about2.html'\n#PAGE_URL = 'about2.html'\n\n# Provides menu items, which come before pages / categories\nMENUITEMS = [('Project Summary',SITEURL+'/pages/summary.html'), ('Time Line',SITEURL+'/pages/timeline.html'),('Affiliates',SITEURL+'/pages/affiliates.html'),('Contact',SITEURL+'/pages/contact.html')]\n","sub_path":"source/pelicanconf.py","file_name":"pelicanconf.py","file_ext":"py","file_size_in_byte":1639,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"527028748","text":"#!/usr/bin/env python\n\n# Import my extensions\nimport sys\nsys.path.insert(0, 
'/home/sarah/Documents/Spirometry/python/extensions')\n\nimport csv\nimport matplotlib.pyplot as plt\nfrom sympy import *\nimport numpy as np\nfrom numpy import array\nfrom numpy.linalg import lstsq\nfrom breath_analysis import split_breaths\n\npath = './'\nfiles = [\n 'data_recording.csv',\n 'mask_example_4wRX_then_4woRX.csv',\n 'mask_example.csv',\n 'mask_example_Rx.csv'\n ]\n\nrelationship = [\n [-6.4, 1],\n [-6.4, 1],\n [-1.5, 1.25]\n ]\n\n# Create data classes\nspir_pressure = []\nmask_pressure = []\ntime = []\n\nfor filename in range(0, 2):\n # Store pressure and flow data\n fname = path + files[filename]\n with open(fname, 'rb') as csvfile:\n reader = csv.reader(csvfile, delimiter = ',')\n\n # skip header line\n header = reader.next()\n\n for row in reader:\n spir_point = float(row[0])\n mask_point = float(row[1])\n time_point = float(row[2])\n\n # Convert spir_pressure to estimated mouth pressure\n # if(spir_point < 0):\n # spir_point = relationship[filename][0]*spir_point**2 + relationship[filename][1]*spir_point\n\n spir_pressure.append(spir_point)\n mask_pressure.append(mask_point)\n time.append(time_point)\n\n plt.plot(spir_pressure)\n plt.plot(mask_pressure)\n plt.show()\n\n # Have the data not analysis\n # Split breaths\n # Find starts, middles and ends of flow data\n flow_splits = split_breaths(spir_pressure, peak_height=0.015, Fs=300, plot=False)\n flow_starts = flow_splits[0]\n flow_middles = flow_splits[1]\n flow_stops = flow_splits[2]\n\n decays = [[] for i in range(len(flow_middles))]\n peaks = [0 for i in range(len(flow_middles))]\n decay_end = [0 for i in range(len(flow_middles))]\n decay_start = [0 for i in range(len(flow_middles))]\n\n def get_decay_rate(pressure, i, flow_starts, flow_middles, flow_stops):\n #Find peak expiratory pressure\n # get min value between middle and end\n peak = min(pressure[flow_middles[i]:flow_stops[i]])\n peak_index = pressure[flow_middles[i]:flow_stops[i]].index(peak) #index relative end insp (flow middle)\n peaks[i] = peak_index\n print(\"peak:{}\".format(peak))\n print(\"peak_index:{}\".format(peak_index))\n\n # Find first index of value 0 relative to peak\n # If there isn't one, find the length from peak to end point\n# try:\n# first_zero = pressure[flow_middles[i]+peak_index:flow_stops[i]].index(0)\n# except(ValueError):\n# first_zero = flow_stops[i] - (flow_middles[i] + peak_index) - 2\n\n #linear relationship\n decay = [0, 0]\n decay_start = -1\n decay_end = -1\n j = peak_index\n # k_indexes len of expiration\n num_datapoints = flow_stops[i] - flow_middles[i]\n while j < num_datapoints:\n if(decay_start < 0):\n if(pressure[flow_middles[i] + j] > -0.10):\n decay_start = j\n j = j + 4\n else:\n if(pressure[flow_middles[i] + j] >= -0.045):\n decay_end = j - 1\n j = num_datapoints\n j+=1\n\n print(\"decay_start:{}\".format(decay_start))\n print(\"decay_end:{}\".format(decay_end))\n\n# # Find half way point to zero or end, relative to peak\n# drop_percentage = 63\n# thresh = (peak*(100-drop_percentage))/100\n# halfway_point = 0\n# search_index = 0\n# while search_index < first_zero:\n# value = pressure[flow_middles[i]+peak_index+search_index]\n# if value > thresh:\n# halfway_point = search_index\n# search_index = first_zero\n# search_index += 1\n#\n# #halfway_point = (first_zero)/2\n# print(\"first_zero:{}\".format(first_zero+peak_index))\n# print(\"halfway_point:{}\".format(halfway_point+peak_index))\n#\n# # Define range to determine decay rate here\n# # Relative to flow_middle\n# #decay_start = halfway_point + peak_index\n# 
decay_start = peak_index\n#        decay_end = first_zero + peak_index\n#        decay_end = halfway_point + peak_index\n\n        # Grab the curve of interest and make it all positive\n        # (assume no positive section in exp range)\n        curve = pressure[flow_middles[i]+decay_start:flow_middles[i]+decay_end]\n        curve = [np.abs(c) for c in curve]\n\n        # Fit curve to exponential model\n        # ln(Y) = At + ln(b)\n        # for y(t) = e^(At) + b\n        if(len(curve) > 3):\n            Fs = 90.0\n            measurements = array([np.log(curve)])\n\n            # set up array of data multiplied by constants\n            one_array = [1 for val in range(len(measurements.T))]\n            times = [j/Fs for j in range(len(measurements.T))]\n            independents = array([one_array,times])\n\n            # Least squares fit to data\n            result = lstsq(independents.T, measurements.T)\n\n            # Squared Euclidean 2-norm for each col in (b - a*x)\n            residual = result[1]\n            print('  residual on line fit is {}'.format(residual[0]))\n\n            # Parameters - offset and decay constant\n            constants = result[0]\n            constants[0] = np.exp(constants[0])\n            print('  offset is {}, decay is {}'.format(constants[0], constants[1]))\n\n            decay = constants\n\n        # Optional plot range and decay for each breath\n        if(1):\n            # Remake curve from best fit\n            # Length of curve is from decay_start to flow_stop point\n            Fs = float(90)\n            times = [x/Fs for x in range(flow_stops[i]-(flow_middles[i]+decay_start))]\n            A = decay[0]\n            k = decay[1]\n            dec_curve = [A*exp(t*k) for t in times]\n            dec_curve = [-c for c in dec_curve]\n\n            #times = [flow_middles[i]+peaks[i]+t for t in times]\n            plt.plot(range(decay_start,(flow_stops[i]-flow_middles[i])), dec_curve, '--k', linewidth=2)\n\n            plt.plot(pressure[flow_middles[i]:flow_stops[i]], '-r')\n            plt.plot(range(decay_start,decay_end), pressure[flow_middles[i]+decay_start:flow_middles[i]+decay_end], '-g', linewidth=2)\n            plt.show()\n\n        return (decay_start, decay_end, decay)\n\n\n\n    # Fit decay to each expiration\n    for i in range(len(flow_middles)):\n        params = get_decay_rate(spir_pressure, i, flow_starts, flow_middles, flow_stops)\n        decay_start[i] = params[0]\n        decay_end[i] = params[1]\n        decays[i] = params[2]\n\n    #plot\n    # Plot all data and show where breath middles & ends are located\n    # (the spirometer pressure trace stands in for the flow signal here)\n    plt.plot(spir_pressure)\n    dots = [spir_pressure[i] for i in flow_middles]\n    plt.plot(flow_middles, dots, 'ro')\n    dots = [spir_pressure[i] for i in flow_stops]\n    plt.plot(flow_stops, dots, 'ko')\n    plt.legend(['data', 'middles', 'ends'])\n    plt.show()\n\n    # Plot all data with decays of interest shown\n    plt.plot(#pressure, 'b',\n             spir_pressure, 'r',\n            )\n\n    # Define breaths of interest\n    boi = [\n           [0, 1, 2, 3, 4]\n          ]\n\n    # decays calculated from decay_start\n    # decay start is relative to flow_middle\n    for b in boi:\n        print('\\n')\n        for i in b:\n            Fs = float(90)\n            times = [x/Fs for x in range(flow_stops[i]-(flow_middles[i]+decay_start[i]))]\n            A = decays[i][0][0]\n            k = decays[i][1][0]\n            print(\"decay = {}\".format(k))\n            curve = [A*exp(t*k) for t in times]\n            curve = [-c for c in curve]\n\n            # Plot the decay overtop of the original data\n            times = range(flow_middles[i]+decay_start[i], flow_stops[i])\n            plt.plot(times, curve, '--k', linewidth=2)\n\n    plt.xticks(fontsize=20)\n    plt.yticks(fontsize=20)\n    #plt.xlim(0, 2900)\n    plt.ylim(-1, 1)\n    plt.grid()\n    plt.legend([#\"Pressure\",\n                \"Flow\",\n               ])\n    plt.xlabel(\"Data Point\", fontsize=32)\n    plt.ylabel(\"Flow (L/s)\", fontsize=32)\n    plt.show()\n","sub_path":"python/breath_hold/fit_decay_mask.py","file_name":"fit_decay_mask.py","file_ext":"py","file_size_in_byte":8179,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"485788071","text":"#!/usr/bin/python\n# -*- codding: utf-8 -*-\nimport os\nimport sys\nsys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))\nfrom common.execute_command import write_one_parameter\n\n# url : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/kms/schedule-key-deletion.html\nif __name__ == '__main__':\n \"\"\"\n\tcancel-key-deletion : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/kms/cancel-key-deletion.html\n \"\"\"\n\n parameter_display_string = \"\"\"\n # key-id : The unique identifier of the customer master key (CMK) to delete.\nSpecify the key ID or the Amazon Resource Name (ARN) of the CMK.\nFor example:\n\nKey ID: 1234abcd-12ab-34cd-56ef-1234567890ab\nKey ARN: arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab\n\nTo get the key ID and key ARN for a CMK, use ListKeys or DescribeKey .\n \"\"\"\n add_option_dict = {}\n\n #######################################################################\n # parameter display string\n add_option_dict[\"parameter_display_string\"] = parameter_display_string\n # ex: add_option_dict[\"no_value_parameter_list\"] = \"--single-parameter\"\n write_one_parameter(\"kms\", \"schedule-key-deletion\", \"key-id\", add_option_dict)\n\n\n\n\n\n","sub_path":"kms_write_1/key-deletion_schedule.py","file_name":"key-deletion_schedule.py","file_ext":"py","file_size_in_byte":1245,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"472584748","text":"# -*- coding: utf-8 -*-\nimport json\nimport re\nimport csv\nimport asyncio\nfrom json import JSONDecodeError\nfrom typing import Optional, List, Callable, Pattern\n\nfrom .tsv_parser import get_tsv_files_path\nfrom .async_serial import AsyncSerial\n\n\nclass SerialValidator:\n\n def __init__(self, port, script_path):\n script: list = SerialValidator.parser_script(script_path=script_path)\n\n self.serial_instance: AsyncSerial = AsyncSerial(port)\n self.script_to_func_generator(script=script)\n\n @staticmethod\n def translate_parameter(parameter: list):\n translated = []\n\n for p in parameter:\n sub = None\n if p[0] == 'send':\n sub = [p[0], str.encode(p[1][1:-1].encode().decode('unicode_escape'))]\n elif p[0] == 'wait_for_str':\n sub = [p[0], str(p[1][1:-1].encode().decode('unicode_escape'))]\n elif p[0] == 'wait_for_regex':\n sub = [p[0], re.compile(r'{}'.format(p[1][2:-1]))]\n elif p[0] == 'wait_for_json':\n sub = [p[0], str(p[1][1:-1].encode().decode('unicode_escape'))]\n elif p[0] == 'wait_for_time':\n sub = [p[0], int(p[1])]\n else:\n pass\n\n # append Timeout if exist\n if len(p) >= 3:\n sub.append(int(p[2]))\n else:\n sub.append(180) # Time out default 3 minute\n\n translated.append(sub)\n\n return translated\n\n @staticmethod\n def parser_script(script_path: str = None) -> list:\n path = script_path if script_path else get_tsv_files_path()[0]\n\n with open(path) as tf:\n reader = list(csv.reader(tf, delimiter='\\t'))\n if reader[0][0] == 'ACTION' and reader[0][1] == 'CONTENT':\n return SerialValidator.translate_parameter(reader[1:])\n\n return None\n\n def script_to_func_generator(self, script: list):\n hooks = []\n\n for i in script:\n if i[0] == 'wait_for_str':\n hooks.append([self.wait_for_string, *i[1:]])\n\n elif i[0] == 'wait_for_regex':\n hooks.append([self.wait_for_regex, *i[1:]])\n\n elif i[0] == 'send':\n hooks.append([self.send_command, *i[1:]])\n\n elif i[0] == 'wait_for_json':\n hooks.append([self.wait_for_json, *i[1:]])\n\n elif i[0] == 
'wait_for_time':\n hooks.append([self.wait_for_time, *i[1:]])\n\n self.serial_instance.hooks = hooks\n\n async def nothing(self):\n return False\n\n async def contains_regex(self, regex: Pattern):\n return SerialValidator.contains(text=self.serial_instance.load_buffer.output.getvalue(), regex=regex)\n\n async def contains_string(self, mesg: str):\n return mesg in self.serial_instance.load_buffer.output.getvalue()\n\n @staticmethod\n async def contains_json(text: str, scheme: dict, regex: Pattern=re.compile(r'^\\s*\\{.*\\}\\s*$')):\n for line in text.splitlines():\n if SerialValidator.contains(text=line, regex=regex):\n try:\n json_value = json.loads(line.strip())\n\n for key in scheme.keys():\n if scheme[key]['type'] == 'int':\n if type(json_value[key]) != int:\n return False\n elif scheme[key]['type'] == 'float':\n if type(json_value[key]) != float:\n return False\n elif scheme[key]['type'] == 'bool':\n if type(json_value[key]) != bool:\n return False\n elif scheme[key]['type'] == 'str':\n if type(json_value[key]) != str:\n return False\n if not SerialValidator.contains(text=json_value[key], regex=re.compile(scheme[key].get('regex', r'.*'))):\n return False\n\n return True\n except Exception as e:\n continue\n\n return False\n\n async def wait_for_time(self, *args):\n return await SerialValidator.wait_for(lambda: self.nothing(), n_retry=args[0])\n\n async def wait_for_regex(self, *args):\n return await SerialValidator.wait_for(lambda: self.contains_regex(re.compile(args[0])), n_retry=args[1])\n\n async def wait_for_string(self, *args):\n return await SerialValidator.wait_for(lambda: self.contains_string(args[0]), n_retry=args[1])\n\n async def wait_for_json(self, *args, regex: str=r'^\\s*\\{.*\\}\\s*$'):\n try:\n return await SerialValidator.wait_for(lambda: SerialValidator.contains_json(\n self.serial_instance.load_buffer.output.getvalue(),\n json.loads(args[0]),\n re.compile(regex)), n_retry=args[1])\n except JSONDecodeError:\n return False\n\n async def send_command(self, *args):\n await self.serial_instance.send_buffer.put(args[0])\n await self.serial_instance.send_buffer.join()\n\n @staticmethod\n def contains(text: str, regex: Pattern) -> bool:\n print(\"\\n======= text ======== \")\n print(text)\n print(\"======= end ========\")\n for line in text.splitlines():\n if bool(regex.search(line)):\n return True\n return False\n\n @staticmethod\n async def wait_for(predict: Callable, n_retry: int = -1, seconds: int = 1):\n result: bool = False\n\n while True:\n if n_retry == 0:\n break\n\n if await predict():\n print(\"get string !!!\")\n result = True\n break\n else:\n await asyncio.sleep(seconds)\n\n if n_retry > 0:\n n_retry -= 1\n\n return result\n\n async def validate(self):\n await self.serial_instance.console()\n\n\ndef main() -> int:\n serial_validator = SerialValidator(port='/dev/cu.usbserial')\n serial_console_future: asyncio.Future = asyncio.ensure_future(serial_validator.validate())\n\n ret: int = 0\n\n try:\n loop = asyncio.get_event_loop()\n loop.run_until_complete(serial_console_future)\n loop.close()\n except Exception as e:\n print(e)\n ret = 1\n\n return ret\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"src/validator.py","file_name":"validator.py","file_ext":"py","file_size_in_byte":6446,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"470650613","text":"\"\"\"Script for plotting DLA data; ie, for the column density function, etc.\nColumn density data courtesy of Yajima Hidenobu\"\"\"\n\nimport 
matplotlib.pyplot as plt\nimport os.path as path\nimport numpy as np\n\ndef column_density_data(datadir=\"data\"):\n \"\"\"Plot the data on the column density function at z=3\"\"\"\n# celine_data(datadir)\n# peroux_data(datadir)\n omeara_data(datadir)\n noterdaeme_12_data(datadir)\n# prochaska_data(datadir)\n# prochaska_05_data(datadir)\n prochaska_10_data(datadir)\n\ndef format_error_bars(data):\n \"\"\"Take a file formatted for SuperMongo and format it instead for matplotlib,\n adjusting the format of the error bars\n In-Format: log10(x,y,x-errorx,x+errorx, y-errorx, y+errory)\n Out-Format: x,y,lerrorx,uerrorx,lerrory,uerrory\n \"\"\"\n data = 10**data\n #Format the error bars the way matplotlib likes them\n data[:,2]=-data[:,2]+data[:,0]\n data[:,3]=data[:,3]-data[:,0]\n data[:,4]=-data[:,4]+data[:,1]\n data[:,5]=data[:,5]-data[:,1]\n return data\n\ndef celine_data(datadir=\"data\"):\n \"\"\"Plot the Peroux 2001 data on the column density function at z=3\"\"\"\n celine=np.loadtxt(path.join(datadir,\"fn_celine_z3.dat\"))\n celine=format_error_bars(celine)\n plt.errorbar(celine[:,0],celine[:,1],xerr=[celine[:,2],celine[:,3]], yerr=[celine[:,4],celine[:,5]], fmt='o')\n return\n\ndef peroux_data(datadir=\"data\"):\n \"\"\"Plot the Peroux 2005 data on the column density function at z=3\"\"\"\n peroux=np.loadtxt(path.join(datadir,\"peroux05_z3.dat\"))\n peroux=format_error_bars(peroux)\n plt.errorbar(peroux[:,0],peroux[:,1],xerr=[peroux[:,2],peroux[:,3]], yerr=[peroux[:,4],peroux[:,5]], fmt='*')\n\ndef omeara_data(datadir=\"data\"):\n \"\"\"Plot the O'Meara 07 data on the column density function (LLS). Mean redshift is 3.1\"\"\"\n omera=np.loadtxt(path.join(datadir,\"summary.dat\"))\n omera=format_error_bars(omera)\n #Take the first moment\n for i in (1,4,5):\n omera[:,i]*=omera[:,0]\n plt.errorbar(omera[:,0],omera[:,1],xerr=[omera[:,2],omera[:,3]], yerr=[omera[:,4],omera[:,5]], fmt='s',color='black',ms=8)\n\ndef noterdaeme_data(datadir=\"data\"):\n \"\"\"Plot the Noterdaeme 09 data on the column density function at z=2-3\n Format: x, y, xerr, yerr (in logspace)\"\"\"\n data=np.loadtxt(path.join(datadir,\"fhix.dat\"))\n #Madness to put log errors into non-log\n uxer=10**(data[:,2]+data[:,0])-10**data[:,0]\n lxer=-10**(-data[:,2]+data[:,0])+10**data[:,0]\n uyer=10**(data[:,3]+data[:,1])-10**data[:,1]\n lyer=-10**(-data[:,3]+data[:,1])+10**data[:,1]\n NHI = 10**data[:,0]\n plt.errorbar(NHI,10**data[:,1]*NHI,xerr=[lxer,uxer],yerr=[lyer*NHI,uyer*NHI], fmt='^',color='green',ms=10)\n\ndef noterdaeme_12_data(datadir=\"data\"):\n \"\"\"Plot the Noterdaeme 12 data (1210.1213) on the column density function at z=2-3.5\n Format: x, y, xerr, yerr (in logspace)\"\"\"\n data=np.loadtxt(path.join(datadir,\"not_2012.dat\"))\n #Madness to put log errors into non-log\n uxer=10**(data[:,2]+data[:,0])-10**data[:,0]\n lxer=-10**(-data[:,2]+data[:,0])+10**data[:,0]\n uyer=10**(data[:,3]+data[:,1])-10**data[:,1]\n lyer=-10**(-data[:,3]+data[:,1])+10**data[:,1]\n NHI = 10**data[:,0]\n plt.errorbar(NHI,10**data[:,1]*NHI,xerr=[lxer,uxer],yerr=[lyer*NHI,uyer*NHI], fmt='^',color='green',ms=10)\n\ndef prochaska_data(datadir=\"data\"):\n \"\"\"Plot the Prochaska and Wolfe 10 data on the column density function.\n Mean redshift is 3.05.\n Format: x lowerxerr upperxerr y\"\"\"\n data=np.loadtxt(path.join(datadir,\"2fn_sdss_dr5.dat\"))\n data=10**data\n plt.errorbar(data[:,0],data[:,3],xerr=[data[:,0]-data[:,1],data[:,2]-data[:,0]],fmt='.')\n\ndef prochaska_05_data(datadir=\"data\"):\n \"\"\"Plot the Prochaska 05 data on the 
column density function at z=3\"\"\"\n omera=np.loadtxt(path.join(datadir,\"prochaska_05.dat\"))\n omera=format_error_bars(omera)\n plt.errorbar(omera[:,0],omera[:,1],xerr=[omera[:,2],omera[:,3]], yerr=[omera[:,4],omera[:,5]], fmt='D')\n\ndef prochaska_10_data(datadir=\"data\"):\n \"\"\"Plot the LLS only data of Prochaska 2010, given as a box rather than the more conventional error bars.\n This is at z=3.7\"\"\"\n data=np.loadtxt(path.join(datadir,\"prochaska_lls.dat\"))\n ax=plt.gca()\n ax.fill(10.**data[:,0],10.**(data[:,1]+data[:,0]),'grey')\n\n\ndef dndx_pro(datadir=\"data\"):\n \"\"\"Plot the line densities for DLAs from Prochaska & Wolfe 2009, 0811.2003\"\"\"\n data = np.loadtxt(path.join(datadir,\"dndx.txt\"))\n zcen = (data[1:-1,0]+data[1:-1,1])/2.\n plt.errorbar(zcen, data[1:-1,2],xerr=[zcen-data[1:-1,0], data[1:-1,1]-zcen], yerr=data[1:-1,3], fmt=\"o\",color=\"orange\")\n\ndef omegahi_pro(datadir=\"data\"):\n \"\"\"Plot the total rho_HI density for DLAs from Prochaska & Wolfe 2009, 0811.2003\"\"\"\n data = np.loadtxt(path.join(datadir,\"dndx.txt\"))\n zcen = (data[1:-1,0]+data[1:-1,1])/2.\n rhohi = data[1:-1,4]\n #This is rho_crit at z=0\n rho_crit = 9.3125685124148235e-30\n #This converts from 1e8 M_sun/Mpc^3 to g/cm^3\n conv = 6.7699111782945424e-33\n #A factor of 0.76 from HI mass to gas mass\n #Note: this factor is 0.74, so that the Noterdaeme\n #Omega_DLA is numerically similar to the rho_HI of Prochaska\n omega_DLA = rhohi*conv/rho_crit*1000\n plt.errorbar(zcen, rhohi,xerr=[zcen-data[1:-1,0], data[1:-1,1]-zcen], yerr=data[1:-1,5], fmt=\"o\",color=\"orange\")\n\ndef omegahi_not():\n \"\"\"Omega_DLA from Noterdaeme 2012, 1210.1213\"\"\"\n #He divides these measurements by 0.76,\n #which he thinks gives him the neutral gas mass in DLAs, because all this hydrogen\n #is neutral. However, some of the hydrogen is molecular, so the factor is daft.\n omega_dla = np.array([0.99, 0.87, 1.04, 1.1, 1.27])*0.76\n omega_err = np.array([0.05,0.04, 0.05,0.08,0.13])\n zz = [2.15,2.45,2.75,3.05,3.35]\n plt.errorbar(zz, omega_dla,xerr=0.15, yerr=omega_err, fmt=\"s\",color=\"black\")\n\ndef dndx_not():\n \"\"\"dNdX from Noterdaeme 2012, 1210.1213\"\"\"\n #No error on dndz...use the systematic correction.\n dndz = np.array([0.2,0.2,0.25,0.29,0.36])\n #No error bars quoted in the paper for dndz (?)\n zz = [2.15,2.45,2.75,3.05,3.35]\n dzdx = np.array([3690/11625.,4509/14841.,2867/9900.,1620/5834.,789/2883.])\n plt.errorbar(zz,dndz*dzdx,xerr=0.15,fmt=\"s\",color=\"black\")\n\n# def prochaska_10_data():\n# \"\"\"Plot the six-power-law model of Prochaska 2010. A little too complicated.\"\"\"\n# NHI = np.logspace(14.5,23)\n# fN = np.array(NHI)\n# #Central power law parameters: break, exponent, norm\n# #Lya, pLLS, LLS, SLLS, DLA1, DLA2\n# #Note DLA is defined as k*(N/10^21.75)^b,\n# #but all the others are k * N^b...\n# breaks=np.array([14.5,17.3,19.,20.3,21.75])\n# exp = np.array([-1.5,-1.9,-0.8,-1.2,-1.8,-3])\n# norm = np.array([, ,10.**(-4.5) , 4.56e6,7e-25,7e-25])\n# #DLAs\n# ind = np.where(NHI > 10**breaks[3])\n#\n#\n\ndef absorption_distance():\n \"\"\"Compute X(z), the absorption distance per sightline (eq. 
9 of Nagamine et al 2003)\n in dimensionless units.\"\"\"\n #h * 100 km/s/Mpc in h/s\n h100=3.2407789e-18\n # in cm/s\n light=2.9979e10\n #Internal gadget length unit: 1 kpc/h in cm/h\n UnitLength_in_cm=3.085678e21\n redshift = 3\n box = 25000\n #Units: h/s s/cm kpc/h cm/kpc\n return h100/light*(1+redshift)**2*box*UnitLength_in_cm\n\ndef altay_data():\n \"\"\"Plot the simulation cddf from Altay 2011: 1012.4014\"\"\"\n #His bins\n edges = 10**np.arange(17,22.51,0.1)\n #His data values\n cddf = np.array([858492, 747955, 658685, 582018, 518006, 468662, 431614, 406575, 387631, 374532, 359789, 350348, 342146, 334534, 329178, 324411, 320648, 318207, 316232, 314852, 314504, 314583, 313942, 315802, 316330, 316884, 317336, 317979, 316526, 317212, 314774, 309333, 302340, 291816, 275818, 254368, 228520, 198641, 167671, 135412, 103583, 76751, 54326, 37745, 25140, 16784, 10938, 6740, 3667, 1614, 637, 206, 33, 14, 7])\n center = np.array([(edges[i]+edges[i+1])/2. for i in range(0,np.size(edges)-1)])\n width = np.array([edges[i+1]-edges[i] for i in range(0,np.size(edges)-1)])\n #Grid size (in cm^2)\n dX=absorption_distance()\n tot_cells = 16384**2\n cddf=(cddf)/(width*dX*tot_cells)\n plt.loglog(center,cddf)\n\n\n","sub_path":"dla_data.py","file_name":"dla_data.py","file_ext":"py","file_size_in_byte":8202,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"8511146","text":"from __future__ import unicode_literals\n\nfrom .serial import Serial\n\nimport csv\n\nSERIALIZER_SETTINGS = dict(\n lineterminator=str(''),\n delimiter=str('~'),\n doublequote=False,\n escapechar=str('\\\\'),\n quoting=csv.QUOTE_NONE)\n\ns = Serial(**SERIALIZER_SETTINGS)\n\n\ndef serialize_bookmark(x):\n x, backwards = x\n ss = s.serialize_values(x)\n direction = '<' if backwards else '>'\n return direction + ss\n\n\ndef unserialize_bookmark(x):\n if not x:\n return None, False\n\n direction = x[0]\n\n if direction not in ('>', '<'):\n raise ValueError\n\n backwards = direction == '<'\n cells = s.unserialize_values(x[1:])\n return cells, backwards\n\n\nclass Page(list):\n def scalar(self):\n return self.one()[0]\n\n def one(self):\n c = len(self)\n\n if c < 1:\n raise RuntimeError('tried to select one but zero rows returned')\n elif c > 1:\n raise RuntimeError('too many rows returned')\n else:\n return self[0]\n\n def keys(self):\n return self._keys\n\n\nclass Paging(object):\n \"\"\"\n Object with paging information. Most properties return a page marker. Prefix these properties with 'bookmark_' to get the serialized version of that page marker. 
Naming conventions are as follows:\n\n Ordering as returned by the query\n ---------------------------------\n\n - 0: the key used in the where clause\n - 1: the key of the first row returned\n - n: the key of the nth row returned\n - nplus1: the marker of the row returned beyond n\n - further: the direction continuing in this order\n\n\n Ordering once flipped if necessary (ie for backwards-facing pages)\n ------------------------------------------------------------------\n\n - next: the next page\n - previous: the previous page\n\n - current_forward: the marker\n - current_backward: the marker for this page going backwards\n - current: the marker as actually used\n - current_opposite: the marker for the same page in the opposite direction\n\n\n Tests\n -----\n\n - has_next: True if there's more rows after this page.\n - has_previous: True if there's more rows before this page.\n - has_further: True if there's more rows in the paging direction.\n\n \"\"\"\n\n def __init__(\n self,\n rows,\n per_page,\n ocols,\n backwards,\n current_marker,\n get_marker,\n keys=None):\n\n self._keys = keys\n\n self.original_rows = rows\n\n self.per_page = per_page\n self.backwards = backwards\n\n excess = rows[per_page:]\n rows = rows[:per_page]\n self.rows = rows\n\n self.marker_0 = current_marker\n\n if rows:\n self.marker_1 = get_marker(rows[0], ocols)\n self.marker_n = get_marker(rows[-1], ocols)\n else:\n self.marker_1 = None\n self.marker_n = None\n\n if excess:\n self.marker_nplus1 = get_marker(excess[0], ocols)\n else:\n self.marker_nplus1 = None\n\n four = [self.marker_0, self.marker_1, self.marker_n, self.marker_nplus1]\n\n if backwards:\n self.rows.reverse()\n four.reverse()\n\n self.before, self.first, self.last, self.beyond = four\n\n @property\n def has_next(self):\n return bool(self.beyond)\n\n @property\n def has_previous(self):\n return bool(self.before)\n\n @property\n def next(self):\n return (self.last or self.before), False\n\n @property\n def previous(self):\n return (self.first or self.beyond), True\n\n @property\n def current_forwards(self):\n return self.before, False\n\n @property\n def current_backwards(self):\n return self.beyond, True\n\n @property\n def current(self):\n if self.backwards:\n return self.current_backwards\n else:\n return self.current_forwards\n\n @property\n def current_opposite(self):\n if self.backwards:\n return self.current_forwards\n else:\n return self.current_backwards\n\n @property\n def further(self):\n if self.backwards:\n return self.previous\n else:\n return self.next\n\n @property\n def has_further(self):\n if self.backwards:\n return self.has_previous\n else:\n return self.has_next\n\n @property\n def is_full(self):\n return len(self.rows) == self.per_page\n\n def __getattr__(self, name):\n PREFIX = 'bookmark_'\n if name.startswith(PREFIX):\n _, attname = name.split(PREFIX, 1)\n x = getattr(self, attname)\n return serialize_bookmark(x)\n\n raise AttributeError\n","sub_path":"sqlakeyset/results.py","file_name":"results.py","file_ext":"py","file_size_in_byte":4728,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"475662962","text":"\"\"\"\nELE/MCE 456 - Found. 
of Robotics\nFinal Project: Quadcoptor Following Ground Robot\n\nGroup Number 2:\n\nAuthors:\nMatthew Morgan\nAustin Clark\nNataly Cruz\n\"\"\"\n\n### --- Libraries Used --- ###\nimport rospy\nfrom std_msgs.msg import Empty\nfrom std_msgs.msg import String\nfrom geometry_msgs.msg import Pose\nfrom geometry_msgs.msg import Twist\n\nfrom sensor_msgs.msg import Image ## NEW\nfrom nav_msgs.msg import Odometry ## NEW\nfrom cv_bridge import CvBridge ## NEW\n\nimport cv2 ## opencv\nimport numpy as np ## NEW\nimport array\nimport math\nimport time\n\n### --- Global Veriables --- ###\ncmd_pub = rospy.Publisher(\"/drone/cmd_vel\", Twist, queue_size=1)\nerror_integral = 0.0\nerror_previous = 0.0\n\nrobot_x = 0.0 ## NEW\nrobot_y = 0.0 ## NEW \n\ndesired_z = 20\ndesired_y = 0.0\ndesired_x = 1.0\n\n### --- camera parameters --- ###\n\naf = 320.255 \nIx = 640 \nIy = 360\n\nIx0 = Ix/2\nIy0 = Iy/2\n\n### --- coordinates used for ground robot --- ###\n\nX = 0\nY = 0\n\ncX = 0\ncY = 0\n\n### --- aerial drone coordinates --- ###\n\ndrone_x = 0.0 \ndrone_y = 0.0 \ndrone_z = 0.0 \ndrone_yaw = 0.0\n\n# For Converting ROS Image to OpenCV Image\nbridge = CvBridge() \n\n# this is our callback function: it is executed any time a message on the specified topic\n# is received. In our case, the specified topic is /drone/gt_pose.\n# The message received will be available in the function through the variable msg \n\n### --- callback function --- ###\ndef poseCallback(msg):\n time.sleep(0.05) #slows continuous computation\n\n global drone_x; drone_x = msg.position.x \n global drone_y; drone_y = msg.position.y \n global drone_z; drone_z = msg.position.z\n global drone_yaw; drone_yaw = msg.orientation.z #.w or .z for yaw?\n \n # to use a global variable in a function, you must explicitly\n # redefine with the global attribute\n global cmd_pub \n global error_integral \n global error_previous \n\n\n global robot_x ## NEW\n global robot_y ## NEW\n\n global cX\n global cY\n global X\n global Y\n\n global desired_z\n global desired_y\n global desired_x\n\n ### ---- Frame manipulation math below ---- ###\n \n if (cX != 0.0 or cY != 0.0):\n print('--------------------------------------------')\n R_wq = np.array([[math.cos(drone_yaw), -(math.sin(drone_yaw)), 0.0],\n [math.sin(drone_yaw), math.cos(drone_yaw), 0.0],\n [0.0, 0.0, 1.0]]) \n\n #quadrotor in world frame \n print('')\n print(R_wq)\n print('R_wq')\n\n R_qc = np.array([[0.0, -0.98545, 0.16997],[-1.0, 0.0, 0.0],[0.0, -0.1699, -0.98545]])\n #R_qc = np.array([[-0.9855, 0.0, 0.1698],[0.0, 1.000, 0.0],[-0.1698, 0.0, -0.9855]]) \n #camera in quadrotor frame\n print('')\n print(R_qc)\n print('R_qc')\n\n R_wc = R_wq.dot(R_qc) \n print('')\n print(R_wc)\n print('R_wc')\n #camera in world frame\n\n P_wq = np.array([[drone_x], [drone_y], [drone_z]])\n print('')\n print(P_wq)\n print('P_wq')\n\n T = np.concatenate((R_wc,P_wq), axis = 1) \n #intermediate step to compute homogenous transform matrix\n print('')\n print(T)\n print('T')\n\n T1 = np.array([0.0, 0.0, 0.0, 1.0])\n #intermediate step to compute homogenous transform matrix\n print(' ')\n print(T1)\n print('T1')\n\n\n T_wc = np.vstack((T,T1)) #homogenous transform matrix\n print(' ')\n print(T_wc)\n print('T_wc')\n\n P_ct = np.array([[X],[Y],[20.0],[1.0]]) \n #Target array in camera frame\n print(' ')\n print(P_ct)\n print('P_ct')\n\n\n P_wt = T_wc.dot(P_ct)\n #Target array in world frame ******\n print(' ')\n print(P_wt)\n print('P_wt')\n ##Coordinates of robot determined by camera in world frame\n #robot_x = P_wt[1] \n 
#robot_y = P_wt[2]\n \n print('--------------------------------------------')\n \n print(\" target x pix \", cX, \" target y pix \", cY) ## open image coordinates\n\n print('')\n print(\" position x \", drone_x)\n print(\" position y \", drone_y)\n print(\" position z \", drone_z)\n print(\" yaw z \", drone_yaw)\n print('')\n\n\n # Issue position from current location of drone in frame\n # Pass parameters to PID controllers \n if (cX != 0.0 or cY != 0.0):\n desired_y = robot_y #Y\n desired_x = robot_x - 1 #X\n if cY > 100 and cY < 300 and desired_z >= 2:\n desired_z = msg.position.z #slow the drone\n desired_z = desired_z - 1\n else:\n desired_z = msg.position.z #slow the drone\n desired_z = desired_z + 0.5\n \n # z controller - up and down\n error_z = desired_z - msg.position.z\n linear_velocity_z = 1*error_z \n\n # y controller - side to side\n error_y = desired_y - msg.position.y\n linear_velocity_y = 0.2*error_y\n \n # x controller - forward and backward\n error_x = desired_x - msg.position.x\n linear_velocity_x = 0.2*error_x\n\n\n #cmd_pub can be issuing multiple velocity commands at once? \n cmdmsg = Twist()\n \n cmdmsg.linear.x = linear_velocity_x \n cmdmsg.linear.y = linear_velocity_y \n cmdmsg.linear.z = linear_velocity_z\n\n cmd_pub.publish(cmdmsg)\n\n\n\n### --- function for imaging data --- ###\n\ndef imageCallback(msg):\n \n global af\n global Ix0\n global Iy0 \n global bridge\n global cX\n global cY\n global X\n global Y\n\n\n cv_image = bridge.imgmsg_to_cv2(msg, desired_encoding='passthrough') #Convert ROS Image to OpenCV Image\n\n\n lower = np.array([240,240,240], dtype = \"uint8\") ## Lower color bound (Off white)\n upper = np.array([255,255,255], dtype = \"uint8\") ## Upper color bound (White)\n\n gray_image = cv2.cvtColor(cv_image, cv2.COLOR_BGR2GRAY) #returns grayscale image \n\n ### ------------------------------------------------- ###\n\n # convert the grayscale image to binary image\n ret,thresh = cv2.threshold(gray_image,230,255,0)\n\t# calculate moments of binary image\n M = cv2.moments(thresh)\n\n\t# calculate x,y coordinate of center of geometry (in pixels)\n if M[\"m00\"] != 0:\n\t cX = int(M[\"m10\"] / M[\"m00\"])\n\t cY = int(M[\"m01\"] / M[\"m00\"])\n else:\n\t cX, cY = 0, 0\n\n X = ((cX-Ix0)*drone_z)/af\n Y = ((cY-Iy0)*drone_z)/af\n\n # put text and highlight the center\n cv2.circle(cv_image, (cX, cY), 5, (255, 255, 255), -1)\n cv2.putText(cv_image, \"centroid\", (cX - 25, cY - 25),cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 2)\n\n # display the image\n cv2.imshow(\"Image\", cv_image)\n cv2.waitKey(3)\n\n ### ---------different image outputs from Opencv--------- ###\n\n mask = cv2.inRange(cv_image, lower, upper) #returns binary mask that fall within bounds\n output = cv2.bitwise_and(cv_image, cv_image, mask = mask) #computes underlying binary representation of integers in input array\n\n cv2.imshow(\"Image window - Grayscale\", gray_image) #Create Window to show the image (grayscale)\n #cv2.imshow(\"Image window - Raw\", cv_image) #Create Window to show the image\n #cv2.imshow(\"Image window - OpenCV\", output) #Create Window to show the image\n cv2.waitKey(3) #Need to wait for window to load\n\n\n\n# Fucntion not used to give positional data to aerial drone\n# Use for reference def odomCallback(msg)\ndef odomCallback(msg):\n global robot_x; global robot_y\n robot_x = msg.pose.pose.position.x\n robot_y = msg.pose.pose.position.y\n\n\ndef my_first_controller():\n # In ROS, nodes are uniquely named. 
If two nodes with the same\n # name are launched, the previous one is kicked off. The\n # anonymous=True flag means that rospy will choose a unique\n # name for our 'listener' node so that multiple listeners can\n # run simultaneously.\n pub = rospy.Publisher(\"/drone_0/cmd_vel\", Empty, queue_size=1)\n\n rospy.init_node('my_first_controller', anonymous=False)\n\n rospy.Subscriber(\"/drone/gt_pose\", Pose, poseCallback, queue_size=1)\n\n rospy.Subscriber(\"/drone/camera/image_raw\", Image, imageCallback, queue_size=1) ## NEW\n rospy.Subscriber(\"/my_robot/odom\", Odometry, odomCallback, queue_size=1) ## NEW\n\n emptymsg = Empty()\n pub.publish(emptymsg)\n\n # spin() simply keeps python from exiting until this node is stopped\n rospy.spin()\n\n\n\n# this is the main function: the program starts here\nif __name__ == '__main__':\n my_first_controller()","sub_path":"scripts/Quad_Controller_Group2.py","file_name":"Quad_Controller_Group2.py","file_ext":"py","file_size_in_byte":8412,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"342859180","text":"def conv(sd):\n sl=sd.items()\n for x in sl:\n print(x)\n \n s2=[]\n for x in sl:\n s2.append(x[0]+'='+x[1])\n \n s=';'.join(s2)\n return s\n\n\nsd={'laal': 'red', 'peela': 'yellow', 'neela': 'blue', 'kaala': 'black'}\n\nprint(conv(sd))\n\n#sd={'laal': 'red', 'peela': 'yellow', 'neela': 'blue', 'kaala': 'black'}\n#sl=[]\n#sl=sd.items()\n#for x in sl:\n# print(x)\n\n#s2=[]\n\n#for x in sl:\n# s2+=x\n \n#print(s2)\n#count=0\n#s=' '.join(s2)\n#s=s.replace(' ',';')\n#sx=list(s)\n\n#for i in sx:\n# if i==';':\n# if count%2!=0:\n# count+=1\n# continue\n# i=i.replace(';','=')\n# count+=1\n\n#s=''.join(sx)\n#print(s)\n","sub_path":"py3/dictconv.py","file_name":"dictconv.py","file_ext":"py","file_size_in_byte":611,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"97176523","text":"from torch import cuda\n\n# This file contains the configuration parameters which will be used throughout your experiments\nuse_cuda = cuda.is_available()\n\nstart_iteration = 0\nn_iterations = 120000\n\nsave_every_n_iters = 1000\n\nbatch_size = 8\n\nn_classes = 34\n\nmodel = 'CapsuleModel5' # 'CapsuleModel'\n\n# model_id = 1 # CapsuleModel4, 16 inital capsules, vote_dim_seg and init_capsule_dim = 16, vote_dim=32\n# model_id = 2 # CapsuleModel4, top_down_routing=True, vote_dim_seg and init_capsule_dim = 32, vote_dim=128\n# model_id = 3 # CapsuleModel4, concat\n# model_id = 4 # CapsuleModel4, add final two dimensions\n# model_id = 5 # CapsuleModel2, use_top_down_routing = True, no positional encoding\n# model_id = 6 # CapsuleModel2 vote_dim = 128, initial capsules = 16\n\n# model_id = 7 # CapsuleModel5, two_stage=False\nmodel_id = 8 # CapsuelModel5, two_stage=n_iter>20k\n\n# model_id = 9 # CapsuleModel4\n# model_id = 10 # CapsuleModel4, fgbg_weights are 10 (not 3)\n\n# model_id = 11 # CapsuleModel5, two_stage=True\n\nsave_dir = './SavedModels/Run%d/' % model_id\n\nlearning_rate = 1e-3\nweight_decay = 0.0\n\npoly_lr_scheduler = True\nuse_dropout = True\npositional_encoding = False\npositional_encoding_type = 'addition'\nn_init_capsules = 8\nmultiple_capsule_layers = False\nvote_dim = 128 #32\nlmda = 1\ninit_capsule_dim = 32 #16\nvote_dim_seg = 32 #16\nuse_top_down_routing = False\n\ndata_dir = './CityscapesData'\n\nnum_workers = 8\n\nseg_coef = 1.0\nregression_coef = 1.0\nclass_coef = 1.0\n\nh, w = 512, 1024\n\nuse_instance = 
True\n","sub_path":"config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":1498,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"325003009","text":"import tensorflow as tf\n\n\ndef encoder(units, vocab_size, embedding_dim, max_utterance, max_sentence):\n \"\"\"\n SMN的Encoder,主要对utterances和responses进行基本的\n 嵌入编码,以及对response的Word级语义进行建模\n Args:\n units: GRU单元数\n vocab_size: embedding词汇量\n embedding_dim: embedding维度\n Returns:\n \"\"\"\n utterance_inputs = tf.keras.Input(shape=(max_utterance, max_sentence))\n response_inputs = tf.keras.Input(shape=(max_sentence,))\n\n embeddings = tf.keras.layers.Embedding(vocab_size, embedding_dim, name=\"encoder\")\n utterance_embeddings = embeddings(utterance_inputs)\n response_embeddings = embeddings(response_inputs)\n\n # 这里对response进行GRU的Word级关系建模,这里用正交矩阵初始化内核权重矩阵,用于输入的线性变换。\n response_outputs = tf.keras.layers.GRU(units=units, return_sequences=True,\n kernel_initializer='orthogonal')(response_embeddings)\n\n # 将utterances的第一维和第二维进行调整,方便后面进行utterance-response配对\n # utterance_embeddings = tf.transpose(utterance_embeddings, perm=[1, 0, 2, 3])\n # # 同样的,为了后面求相似度矩阵,这里将第二维和第三维度进行调整\n # response_embeddings = tf.transpose(response_embeddings, perm=[0, 2, 1])\n # response_outputs = tf.transpose(response_outputs, perm=[0, 2, 1])\n\n return tf.keras.Model(inputs=[utterance_inputs, response_inputs],\n outputs=[utterance_embeddings, response_embeddings, response_outputs])\n\n\ndef decoder(units, embedding_dim, max_utterance, max_sentence):\n utterance_inputs = tf.keras.Input(shape=(max_utterance, max_sentence, embedding_dim))\n response_inputs = tf.keras.Input(shape=(max_sentence, embedding_dim))\n response_gru = tf.keras.Input(shape=(max_sentence, units))\n a_matrix = tf.random.uniform(shape=(units, units), maxval=1, minval=-1)\n\n conv2d_layer = tf.keras.layers.Conv2D(filters=8, kernel_size=(3, 3), padding='valid',\n kernel_initializer='he_normal', activation='relu')\n max_polling2d_layer = tf.keras.layers.MaxPooling2D(pool_size=(3, 3), strides=(3, 3), padding='valid')\n dense_layer = tf.keras.layers.Dense(50, activation='tanh', kernel_initializer='glorot_normal')\n\n # 这里需要做一些前提工作,因为我们要针对每个batch中的每个utterance进行运算,所\n # 以我们需要将batch中的utterance序列进行拆分,使得batch中的序列顺序一一匹配\n utterance_embeddings = tf.unstack(utterance_inputs, num=max_utterance, axis=1)\n matching_vectors = []\n for utterance_input in utterance_embeddings:\n # 求解第一个相似度矩阵,公式见论文\n matrix1 = tf.matmul(utterance_input, response_inputs, transpose_b=True)\n utterance_gru = tf.keras.layers.GRU(units, return_sequences=True,\n kernel_initializer='orthogonal')(utterance_input)\n matrix2 = tf.einsum(\"aij,jk->aik\", utterance_gru, a_matrix)\n # matrix2 = tf.matmul(utterance_gru, a_matrix)\n matrix2 = tf.matmul(matrix2, response_gru, transpose_b=True)\n matrix = tf.stack([matrix1, matrix2], axis=3)\n\n conv_outputs = conv2d_layer(matrix)\n pooling_outputs = max_polling2d_layer(conv_outputs)\n flatten_outputs = tf.keras.layers.Flatten()(pooling_outputs)\n\n matching_vector = dense_layer(flatten_outputs)\n matching_vectors.append(matching_vector)\n vector = tf.stack(matching_vectors, axis=1)\n _, outputs = tf.keras.layers.GRU(units, return_state=True,\n kernel_initializer='orthogonal')(vector)\n\n return tf.keras.Model(inputs=[utterance_inputs, response_inputs, response_gru], outputs=outputs)\n\n\ndef smn(units, vocab_size, embedding_dim, max_utterance, max_sentence):\n utterances = tf.keras.Input(shape=(None, None))\n responses = 
tf.keras.Input(shape=(None,))\n\n utterances_embeddings, responses_embeddings, responses_gru = \\\n encoder(units=units, vocab_size=vocab_size, embedding_dim=embedding_dim,\n max_utterance=max_utterance, max_sentence=max_sentence)(inputs=[utterances, responses])\n dec_outputs = decoder(units=units, embedding_dim=embedding_dim, max_utterance=max_utterance,\n max_sentence=max_sentence)(\n inputs=[utterances_embeddings, responses_embeddings, responses_gru])\n\n outputs = tf.keras.layers.Dense(2, kernel_initializer='glorot_normal')(dec_outputs)\n outputs = tf.nn.softmax(outputs)\n\n return tf.keras.Model(inputs=[utterances, responses], outputs=outputs)\n\n\nif __name__ == '__main__':\n SMN = smn(512, 1000, 256, 10, 50)\n","sub_path":"hlp/chat/free/model/smn.py","file_name":"smn.py","file_ext":"py","file_size_in_byte":4812,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"88556782","text":"# coding: utf-8\n\nimport imageio\nfrom pathlib import Path\nimport shutil\nimport pytest\nfrom AxonDeepSeg.data_management.dataset_building import *\nfrom AxonDeepSeg.visualization.get_masks import *\nfrom AxonDeepSeg.ads_utils import download_data\n\nclass TestCore(object):\n def setup(self):\n # Get the directory where this current file is saved\n self.fullPath = Path(__file__).resolve().parent\n # Move up to the test directory, \"test/\"\n self.testPath = self.fullPath.parent\n\n _create_new_test_folder = (\n lambda s, t: self.testPath\n / '__test_files__'\n / s\n / t\n )\n\n self.rawPath = _create_new_test_folder('__test_patch_files__', 'raw')\n self.patchPath = _create_new_test_folder('__test_patch_files__', 'patched')\n self.datasetPath = _create_new_test_folder('__test_patch_files__', 'dataset')\n self.mixedPatchPath = _create_new_test_folder('__test_patch_files__', 'mixedPatched')\n self.mixedDatasetPath = _create_new_test_folder('__test_patch_files__', 'mixedDataset')\n\n self.rawPath16b = _create_new_test_folder('__test_16b_file__', 'raw')\n self.patchPath16b = _create_new_test_folder('__test_16b_file__', 'patched')\n\n self.downloaded_data = Path(\"./SEM_dataset\")\n self.data_split_path = Path(\"./SEM_split\")\n\n @classmethod\n def teardown_class(cls):\n # Get the directory where this current file is saved\n fullPath = Path(__file__).resolve().parent\n # Move up to the test directory, \"test/\"\n testPath = fullPath.parent\n\n _create_new_test_folder = (\n lambda s, t: testPath\n / '__test_files__'\n / s\n / t\n )\n\n patchPath = _create_new_test_folder('__test_patch_files__', 'patched')\n datasetPath = _create_new_test_folder('__test_patch_files__', 'dataset')\n mixedPatchPath = _create_new_test_folder('__test_patch_files__', 'mixedPatched')\n mixedDatasetPath = _create_new_test_folder('__test_patch_files__', 'mixedDataset')\n\n patchPath16b = _create_new_test_folder('__test_16b_file__', 'patched')\n\n downloaded_data = Path(\"./SEM_dataset\")\n data_split_path = Path(\"./SEM_split\")\n\n if patchPath.is_dir():\n shutil.rmtree(patchPath)\n \n if datasetPath.is_dir():\n shutil.rmtree(datasetPath)\n \n if mixedPatchPath.is_dir():\n shutil.rmtree(mixedPatchPath)\n \n if mixedDatasetPath.is_dir():\n shutil.rmtree(mixedDatasetPath)\n\n if patchPath16b.is_dir():\n shutil.rmtree(patchPath16b)\n\n if downloaded_data.is_dir():\n shutil.rmtree(downloaded_data)\n\n if data_split_path.is_dir():\n shutil.rmtree(data_split_path)\n\n # --------------raw_img_to_patches tests-------------- #\n @pytest.mark.unit\n def 
test_raw_img_to_patches_creates_expected_folders_and_files(self):\n if self.patchPath.is_dir():\n shutil.rmtree(self.patchPath)\n\n raw_img_to_patches(str(self.rawPath), str(self.patchPath))\n\n assert self.patchPath.is_dir()\n\n # These demo image and mask are split into 6 patches each\n path_to_data1 = self.patchPath / 'data1'\n assert(path_to_data1.is_dir())\n assert len([item for item in path_to_data1.iterdir()]) == 12\n\n # These demo image and mask are split into 12 patches each\n path_to_data2 = self.patchPath / 'data2'\n assert(path_to_data2.is_dir())\n assert len([item for item in path_to_data2.iterdir()]) == 24\n\n @pytest.mark.unit\n def test_raw_img_to_patches_doesnt_cutoff_16bit_files(self):\n if self.patchPath16b.is_dir():\n shutil.rmtree(self.patchPath16b)\n\n raw_img_to_patches(str(self.rawPath16b), str(self.patchPath16b), patch_size=512, resampling_resolution=0.005)\n\n img_folder_names = [im.name for im in self.patchPath16b.iterdir()]\n for img_folder in tqdm(img_folder_names):\n path_img_folder = self.patchPath16b / img_folder\n\n if path_img_folder.is_dir():\n # We go through every file in the image folder\n data_names = [d.name for d in path_img_folder.iterdir()]\n for data in data_names:\n # Skip the mask files\n if 'mask' not in data:\n print(data)\n img = imageio.imread(path_img_folder / data)\n img_bins = np.bincount(np.ndarray.flatten(img))\n \n # Assert that not more than 50% of the pixels are the minimum value\n assert img_bins[0]/sum(img_bins) < 0.5\n\n # Assert that not more than 50% of the pixels are the maximum value\n assert img_bins[-1]/sum(img_bins) < 0.5\n\n @pytest.mark.unit\n def test_raw_img_to_patches_creates_masks_with_expected_number_of_unique_values(self):\n if self.patchPath.is_dir():\n shutil.rmtree(self.patchPath)\n\n raw_img_to_patches(str(self.rawPath), str(self.patchPath))\n\n \n img_folder_names = [im.name for im in self.patchPath.iterdir()]\n for img_folder in tqdm(img_folder_names):\n path_img_folder = self.patchPath / img_folder\n if path_img_folder.is_dir():\n # We go through every file in the image folder\n data_names = [d.name for d in path_img_folder.iterdir()]\n for data in data_names:\n\n if 'mask' in data:\n mask = imageio.imread(path_img_folder / data)\n \n image_properties = get_image_unique_vals_properties(mask)\n\n assert image_properties['num_uniques'] == 3\n assert np.array_equal(image_properties['unique_values'], [0, 128, 255])\n\n # --------------patched_to_dataset tests-------------- #\n @pytest.mark.unit\n def test_patched_to_dataset_creates_expected_folders_and_files(self):\n if self.datasetPath.is_dir():\n shutil.rmtree(self.datasetPath)\n\n patched_to_dataset(str(self.patchPath), str(self.datasetPath), 'unique')\n\n assert self.datasetPath.is_dir()\n\n # Dataset folder merges all the patch folders generated\n assert len([item for item in self.datasetPath.iterdir()]) == 12+24\n\n @pytest.mark.unit\n def test_patched_to_dataset_fake_mixed_dataset_creates_expected_dir(self):\n # TEM images are too large to be included in repo (6+ megs), so simply\n # create fake duplicate dataset with SEM images.\n if self.mixedDatasetPath.is_dir():\n shutil.rmtree(self.mixedDatasetPath)\n\n raw_img_to_patches(str(self.rawPath), str(self.mixedPatchPath / 'SEM'))\n\n raw_img_to_patches(str(self.rawPath), str(self.mixedPatchPath / 'TEM'))\n\n patched_to_dataset(str(self.mixedPatchPath), str(self.mixedDatasetPath), 'mixed')\n\n assert self.mixedDatasetPath.is_dir()\n\n # Dataset folder merges all the patch folders generated\n assert 
len([item for item in self.mixedDatasetPath.iterdir()]) == (12+24)*2\n\n @pytest.mark.unit\n def test_split_data_outputs_expected_number_of_folders(self):\n url_example_data = \"https://osf.io/vrdpe/?action=download\" # URL of example data hosted on OSF\n file_data = \"SEM_dataset.zip\"\n\n if not download_data(url_example_data)==0:\n print('ERROR: Data was not succesfully downloaded and unzipped - please check your link and filename and try again.')\n else:\n print('Data downloaded and unzipped succesfully.')\n \n split_data(self.downloaded_data, self.data_split_path, seed=2019, split = [0.8, 0.2])\n\n train_dir = self.data_split_path / \"Train\"\n valid_dir = self.data_split_path / \"Validation\"\n\n # get sorted list of train/validation directories\n train_subdirs=sorted([x for x in train_dir.iterdir() if x.is_dir()])\n valid_subdirs=sorted([x for x in valid_dir.iterdir() if x.is_dir()])\n\n assert len(train_subdirs)==7\n assert len(valid_subdirs)==2\n\n @pytest.mark.unit\n def test_split_data_throws_error_for_existing_folder(self):\n url_example_data = \"https://osf.io/vrdpe/?action=download\" # URL of example data hosted on OSF\n file_data = \"SEM_dataset.zip\"\n\n if not download_data(url_example_data)==0:\n print('ERROR: Data was not succesfully downloaded and unzipped - please check your link and filename and try again.')\n else:\n print('Data downloaded and unzipped succesfully.')\n \n assert self.data_split_path.is_dir()\n with pytest.raises(IOError):\n split_data(self.downloaded_data, self.data_split_path, seed=2019, split = [0.8, 0.2])\n\n @pytest.mark.unit\n def test_split_data_works_with_override(self):\n url_example_data = \"https://osf.io/vrdpe/?action=download\" # URL of example data hosted on OSF\n file_data = \"SEM_dataset.zip\"\n\n if not download_data(url_example_data)==0:\n print('ERROR: Data was not succesfully downloaded and unzipped - please check your link and filename and try again.')\n else:\n print('Data downloaded and unzipped succesfully.')\n \n assert self.data_split_path.is_dir()\n split_data(self.downloaded_data, self.data_split_path, seed=2019, split = [0.8, 0.2], override=True)\n\n assert self.data_split_path.is_dir()\n","sub_path":"test/data_management/test_dataset_building.py","file_name":"test_dataset_building.py","file_ext":"py","file_size_in_byte":9585,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"395475064","text":"import matplotlib.pyplot as plt\nimport numpy as np\n\nfig = plt.figure(figsize=(4,2))\nax = fig.add_subplot(111)\nx = np.linspace(-20,20, 1000)\nline_cosh, = ax.plot(x, x**3, alpha=0.2)\nline_quad, = ax.plot(x, x**2/2)\nax.set_xlabel('test')\nax.set_ylabel('test')\nplt.show()\n\n","sub_path":"apace/Test Figs.py","file_name":"Test Figs.py","file_ext":"py","file_size_in_byte":269,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"461732058","text":"import FWCore.ParameterSet.Config as cms\n\n# File: CaloMETOptcaloTowers.cff\n# Author: B. Scurlock\n# Date: 02.28.2008\n#\n# Form Missing ET from optimized calotowers. 
The HCALRecHit thresholds \n# are based on Feng Liu's optimization study.\n# IMPORTANT: this configuration assumes that RecHits are in the event\n# reconstruct CaloRecHits and create calotowers here\nfrom RecoJets.Configuration.CaloTowersES_cfi import *\nfrom RecoMET.METProducers.CaloTowersOpt_cfi import *\ncaloTowersForMET = cms.EDProducer(\"CaloTowerCandidateCreator\",\n src = cms.InputTag(\"towerMaker\"),\n minimumEt = cms.double(-1.0),\n minimumE = cms.double(-1.0)\n)\n\ncaloTowersOpt = cms.EDProducer(\"CaloTowerCandidateCreator\",\n src = cms.InputTag(\"calotoweroptmaker\"),\n minimumEt = cms.double(-1.0),\n minimumE = cms.double(-1.0)\n)\n\n# sequence caloTowersMETOptRec = { calotoweroptmaker, caloTowersOpt, caloTowersForMET }\ncaloTowersMETOptRec = cms.Sequence(calotoweroptmaker)\n\n","sub_path":"RecoMET/Configuration/python/CaloTowersOptForMET_cff.py","file_name":"CaloTowersOptForMET_cff.py","file_ext":"py","file_size_in_byte":953,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"636979172","text":"\"\"\"\r\nAuthor: Jason Dusek, Peggy Walsh, Jeremy Hinz\r\nDate: 10/25/2020\r\nProject: sepia.py\r\n\"\"\"\r\n\r\nfrom images import Image\r\n\r\n\r\ndef sepia():\r\n picture = Image(\"smokey.gif\")\r\n\r\n for y in range(picture.getHeight()):\r\n for x in range(picture.getWidth()):\r\n (r,g,b) = picture.getPixel(x, y)\r\n\r\n newR = (0.393 * r + 0.769 * g + 0.189 * b)\r\n newG = (0.349 * r + 0.686 * g + 0.168 * b)\r\n newB = (0.272 * r + 0.534 * g + 0.131 * b)\r\n\r\n if newR > 255: newR = 255\r\n if newG > 255: newG = 255\r\n if newB > 255: newB = 255\r\n\r\n picture.setPixel(x,y,(newR,newG,newB))\r\n\r\n picture.draw()\r\n\r\n\r\ndef grayscale(image):\r\n \"\"\"Converts the argument image to grayscale.\"\"\"\r\n for y in range(image.getHeight()):\r\n for x in range(image.getWidth()):\r\n (r, g, b) = image.getPixel(x, y)\r\n r = int(r * 0.299)\r\n g = int(g * 0.587)\r\n b = int(b * 0.114)\r\n lum = r + g + b\r\n image.setPixel(x, y, (lum, lum, lum))\r\n\r\n\r\ndef main(filename = \"smokey.gif\"):\r\n image = Image(filename)\r\n print(\"Close the image window to continue. \")\r\n image.draw()\r\n grayscale(image)\r\n print(\"Close the image window to quit. 
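The sepia kernel in the record above applies three weighted sums per pixel and clamps each channel to 255 in a double loop. A vectorized sketch of the same transform with NumPy — this is independent of the `images.Image` helper the script uses, and the demo array is fabricated:

import numpy as np

# Rows are the newR/newG/newB weight triples from the loop above.
SEPIA = np.array([[0.393, 0.769, 0.189],
                  [0.349, 0.686, 0.168],
                  [0.272, 0.534, 0.131]])

def sepia_array(rgb):
    out = rgb.astype(np.float64) @ SEPIA.T   # one matrix multiply per pixel
    return np.clip(out, 0, 255).astype(np.uint8)  # clamp like the loop does

demo = np.random.default_rng(1).integers(0, 256, (4, 4, 3), dtype=np.uint8)
print(sepia_array(demo)[0, 0])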
\")\r\n image.draw()\r\n sepia()\r\n\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n","sub_path":"CH7_GroupProject.py","file_name":"CH7_GroupProject.py","file_ext":"py","file_size_in_byte":1342,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"326427776","text":"from piggychallenge.extensions import db\nfrom collections import OrderedDict\nfrom flask import current_app\nimport datetime\nfrom piggychallenge.blueprints.user.models import users_challenges\nfrom flask_login import current_user\n\nclass Challenge(db.Model):\n\t__tablename__ = 'challenges'\n\n\tSTAGE = OrderedDict([\n\t\t('before','Before'),\n\t\t('during','During'),\n\t\t('after','After')\n\t])\n\n\tid = db.Column(db.Integer, primary_key=True)\n\tname = db.Column(db.String(255), nullable=False)\n\tis_private = db.Column(db.Boolean(), nullable=False, server_default='0')\n\tstage = db.Column(db.Enum(*STAGE, name='possible_stages', native_enum=False), nullable=False)\n\tis_active = db.Column(db.Boolean(), nullable=False, server_default='0')\n\tpassword = db.Column(db.String(128), nullable=True)\n\tstart_date = db.Column(db.Date, nullable=False)\n\tend_date = db.Column(db.Date, nullable=False)\n\t# end_date = db.Column(db.String(128), nullable=False)\n\t# one-to-many cascade delete: https://stackoverflow.com/q/23323947\n\tweights = db.relationship('Weight', cascade='all, delete-orphan')\n\n\n\tcreated_on = db.Column(db.Date, nullable=False)\n\tcreator_id = db.Column(db.Integer, db.ForeignKey('users.id'))\n\tcreator = db.relationship('User', backref=db.backref('challenges_created', lazy='dynamic'))\n\n\tparticipants = db.relationship('User',\n\t\t\t\t\t\t\t\t secondary=users_challenges,\n\t\t\t\t\t\t\t\t # primaryjoin=(users_challenges.c.challenge_id == id),\n\t\t\t\t\t\t\t\t # secondaryjoin=(users_challenges.c.user_id == User.id),\n\t\t\t\t\t\t\t\t backref=db.backref('my_challenges', lazy='dynamic'))\n\n\n\t@classmethod\n\tdef user_made_challenge_name(cls,challenge_name):\n\t\treturn cls.query.filter(cls.name == challenge_name).filter(cls.creator_id == current_user.id).first()\n\n\n\t# https://stackoverflow.com/a/30177588\n\t# Author: Nick Janetakis\n\t@classmethod\n\tdef search(cls, query):\n\t\t\"\"\"\n\t\tSearch a resource by 1 or more fields.\n\n\t\t:param query: Search query\n\t\t:type query: str\n\t\t:return: SQLAlchemy filter\n\t\t\"\"\"\n\t\tif not query:\n\t\t\treturn ''\n\n\t\tsearch_query = cls.query.filter(cls.name.ilike('%{0}%'.format(query))).all()\n\t\treturn search_query\n\n\tdef get_challenge_days(self):\n\t\tif datetime.date.today() >= self.start_date:\n\t\t\tif datetime.date.today() < self.end_date:\n\t\t\t\tself.stage = 'during'\n\t\t\t\tday_count = (self.end_date - datetime.date.today()).days-1\n\t\t\t\tif day_count > 1:\n\t\t\t\t\tdays = 'Ends in {} days'.format(day_count)\n\t\t\t\telif day_count == 1:\n\t\t\t\t\tdays = 'Ends in 1 day'\n\t\t\t\telse:\n\t\t\t\t\tdays = 'Ends in less than a day'\n\t\t\t\tif not self.is_active:\n\t\t\t\t\tself.is_active = True\n\t\t\telse:\n\t\t\t\tself.stage = 'after'\n\t\t\t\tday_count = (datetime.date.today() - self.end_date).days\n\t\t\t\tif day_count > 1:\n\t\t\t\t\tdays = 'Ended {} days ago'.format(day_count)\n\t\t\t\telif day_count == 1:\n\t\t\t\t\tdays = 'Ended 1 day ago'\n\t\t\t\telse:\n\t\t\t\t\tdays = 'Ended less than a day ago'\n\t\t\t\tif self.is_active:\n\t\t\t\t\tself.is_active = False\n\t\t\tdb.session.commit() # changed the challenge stage\n\t\telse:\n\t\t\tday_count = (self.start_date - 
datetime.date.today()).days-1\n\t\t\tif day_count > 1:\n\t\t\t\tdays = 'Starts in {} days'.format(day_count)\n\t\t\telif day_count == 1:\n\t\t\t\tdays = 'Starts in a day'\n\t\t\telse:\n\t\t\t\tdays = 'Starts in less than a day'\n\t\t\tif self.is_active:\n\t\t\t\tself.is_active = False\n\t\treturn days\n\n\n\tdef num_participants(self):\n\t\treturn len(self.participants)\n\n\n\nclass Weight(db.Model):\n\tid = db.Column(db.Integer, primary_key=True)\n\tvalue = db.Column(db.Integer, nullable=False)\n\tdate = db.Column(db.Date, nullable=False)\n\t# one-to-many cascade delete: https://stackoverflow.com/q/23323947\n\tuser_id = db.Column(db.Integer, db.ForeignKey('users.id', ondelete='CASCADE'))\n\tchallenge_id = db.Column(db.Integer, db.ForeignKey('challenges.id', ondelete='CASCADE'))\n\tis_first = db.Column(db.Boolean, nullable=False, server_default='0')\n","sub_path":"piggychallenge/blueprints/challenge/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":3729,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"429197371","text":"# Copyright © 2019 Province of British Columbia\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Tests to assure the FeeSchedule Service.\n\nTest-Suite to ensure that the FeeSchedule Service is working as expected.\n\"\"\"\n\nfrom datetime import datetime\n\nfrom pay_api.models import FeeSchedule, Invoice, InvoiceSchema, Payment, PaymentAccount, PaymentLineItem\nfrom pay_api.services.payment_line_item import PaymentLineItem as PaymentLineService\nfrom pay_api.utils.enums import Status\n\n\ndef factory_payment_account(corp_number: str = 'CP0001234', corp_type_code='CP', payment_system_code='PAYBC'):\n \"\"\"Factory.\"\"\"\n return PaymentAccount(corp_number=corp_number, corp_type_code=corp_type_code,\n payment_system_code=payment_system_code)\n\n\ndef factory_payment(payment_system_code: str = 'PAYBC', payment_method_code='CC',\n payment_status_code=Status.DRAFT.value):\n \"\"\"Factory.\"\"\"\n return Payment(payment_system_code=payment_system_code, payment_method_code=payment_method_code,\n payment_status_code=payment_status_code, created_by='test', created_on=datetime.now())\n\n\ndef factory_invoice(payment_id: str, account_id: str):\n \"\"\"Factory.\"\"\"\n return Invoice(payment_id=payment_id,\n invoice_status_code=Status.DRAFT.value,\n account_id=account_id,\n total=0, created_by='test', created_on=datetime.now())\n\n\ndef factory_payment_line_item(invoice_id: str, fee_schedule_id: int, filing_fees: int = 10, total: int = 10,\n status='CREATED'):\n \"\"\"Factory.\"\"\"\n return PaymentLineItem(invoice_id=invoice_id,\n fee_schedule_id=fee_schedule_id,\n filing_fees=filing_fees,\n total=total,\n line_item_status_code=status)\n\n\ndef test_line_saved_from_new(session):\n \"\"\"Assert that the payment is saved to the table.\"\"\"\n payment_account = factory_payment_account()\n payment = factory_payment()\n payment_account.save()\n payment.save()\n invoice = factory_invoice(payment.id, 
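get_challenge_days in the record above branches on date deltas to build "Starts in…", "Ends in…", and "Ended … ago" strings while mutating stage/is_active. The phrasing itself can be factored into a pure function; a minimal sketch of that logic only (function names are illustrative, and the DB side effects are intentionally omitted):

import datetime

def _phrase(verb, n, past=False):
    # "Starts in 8 days" / "Ends in 1 day" / "Ended 3 days ago", etc.
    if past:
        if n > 1:
            return f"Ended {n} days ago"
        return "Ended 1 day ago" if n == 1 else "Ended less than a day ago"
    if n > 1:
        return f"{verb} in {n} days"
    return f"{verb} in 1 day" if n == 1 else f"{verb} in less than a day"

def phase_message(start, end, today=None):
    today = today or datetime.date.today()
    if today < start:                      # "before" stage
        return _phrase("Starts", (start - today).days - 1)
    if today < end:                        # "during" stage
        return _phrase("Ends", (end - today).days - 1)
    return _phrase("", (today - end).days, past=True)  # "after" stage

d = datetime.date
print(phase_message(d(2021, 6, 10), d(2021, 6, 20), today=d(2021, 6, 1)))   # Starts in 8 days
print(phase_message(d(2021, 6, 10), d(2021, 6, 20), today=d(2021, 6, 15)))  # Ends in 4 days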
payment_account.id)\n invoice.save()\n fee_schedule = FeeSchedule.find_by_filing_type_and_corp_type('CP', 'OTANN')\n line = factory_payment_line_item(invoice.id, fee_schedule_id=fee_schedule.fee_schedule_id)\n line.save()\n line = factory_payment_line_item(invoice.id, fee_schedule_id=fee_schedule.fee_schedule_id, status='CANCELLED')\n line.save()\n\n p = PaymentLineService.find_by_id(line.id)\n\n assert p is not None\n assert p.id is not None\n assert p.invoice_id is not None\n assert p.filing_fees is not None\n assert p.fee_schedule_id is not None\n assert p.processing_fees is None\n assert p.service_fees is None\n assert p.gst is None\n assert p.pst is None\n assert p.line_item_status_code is not None\n invoice = Invoice.find_by_id(invoice.id)\n schema = InvoiceSchema()\n d = schema.dump(invoice)\n assert d.get('id') == invoice.id\n assert len(d.get('line_items')) == 1\n\n\ndef test_line_invalid_lookup(session):\n \"\"\"Test Invalid lookup.\"\"\"\n p = PaymentLineService.find_by_id(999)\n\n assert p is not None\n assert p.id is None\n","sub_path":"pay-api/tests/unit/services/test_payment_line_item.py","file_name":"test_payment_line_item.py","file_ext":"py","file_size_in_byte":3720,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"230180990","text":"class ChessBoard(object):\n\n def IsCheckmate(self):\n # check if 'whoseturn' is in checkmate\n # Call GetListOfValidMoves for each piece of current player\n # If there aren't any valid moves for any pieces, then return true\n\n if self.whoseturn == \"black\":\n myColor = 'b'\n enemyColor = 'w'\n else:\n myColor = 'w'\n enemyColor = 'b'\n\n myColorValidMoves = [];\n for row in range(8):\n for col in range(8):\n piece = self.squares[row][col]\n if myColor in piece:\n myColorValidMoves.extend(self.GetListOfValidMoves((row, col)))\n\n if len(myColorValidMoves) == 0:\n return True\n else:\n return False\n\n def IsInCheck(self):\n # check if 'whoseturn' is in check\n # scan through squares for all enemy pieces; if there IsLegalMove to whoseturn's king, then return True.\n if self.whoseturn == \"black\":\n myColor = 'b'\n enemyColor = 'w'\n else:\n myColor = 'w'\n enemyColor = 'b'\n\n kingTuple = (0, 0)\n # First, get current player's king location\n for row in range(8):\n for col in range(8):\n piece = self.squares[row][col]\n if 'K' in piece and myColor in piece:\n kingTuple = (row, col)\n\n # Check if any of enemy player's pieces has a legal move to current player's king\n for row in range(8):\n for col in range(8):\n piece = self.squares[row][col]\n if enemyColor in piece:\n # need to temporarily switch whoseturn due to the way IsLegalMove works!\n self.SwitchWhoseTurn()\n if self.IsLegalMove((row, col), kingTuple):\n self.SwitchWhoseTurn()\n return True\n self.SwitchWhoseTurn()\n\n return False\n\n def SwitchWhoseTurn(self):\n if self.whoseturn == \"black\":\n self.whoseturn = \"white\"\n else:\n self.whoseturn = \"black\"\n\n def DoesMovePutPlayerInCheck(self, fromTuple, toTuple):\n # makes a hypothetical move; returns True if it puts current player into check\n fromSquare_r = fromTuple[0]\n fromSquare_c = fromTuple[1]\n toSquare_r = toTuple[0]\n toSquare_c = toTuple[1]\n fromPiece = self.squares[fromSquare_r][fromSquare_c]\n toPiece = self.squares[toSquare_r][toSquare_c]\n\n self.squares[toSquare_r][toSquare_c] = fromPiece\n self.squares[fromSquare_r][fromSquare_c] = 'e'\n\n retval = self.IsInCheck()\n\n # undo temporary move\n self.squares[toSquare_r][toSquare_c] = toPiece\n 
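DoesMovePutPlayerInCheck above follows a classic apply/inspect/undo pattern: mutate the board, run a check, then restore both squares by hand. A generic sketch of that pattern as a context manager, so the restore can never be skipped — board layout and coordinates here are illustrative, not the ChessBoard API:

from contextlib import contextmanager

@contextmanager
def trial_move(board, frm, to):
    # Temporarily move a piece from `frm` to `to` on a 2-D list board
    # that uses 'e' for empty squares, as in the class above.
    fr, fc = frm
    tr, tc = to
    saved_from, saved_to = board[fr][fc], board[tr][tc]
    board[tr][tc], board[fr][fc] = board[fr][fc], 'e'
    try:
        yield board
    finally:  # always restore, even if the inspection raises
        board[fr][fc], board[tr][tc] = saved_from, saved_to

board = [['wK', 'e'], ['e', 'bQ']]
with trial_move(board, (0, 0), (0, 1)):
    print(board)  # the hypothetical position is visible here
print(board)      # original position restored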
self.squares[fromSquare_r][fromSquare_c] = fromPiece\n\n # print \"###Diag: DoesMovePutPlayerInCheck? \",fromTuple,\" to \",toTuple,\": \",retval\n\n return retval\n\n def IsClearPath(self, fromTuple, toTuple):\n # Return true if there is nothing in a straight line between fromTuple and toTuple, non-inclusive\n # Direction could be +/- vertical, +/- horizontal, +/- diagonal\n fromSquare_r = fromTuple[0]\n fromSquare_c = fromTuple[1]\n toSquare_r = toTuple[0]\n toSquare_c = toTuple[1]\n fromPiece = self.squares[fromSquare_r][fromSquare_c]\n\n if abs(fromSquare_r - toSquare_r) <= 1 and abs(fromSquare_c - toSquare_c) <= 1:\n # The base case: just one square apart\n return True\n else:\n if toSquare_r > fromSquare_r and toSquare_c == fromSquare_c:\n # vertical +\n newTuple = (fromSquare_r + 1, fromSquare_c)\n\n elif toSquare_r < fromSquare_r and toSquare_c == fromSquare_c:\n # vertical -\n newTuple = (fromSquare_r - 1, fromSquare_c)\n\n elif toSquare_r == fromSquare_r and toSquare_c > fromSquare_c:\n # horizontal +\n newTuple = (fromSquare_r, fromSquare_c + 1)\n\n elif toSquare_r == fromSquare_r and toSquare_c < fromSquare_c:\n # horizontal -\n newTuple = (fromSquare_r, fromSquare_c - 1)\n\n elif toSquare_r > fromSquare_r and toSquare_c > fromSquare_c:\n # diagonal \"SE\"\n newTuple = (fromSquare_r + 1, fromSquare_c + 1)\n\n elif toSquare_r > fromSquare_r and toSquare_c < fromSquare_c:\n # diagonal \"SW\"\n newTuple = (fromSquare_r + 1, fromSquare_c - 1)\n\n elif toSquare_r < fromSquare_r and toSquare_c > fromSquare_c:\n # diagonal \"NE\"\n newTuple = (fromSquare_r - 1, fromSquare_c + 1)\n\n elif toSquare_r < fromSquare_r and toSquare_c < fromSquare_c:\n # diagonal \"NW\"\n newTuple = (fromSquare_r - 1, fromSquare_c - 1)\n\n if self.squares[newTuple[0]][newTuple[1]] != 'e':\n return False\n else:\n return self.IsClearPath(newTuple, toTuple)\n\n def GetListOfValidMoves(self, fromTuple):\n legalDestinationSpaces = []\n for row in range(8):\n for col in range(8):\n d = (row, col)\n if self.IsLegalMove(fromTuple, d):\n if not self.DoesMovePutPlayerInCheck(fromTuple, d):\n legalDestinationSpaces.append(d)\n return legalDestinationSpaces\n\n def IsLegalMove(self, fromTuple, toTuple):\n fromSquare_r = fromTuple[0]\n fromSquare_c = fromTuple[1]\n toSquare_r = toTuple[0]\n toSquare_c = toTuple[1]\n fromPiece = self.squares[fromSquare_r][fromSquare_c]\n toPiece = self.squares[toSquare_r][toSquare_c]\n\n if self.whoseturn == \"black\":\n enemyColor = 'w'\n if self.whoseturn == \"white\":\n enemyColor = 'b'\n\n if fromTuple == toTuple:\n return False\n\n if \"P\" in fromPiece:\n # Pawn\n if self.whoseturn == \"black\":\n if toSquare_r == fromSquare_r + 1 and toSquare_c == fromSquare_c and toPiece == 'e':\n # moving forward one space\n return True\n if fromSquare_r == 1 and toSquare_r == fromSquare_r + 2 and toSquare_c == fromSquare_c and toPiece == 'e':\n # black pawn on starting row can move forward 2 spaces if there is no one directly ahead\n if self.IsClearPath(fromTuple, toTuple):\n return True\n if toSquare_r == fromSquare_r + 1 and (\n toSquare_c == fromSquare_c + 1 or toSquare_c == fromSquare_c - 1) and enemyColor in toPiece:\n # attacking\n return True\n\n elif self.whoseturn == \"white\":\n if toSquare_r == fromSquare_r - 1 and toSquare_c == fromSquare_c and toPiece == 'e':\n # moving forward one space\n return True\n if fromSquare_r == 6 and toSquare_r == fromSquare_r - 2 and toSquare_c == fromSquare_c and toPiece == 'e':\n # black pawn on starting row can move forward 2 spaces if there is no 
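IsClearPath above enumerates eight explicit direction branches before recursing. The same walk can be written iteratively with a unit step derived from the sign of each coordinate delta; a sketch over the same 'e'-for-empty board encoding (only meaningful for straight or diagonal paths, as in the original):

def sign(n):
    return (n > 0) - (n < 0)

def is_clear_path(board, frm, to):
    # Step one square at a time from `frm` toward `to`, exclusive of
    # both endpoints, using coordinate signs as the direction vector.
    dr, dc = sign(to[0] - frm[0]), sign(to[1] - frm[1])
    r, c = frm[0] + dr, frm[1] + dc
    while (r, c) != to:
        if board[r][c] != 'e':
            return False
        r, c = r + dr, c + dc
    return True

board = [['e'] * 4 for _ in range(4)]
board[1][1] = 'bP'
print(is_clear_path(board, (0, 0), (3, 3)))  # False: pawn in the way
print(is_clear_path(board, (0, 3), (3, 0)))  # True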
one directly ahead\n if self.IsClearPath(fromTuple, toTuple):\n return True\n if toSquare_r == fromSquare_r - 1 and (\n toSquare_c == fromSquare_c + 1 or toSquare_c == fromSquare_c - 1) and enemyColor in toPiece:\n # attacking\n return True\n\n elif \"R\" in fromPiece:\n # Rook\n if (toSquare_r == fromSquare_r or toSquare_c == fromSquare_c) and (toPiece == 'e' or enemyColor in toPiece):\n if self.IsClearPath(fromTuple, toTuple):\n return True\n\n elif \"T\" in fromPiece:\n # Knight\n col_diff = toSquare_c - fromSquare_c\n row_diff = toSquare_r - fromSquare_r\n if toPiece == 'e' or enemyColor in toPiece:\n if abs(col_diff) == 1 and abs(row_diff) == 2:\n return True\n if abs(col_diff) == 2 and abs(row_diff) == 1:\n return True\n\n elif \"B\" in fromPiece:\n # Bishop\n if (abs(toSquare_r - fromSquare_r) == abs(toSquare_c - fromSquare_c)) and (\n toPiece == 'e' or enemyColor in toPiece):\n if self.IsClearPath(fromTuple, toTuple):\n return True\n\n elif \"Q\" in fromPiece:\n # Queen\n if self.IsClearPath(fromTuple, toTuple) and (toPiece == 'e' or enemyColor in toPiece):\n if (toSquare_r == fromSquare_r or toSquare_c == fromSquare_c) or (\n abs(toSquare_r - fromSquare_r) == abs(toSquare_c - fromSquare_c)):\n return True\n\n elif \"K\" in fromPiece:\n # King\n if toPiece == 'e' or enemyColor in toPiece:\n if abs(toSquare_c - fromSquare_c) <= 1 and abs(toSquare_r - fromSquare_r) <= 1:\n return True\n\n return False # if none of the other \"True\"s are hit above\n\n def GetPlayerInput(self):\n toTuple = (999, 999)\n while toTuple == (999, 999):\n fromTuple = self.GetPlayerInput_SquareFrom()\n toTuple = self.GetPlayerInput_SquareTo(fromTuple)\n\n return (fromTuple, toTuple)\n\n def GetPlayerInput_SquareTo(self, fromTuple):\n toTuple = ('x', 'x')\n\n validMoveList = self.GetListOfValidMoves(fromTuple)\n\n print(\"List of valid moves for piece at\", fromTuple, \": \", validMoveList)\n print(\"Type '10' to choose another square\")\n\n while (not toTuple in validMoveList):\n cmd_r = int(input(\" To row: \"))\n cmd_c = int(input(\" To col: \"))\n toTuple = (cmd_r, cmd_c)\n if cmd_r == 10:\n toTuple = (999, 999)\n elif not toTuple in validMoveList:\n print(\" Invalid move!\")\n\n return toTuple\n\n def GetPlayerInput_SquareFrom(self):\n ch = \"?\"\n cmd_r = 0\n cmd_c = 0\n while (ch not in self.squares[cmd_r][cmd_c] or self.GetListOfValidMoves((cmd_r, cmd_c)) == []):\n print(\"Player\", self.whoseturn)\n cmd_r = int(input(\" From row: \"))\n cmd_c = int(input(\" From col: \"))\n if self.whoseturn == \"black\":\n ch = \"b\"\n else:\n ch = \"w\"\n\n if (self.squares[cmd_r][cmd_c] == 'e'):\n print(\" Nothing there!\")\n elif (ch not in self.squares[cmd_r][cmd_c]):\n print(\" That's not your piece!\")\n elif self.GetListOfValidMoves((cmd_r, cmd_c)) == []:\n print(\" No valid moves for that piece!\")\n\n return (cmd_r, cmd_c)\n\n def MakeMove(self, moveTuple):\n fromSquare_r = moveTuple[0][0]\n fromSquare_c = moveTuple[0][1]\n toSquare_r = moveTuple[1][0]\n toSquare_c = moveTuple[1][1]\n\n fromPiece = self.squares[fromSquare_r][fromSquare_c]\n toPiece = self.squares[toSquare_r][toSquare_c]\n\n print(fromPiece, \"moves from square (\" + str(fromSquare_r) + \",\" + str(fromSquare_c) + \") to square (\" + str(\n toSquare_r) + \",\" + str(toSquare_c) + \")\")\n if toPiece != 'e':\n print(fromPiece, \"captures\", toPiece + \"!\")\n\n self.squares[toSquare_r][toSquare_c] = fromPiece\n self.squares[fromSquare_r][fromSquare_c] = 'e'\n\n def SetUpBoard(self, opt):\n if opt == 0:\n self.squares[0] = ['bR', 'bT', 'bB', 
'bQ', 'bK', 'bB', 'bT', 'bR']\n self.squares[1] = ['bP', 'bP', 'bP', 'bP', 'bP', 'bP', 'bP', 'bP']\n self.squares[2] = ['e', 'e', 'e', 'e', 'e', 'e', 'e', 'e']\n self.squares[3] = ['e', 'e', 'e', 'e', 'e', 'e', 'e', 'e']\n self.squares[4] = ['e', 'e', 'e', 'e', 'e', 'e', 'e', 'e']\n self.squares[5] = ['e', 'e', 'e', 'e', 'e', 'e', 'e', 'e']\n self.squares[6] = ['wP', 'wP', 'wP', 'wP', 'wP', 'wP', 'wP', 'wP']\n self.squares[7] = ['wR', 'wT', 'wB', 'wQ', 'wK', 'wB', 'wT', 'wR']\n\n def Draw(self):\n print(\" c0 c1 c2 c3 c4 c5 c6 c7 \")\n print(\" ----------------------------------------\")\n for r in range(8):\n print(\"r\" + str(r) + \"|\")\n for c in range(8):\n if self.squares[r][c] != 'e':\n print(str(self.squares[r][c]), \"|\")\n else:\n print(\" |\")\n if c == 7:\n print() # to get a new line\n print(\" ----------------------------------------\")\n\n def MainLoop(self):\n print(\"Starting Chess...\")\n self.whoseturn = \"black\"\n\n self.SetUpBoard(0) # make sure arg is 0 for standard set-up\n\n while not self.IsCheckmate():\n self.Draw()\n\n if self.IsInCheck():\n print(\"Warning...\", self.whoseturn, \" player is in check!\")\n\n move = self.GetPlayerInput()\n\n self.MakeMove(move)\n\n self.SwitchWhoseTurn()\n\n self.Draw() # draw board a final time to show the end game setup\n print(\"Player \", self.whoseturn, \" has lost the match!\")\n\n def __init__(self):\n self.squares = [['e', 'e', 'e', 'e', 'e', 'e', 'e', 'e'], ['e', 'e', 'e', 'e', 'e', 'e', 'e', 'e'],\n ['e', 'e', 'e', 'e', 'e', 'e', 'e', 'e'], ['e', 'e', 'e', 'e', 'e', 'e', 'e', 'e'],\n ['e', 'e', 'e', 'e', 'e', 'e', 'e', 'e'], ['e', 'e', 'e', 'e', 'e', 'e', 'e', 'e'],\n ['e', 'e', 'e', 'e', 'e', 'e', 'e', 'e'], ['e', 'e', 'e', 'e', 'e', 'e', 'e', 'e']]\n\n\nif __name__ == \"__main__\":\n b = ChessBoard()\n b.MainLoop()\n","sub_path":"game/ChessBoard.py","file_name":"ChessBoard.py","file_ext":"py","file_size_in_byte":13855,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"419040542","text":"class Solution():\n \"\"\"\n Given two sorted list, merge and sort them into a single array\n time: O(N)\n \"\"\"\n def merge(self, a, b):\n \"\"\"Merges 2 sorted lists a,b\"\"\"\n ndx_a, ndx_b = 0, 0 # pointer for a, b array\n ndx_curr = 0 # pointer for result array\n\n result = a + b\n while (ndx_a 5 or k < 1:\n raise ValueError(f\"Invalid degree of interpolation ({k}). 
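The merge-sort record above is cut off mid-loop (the text jumps from `while (ndx_a` straight into an unrelated interpolation file), so the body of the two-pointer merge it describes is lost. A complete standalone sketch of the standard O(n) merge the docstring promises — a generic reconstruction, not necessarily the original author's code:

def merge_sorted(a, b):
    # Classic two-pointer merge of sorted lists: O(len(a) + len(b)).
    out = []
    i = j = 0
    while i < len(a) and j < len(b):
        if a[i] <= b[j]:
            out.append(a[i])
            i += 1
        else:
            out.append(b[j])
            j += 1
    out.extend(a[i:])  # at most one of these two slices is non-empty
    out.extend(b[j:])
    return out

assert merge_sorted([1, 3, 5], [2, 4, 6, 8]) == [1, 2, 3, 4, 5, 6, 8]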
Must be \"\n f\"an integer greater than 0 and lower or \"\n f\"equal than 5.\")\n\n if monotone and s != 0:\n raise ValueError(\"Smoothing interpolation is not supported with \"\n \"monotone interpolation\")\n\n if monotone and (k == 2 or k == 4):\n raise ValueError(f\"monotone interpolation of degree {k}\"\n f\"not supported.\")\n\n # Monotone interpolation of degree 1 is performed with linear spline\n if monotone and k == 1:\n monotone = False\n\n # Evaluator of splines called in evaluate\n\n def _spline_evaluator_1_m(spl, t, der):\n\n return spl(t, der)\n\n def _process_derivative_1_m(derivative):\n\n return derivative\n\n self._spline_evaluator = _spline_evaluator_1_m\n\n self._process_derivative = _process_derivative_1_m\n\n sample_points = sample_points[0]\n\n if monotone:\n def constructor(data):\n \"\"\"Constructs an unidimensional cubic monotone interpolator\"\"\"\n return PchipInterpolator(sample_points, data)\n\n else:\n\n def constructor(data):\n \"\"\"Constructs an unidimensional interpolator\"\"\"\n return UnivariateSpline(sample_points, data, s=s, k=k)\n\n return np.apply_along_axis(constructor, 1, data_matrix)\n\n def _construct_spline_2_m(self, sample_points, data_matrix, k, s):\n r\"\"\"Construct the matrix of interpolators for surfaces.\n\n Constructs the matrix of interpolators for objects with domain\n dimension = 2. Calling internally during the creationg of the\n evaluator.\n\n Uses internally the scipy interpolator RectBivariateSpline.\n\n Args:\n sample_points (np.ndarray): Sample points of the fdatagrid.\n data_matrix (np.ndarray): Data matrix of the fdatagrid.\n k (integer): Order of the spline interpolators.\n\n Returns:\n (np.ndarray): Array of size n_samples x dim_codomain with the\n corresponding interpolator of the sample i, and image dimension j\n in the entry (i,j) of the array.\n\n Raises:\n ValueError: If the value of the interpolation k is not valid.\n\n \"\"\"\n if np.isscalar(k):\n kx = ky = k\n elif len(k) != 2:\n raise ValueError(\"k should be numeric or a tuple of length 2.\")\n else:\n kx = k[0]\n ky = k[1]\n\n if kx > 5 or kx <= 0 or ky > 5 or ky <= 0:\n raise ValueError(f\"Invalid degree of interpolation ({kx},{ky}). \"\n f\"Must be an integer greater than 0 and lower or \"\n f\"equal than 5.\")\n\n def _spline_evaluator_2_m(spl, t, der):\n\n return spl(t[:, 0], t[:, 1], dx=der[0], dy=der[1], grid=False)\n\n def _process_derivative_2_m(derivative):\n if np.isscalar(derivative):\n derivative = 2 * [derivative]\n elif len(derivative) != 2:\n raise ValueError(\"derivative should be a numeric value \"\n \"or a tuple of length 2 with (dx,dy).\")\n\n return derivative\n\n # Evaluator of splines called in evaluate\n self._spline_evaluator = _spline_evaluator_2_m\n self._process_derivative = _process_derivative_2_m\n\n # Matrix of splines\n spline = np.empty((self._n_samples, self._dim_codomain), dtype=object)\n\n for i in range(self._n_samples):\n for j in range(self._dim_codomain):\n spline[i, j] = RectBivariateSpline(sample_points[0],\n sample_points[1],\n data_matrix[i, :, :, j],\n kx=kx, ky=ky, s=s)\n\n return spline\n\n def _construct_spline_n_m(self, sample_points, data_matrix, k):\n r\"\"\"Construct the matrix of interpolators.\n\n Constructs the matrix of interpolators for objects with domain\n dimension > 2. Calling internally during the creationg of the\n evaluator.\n\n Only linear and nearest interpolators are available for objects with\n domain dimension >= 3. 
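The 1-D constructor above dispatches between PchipInterpolator (shape-preserving monotone cubic, only when s == 0) and UnivariateSpline (smoothing spline of degree k). A small sketch comparing the two on monotone data, using the SciPy calls as documented — sample values here are synthetic:

import numpy as np
from scipy.interpolate import PchipInterpolator, UnivariateSpline

x = np.linspace(0, 1, 8)
y = np.sqrt(x)  # monotone sample values

pchip = PchipInterpolator(x, y)            # monotone, no overshoot
spline = UnivariateSpline(x, y, s=0, k=3)  # exact cubic interpolant

t = np.linspace(0, 1, 5)
print(pchip(t))
print(spline(t))
print(pchip(t, 1))  # first derivative, mirroring spl(t, der) above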
Uses internally the scipy interpolator\n RegularGridInterpolator.\n\n Args:\n sample_points (np.ndarray): Sample points of the fdatagrid.\n data_matrix (np.ndarray): Data matrix of the fdatagrid.\n k (integer): Order of the spline interpolators.\n\n Returns:\n (np.ndarray): Array of size n_samples x dim_codomain with the\n corresponding interpolator of the sample i, and image dimension j\n in the entry (i,j) of the array.\n\n Raises:\n ValueError: If the value of the interpolation k is not valid.\n\n \"\"\"\n # Parses method of interpolation\n if k == 0:\n method = 'nearest'\n elif k == 1:\n method = 'linear'\n else:\n raise ValueError(\"interpolation order should be 0 (nearest) or 1 \"\n \"(linear).\")\n\n def _process_derivative_n_m(derivative):\n if derivative != 0:\n raise ValueError(\"derivates not suported for functional data \"\n \" with domain dimension greater than 2.\")\n\n return derivative\n\n def _spline_evaluator_n_m(spl, t, derivative):\n\n return spl(t)\n\n # Method to process derivative argument\n self._process_derivative = _process_derivative_n_m\n\n # Evaluator of splines called in evaluate\n self._spline_evaluator = _spline_evaluator_n_m\n\n spline = np.empty((self._n_samples, self._dim_codomain), dtype=object)\n\n for i in range(self._n_samples):\n for j in range(self._dim_codomain):\n spline[i, j] = RegularGridInterpolator(\n sample_points, data_matrix[i, ..., j], method, False)\n\n return spline\n\n def evaluate(self, eval_points, *, derivative=0):\n r\"\"\"Evaluation method.\n\n Evaluates the samples at different evaluation points. The evaluation\n call will receive a 3-d array with the evaluation points for\n each sample.\n\n This method is called internally by :meth:`evaluate` when the argument\n `aligned_evaluation` is False.\n\n Args:\n eval_points (np.ndarray): Numpy array with shape\n `(n_samples, number_eval_points, dim_domain)` with the\n evaluation points for each sample.\n derivative (int, optional): Order of the derivative. Defaults to 0.\n\n Returns:\n (np.darray): Numpy 3d array with shape `(n_samples,\n number_eval_points, dim_codomain)` with the result of the\n evaluation. The entry (i,j,k) will contain the value k-th image\n dimension of the i-th sample, at the j-th evaluation point.\n\n Raises:\n ValueError: In case of an incorrect value of the derivative\n argument.\n\n \"\"\"\n derivative = self._process_derivative(derivative)\n\n # Constructs the evaluator for t_eval\n if self._dim_codomain == 1:\n def evaluator(spl):\n \"\"\"Evaluator of object with image dimension equal to 1.\"\"\"\n return self._spline_evaluator(spl[0], eval_points, derivative)\n else:\n def evaluator(spl_m):\n \"\"\"Evaluator of multimensional object\"\"\"\n return np.dstack(\n [self._spline_evaluator(spl, eval_points, derivative)\n for spl in spl_m]).flatten()\n\n # Points evaluated inside the domain\n res = np.apply_along_axis(evaluator, 1, self._splines)\n res = res.reshape(self._n_samples, eval_points.shape[0],\n self._dim_codomain)\n\n return res\n\n def evaluate_composed(self, eval_points, *, derivative=0):\n \"\"\"Evaluation method.\n\n Evaluates the samples at different evaluation points. 
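For domain dimension greater than 2, the code above falls back to RegularGridInterpolator with only 'nearest' and 'linear' methods and bounds_error=False. A standalone sketch of that call pattern on a fabricated 3-D grid (the linear test function makes the expected output easy to verify by hand):

import numpy as np
from scipy.interpolate import RegularGridInterpolator

# Values sampled on a 3-D grid of f(x, y, z) = x + 10*y + 100*z.
xs = ys = zs = np.linspace(0, 1, 5)
grid = xs[:, None, None] + 10 * ys[None, :, None] + 100 * zs[None, None, :]

interp = RegularGridInterpolator((xs, ys, zs), grid, method="linear",
                                 bounds_error=False)  # as in the matrix above
pts = np.array([[0.2, 0.5, 0.9], [0.1, 0.1, 0.1]])
print(interp(pts))  # linear f is reproduced exactly: [95.2, 11.1]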
The evaluation\n call will receive a 3-d array with the evaluation points for\n each sample.\n\n This method is called internally by :meth:`evaluate` when the argument\n `aligned_evaluation` is False.\n\n Args:\n eval_points (np.ndarray): Numpy array with shape\n `(n_samples, number_eval_points, dim_domain)` with the\n evaluation points for each sample.\n derivative (int, optional): Order of the derivative. Defaults to 0.\n\n Returns:\n (np.darray): Numpy 3d array with shape `(n_samples,\n number_eval_points, dim_codomain)` with the result of the\n evaluation. The entry (i,j,k) will contain the value k-th image\n dimension of the i-th sample, at the j-th evaluation point.\n\n Raises:\n ValueError: In case of an incorrect value of the derivative\n argument.\n\n \"\"\"\n shape = (self._n_samples, eval_points.shape[1], self._dim_codomain)\n res = np.empty(shape)\n\n derivative = self._process_derivative(derivative)\n\n if self._dim_codomain == 1:\n def evaluator(t, spl):\n \"\"\"Evaluator of sample with image dimension equal to 1\"\"\"\n return self._spline_evaluator(spl[0], t, derivative)\n\n for i in range(self._n_samples):\n res[i] = evaluator(eval_points[i], self._splines[i]).reshape(\n (eval_points.shape[1], self._dim_codomain))\n\n else:\n def evaluator(t, spl_m):\n \"\"\"Evaluator of multidimensional sample\"\"\"\n return np.array([self._spline_evaluator(spl, t, derivative)\n for spl in spl_m]).T\n\n for i in range(self._n_samples):\n res[i] = evaluator(eval_points[i], self._splines[i])\n\n return res\n","sub_path":"skfda/representation/interpolation.py","file_name":"interpolation.py","file_ext":"py","file_size_in_byte":19546,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"518202969","text":"'''Write a Python function to sum all the numbers in a list. 
\nSample List : (8, 2, 3, 0, 7) \nExpected Output : 20 \n'''\ndef sum(num):\n total = 0\n for x in num:\n total += x\n return total\nprint(sum((8, 2, 3, 0, 7)))\n","sub_path":"Function/two.py","file_name":"two.py","file_ext":"py","file_size_in_byte":230,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"186406390","text":"import logging\nimport os, binascii\nfrom struct import unpack\nfrom socket import inet_ntoa\nimport configparser\n\n# 每个节点长度\nPER_NODE_LEN = 26\n# 节点 id 长度\nPER_NID_LEN = 20\n# 节点 id 和 ip 长度\nPER_NID_NIP_LEN = 24\n# 构造邻居随机结点\nNEIGHBOR_END = 14\n# 日志等级\nLOG_LEVEL = logging.INFO\n# 配置文件读取\ncg = configparser.ConfigParser()\ncg.read('.env')\nsection = cg.sections()\n\n\ndef get_rand_id():\n \"\"\"\n 生成随机的节点 id,长度为 20 位\n \"\"\"\n return os.urandom(PER_NID_LEN)\n\n\ndef get_neighbor(target):\n return target[:NEIGHBOR_END] + get_rand_id()[NEIGHBOR_END:]\n\n\ndef get_logger(logger_name):\n \"\"\"\n 获取日志实例\n :param logger_name:\n :return:\n \"\"\"\n logger = logging.getLogger(logger_name)\n logger.setLevel(LOG_LEVEL)\n fh = logging.StreamHandler()\n fh.setFormatter(\n logging.Formatter(\"%(asctime)s - %(levelname)s - %(message)s\")\n )\n logger.addHandler(fh)\n return logger\n\n\ndef get_nodes_info(nodes):\n \"\"\"\n 解析 find_node 回复中 nodes 节点的信息\n :param nodes: 节点薪资\n \"\"\"\n length = len(nodes)\n # 每个节点单位长度为 26 为,node = node_id(20位) + node_ip(4位) + node_port(2位)\n if (length % PER_NODE_LEN) != 0:\n return []\n\n for i in range(0, length, PER_NODE_LEN):\n nid = nodes[i:i + PER_NID_LEN]\n # 利用 inet_ntoa 可以返回节点 ip\n ip = inet_ntoa(nodes[i + PER_NID_LEN:i + PER_NID_NIP_LEN])\n # 解包返回节点端口\n port = unpack(\"!H\", nodes[i + PER_NID_NIP_LEN:i + PER_NODE_LEN])[0]\n yield (nid, ip, port)\n\n\ndef proper_infohash(infohash):\n \"\"\"\n 把 bytes 转换为 hex\n :param infohash:\n :return:\n \"\"\"\n if isinstance(infohash, bytes):\n # 返回二进制数据的16进制的表现形式\n infohash = binascii.hexlify(infohash).decode('utf-8')\n return infohash.upper()\n\n\ndef config(args):\n \"\"\"\n 获取配置参数\n :param args: ['mysql', 'port'] or \"mysql.port\"\n :return: None or section\n \"\"\"\n if isinstance(args, list) and len(args) != 2:\n return None\n elif isinstance(args, str):\n args = args.split('.')\n if len(args) != 2:\n return None\n\n return cg.get(args[0], args[1])\n","sub_path":"dht/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2285,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"5615908","text":"import requests\nimport json\nimport os\nfrom pprint import pprint\n\n\nHOUSTON_SERVICE_URL=os.environ['HOUSTON_SERVICE_URL']\nurl = f\"http://{HOUSTON_SERVICE_URL}/api/activator/\"\n \n# Additional headers.\nheaders = {'Content-Type': 'application/json' }\n\ndef typestest(resp):\n assert isinstance(resp['activator'], str)\n assert isinstance(resp['activatorLink'], str)\n assert isinstance(resp['apiManagement'], list)\n assert isinstance(resp['available'], bool)\n assert isinstance(resp['billing'], str)\n assert isinstance(resp['category'], str)\n assert isinstance(resp['ci'], list)\n assert isinstance(resp['cd'], list)\n assert isinstance(resp['description'], str)\n assert isinstance(resp['envs'], list)\n assert isinstance(resp['platforms'], list)\n assert isinstance(resp['regions'], list)\n assert isinstance(resp['sensitivity'], str)\n assert isinstance(resp['serverCapacity'], int)\n assert isinstance(resp['source'], str)\n assert isinstance(resp['sourceControl'], list)\n assert 
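get_nodes_info in the DHT record above slices each 26-byte compact node entry into a 20-byte node id, a 4-byte IP (via inet_ntoa), and a 2-byte big-endian port (via struct.unpack). A self-contained sketch of that decoding on a fabricated entry, using the same 20/24/26 offsets:

import os
from socket import inet_aton, inet_ntoa
from struct import pack, unpack

# Build one fake compact entry: 20-byte id + 4-byte IP + 2-byte port.
nid = os.urandom(20)
entry = nid + inet_aton("192.0.2.1") + pack("!H", 6881)

def decode_nodes(blob, size=26):
    if len(blob) % size:   # malformed payload, as guarded above
        return
    for i in range(0, len(blob), size):
        yield (blob[i:i + 20],                      # node id
               inet_ntoa(blob[i + 20:i + 24]),      # dotted-quad IP
               unpack("!H", blob[i + 24:i + 26])[0])  # port

for nid_, ip, port in decode_nodes(entry):
    print(ip, port)  # 192.0.2.1 6881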
isinstance(resp['status'], str)\n assert isinstance(resp['technologyOwner'], str)\n assert isinstance(resp['technologyOwnerEmail'], str)\n assert isinstance(resp['type'], str)\n assert isinstance(resp['userCapacity'], int)\n pprint(resp)\n\n\ndef test_activators():\n #Testing POST request\n resp_json = post()\n oid = str(resp_json['id'])\n #Testing Set Activator Status\n set_activator_status(resp_json['id'])\n #Testing PUT request\n put(oid)\n #Testing DELETE request\n delete(oid)\n #Testing GETALL request\n get_all()\n # Test GET Activator Meta\n get_meta()\n # Test Get Activator Categories\n get_categories()\n \n\ndef post():\n #Test POST Then GET\n # Body\n payload = {\n 'accessRequestedBy': 0,\n 'activator': 'test-activator',\n 'activatorLink': 'test-post-',\n 'apiManagement': [ 'test-post-', 'test-post-1', 'test-post-2', 'test-post-3', 'test-post-4', 'test-post-5' ],\n 'available': True,\n 'billing': 'test-post-',\n 'businessUnit': 'test-post-',\n 'category': 'test-post-',\n 'cd': [ 'test-post-1', 'test-post-2', 'test-post-3' ],\n 'ci': [ 'test-post-1', 'test-post-2' ],\n 'description': 'test-post-test-post-test-post-test-post-test-post-test-post-test-post-test-post-',\n 'envs': [ 'dev', 'prd', 'poc' ],\n 'hosting': [ 'test-post-1', 'test-post-2', 'test-post-3', 'test-post-4', 'test-post-5' ],\n 'id': 0,\n 'lastUpdated': 'test-post-',\n 'name': 'test-post-',\n 'platforms': [ 'test-post-1', 'test-post-2', 'test-post-3', 'test-post-4', 'test-post-5', 'test-post-6' ],\n 'regions': [ 'test-post-1', 'test-post-2', 'test-post-3', 'test-post-4', 'test-post-5' ],\n 'sensitivity': 'test-post-',\n 'serverCapacity': 999999999,\n 'source': 'test-post-',\n 'sourceControl': [ 'test-post-', 'test-post-1' ],\n 'status': 'Available',\n 'technologyOwner': 'test-post-',\n 'technologyOwnerEmail': 'test-post-',\n 'type': 'test-post-',\n 'userCapacity': 999999999\n }\n \n # convert dict to json by json.dumps() for body data.\n resp = requests.post(url, headers=headers, data=json.dumps(payload,indent=4))\n \n # Validate response headers and body contents, e.g. 
status code.\n resp_json = resp.json()\n assert resp.status_code == 201\n assert resp_json['activator'] == 'test-activator'\n oid = resp_json['id']\n print(oid)\n \n\n resp = requests.get(url+ str(oid), headers=headers)\n resp_json = resp.json()\n resp_headers = resp.headers\n \n #Validate response\n assert resp.status_code == 200\n assert resp_json['activator'] == 'test-activator'\n assert resp_headers['content-type'] == 'application/json'\n assert isinstance(resp_json['accessRequestedBy'], type(None))\n typestest(resp_json)\n return resp_json\n\n\ndef set_activator_status(oid):\n\n url = url = f\"http://{HOUSTON_SERVICE_URL}/api/setactivatorstatus/\"\n payload= {'accessRequestedBy': 0, 'id': oid, 'status': 'Locked' }\n resp = requests.post(url, headers=headers , data= json.dumps(payload,indent=4))\n resp_json = resp.json()\n #Validate response body for updated values\n assert resp.status_code == 200\n assert resp_json['id'] == oid\n assert resp_json['status'] == 'Locked'\n\n\ndef put(oid):\n\n # Test Update Then get new value\n newpayload = {\n 'activator': 'new-test-activator',\n 'accessRequestedBy': 0,\n 'activatorLink': 'test-put-',\n 'apiManagement': [ 'test-put-6', 'test-put-7', 'test-put-8' ],\n 'available': False,\n 'billing': 'billing',\n 'businessUnit': 'businessUnit',\n 'category': 'category',\n 'cd': [ 'test-put-4', 'test-put-5', 'test-put-6' ],\n 'ci': [ 'test-put-7', 'test-put-8' ],\n 'description': 'TheQuickBrownFoxJumpedOverTheLazyDogs',\n 'envs': [ 'dev', 'Prd', 'Poc' ],\n 'hosting': [ 'test-put-11', 'test-put-22', 'test-put-33', 'test-put-44', 'test-put-55' ],\n 'lastUpdated': 'fredbloggs',\n 'name': 'mynewactivatortest',\n 'platforms': [ 'test-put-101', 'test-put-102', 'test-put-103', 'test-put-104', 'test-put-105', 'test-put-106' ],\n 'regions': [ 'test-put-101', 'test-put-210', 'test-put-310', 'test-put-410', 'test-put-510' ],\n 'sensitivity': 'confidential',\n 'serverCapacity': 5,\n 'source': 'original',\n 'sourceControl': [ 'dotmatrix', 'tape' ],\n 'status': 'NotAvailable',\n 'technologyOwner': 'me',\n 'technologyOwnerEmail': 'me@me.com',\n 'type': 'best',\n 'userCapacity': 10\n }\n resp = requests.put(url+oid, headers=headers, data=json.dumps(newpayload,indent=4))\n\n #Validate update/Put response\n assert resp.status_code == 200\n\n #Get Request to get updated values\n resp = requests.get(url+oid, headers=headers)\n resp_json = resp.json()\n oid = resp_json['id']\n\n #Validate response body for updated values\n assert resp.status_code == 200\n assert resp_json['activator'] == 'new-test-activator'\n typestest(resp_json)\n\n\ndef delete(oid):\n\n #Test Delete Then GET\n resp = requests.delete(url+oid, headers=headers)\n assert resp.status_code == 200\n\n resp = requests.get(url+oid, headers=headers)\n #resp_json = resp.json()\n #Todo Ideally we should get 404 Need to check with Karwoo\n assert resp.status_code == 404\n\n\ndef get_all():\n\n geturl = f\"http://{HOUSTON_SERVICE_URL}/api/activators/\"\n resp = requests.get(geturl, headers=headers)\n assert resp.status_code == 200\n\n\ndef get_meta():\n\n url = f\"http://{HOUSTON_SERVICE_URL}/api/activator_meta/\"\n resp = requests.get(url, headers=headers)\n resp_json = resp.json()\n count = resp_json['count']\n #Validate response\n assert resp.status_code == 200\n assert count >=0\n\n\ndef get_categories():\n\n url = f\"http://{HOUSTON_SERVICE_URL}/api/activatorcategories/\"\n resp = requests.get(url, headers=headers)\n pprint(resp.json())\n #Validate response\n assert resp.status_code == 
200\n","sub_path":"tests/test_activator.py","file_name":"test_activator.py","file_ext":"py","file_size_in_byte":7095,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"451559686","text":"#! usr/bin/env python\n\nimport requests\nimport time\nimport os\nfrom twython import Twython, TwythonError\n\nAPI_KEY = os.environ.get(\"API_KEY\")\nAPI_SECRET = os.environ.get(\"API_SECRET\")\nOAUTH_TOKEN = os.environ.get(\"OAUTH_TOKEN\")\nOAUTH_SECRET = os.environ.get(\"OAUTH_SECRET\")\n\ntwitter = Twython(API_KEY, API_SECRET, OAUTH_TOKEN, OAUTH_SECRET)\n\ndef ping_bandcamp(bandname):\n url = \"http://\" + bandname + \".bandcamp.com\"\n if requests.get(url).history:\n return True\n return False\n\ndef wordlist(from_file):\n with open(from_file) as f:\n words = [w.strip() for w in f.readlines()]\n return words\n\ndef make_tweet():\n try:\n potential = wordlist('sowpods.txt').pop()\n if ping_bandcamp(potential):\n tweet = potential\n return tweet\n except IndexError:\n pass\n\ndef run():\n status = make_tweet()\n twitter.update_status(status=status)\n time.sleep(60) # maybe change this\n\nif __name__ == '__main__':\n while True:\n run()\n","sub_path":"bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":996,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"627389468","text":"\n# coding: utf-8\n\n# # Human Activity Recognition (HAR) exploration\n# \n# * Download data from [UCI repository](https://archive.ics.uci.edu/ml/machine-learning-databases/00240/)\n# * An predictive analaysis, using tree and linear boosters, based on pre-engineered features are previoulsy performed in this [notebook](https://rpubs.com/burakh/har_xgb)\n# * In this repo we revisit the problem using deep learning algorithms\n\n# In[3]:\n\n\n# Imports\nimport numpy as np\nimport os\nfrom har_exploration_lib import utilities\nfrom sklearn.model_selection import train_test_split\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport seaborn as sns\n#get_ipython().magic('matplotlib inline')\n\n\n# Load the data:\n\n# In[4]:\n\n\nX_train, labels_train, list_ch_train = utilities.read_data(data_path=\"./har_exploration_lib/data/\", split=\"train\") # train\nX_test, labels_test, list_ch_test = utilities.read_data(data_path=\"./har_exploration_lib/data/\", split=\"train\") # test\n\nassert list_ch_train == list_ch_test, \"Mistmatch in channels!\"\n\n\n# In[6]:\n\n\nprint (\"Training data shape: N = {:d}, steps = {:d}, channels = {:d}\".format(X_train.shape[0],\n X_train.shape[1],\n X_train.shape[2]))\nprint (\"Test data shape: N = {:d}, steps = {:d}, channels = {:d}\".format(X_test.shape[0],\n X_test.shape[1],\n X_test.shape[2]))\n\nprint (\"Channel names: \", list_ch_train)\n\n\nprint (\"labels_train: \", labels_train)\nprint (\"list_ch_train: \", list_ch_train)\nprint (\"X_train: \", X_train[0][127])\n\n\n# These signals are raw and no feature engineering has been performed on them. 
Let's check whether each signal is normalized:\n\n# In[7]:\n\n\n# Mean value for each channel at each step\nall_data = np.concatenate((X_train,X_test), axis = 0) # all_data.shape = (14704, 128, 9)\nmeans_ = np.zeros((all_data.shape[0],all_data.shape[2])) # means_ = (14704, 9)\nstds_ = np.zeros((all_data.shape[0],all_data.shape[2]))\n\nfor ch in range(X_train.shape[2]):# X_train.shape = (7352, 128, 9)\n means_[:,ch] = np.mean(all_data[:,:,ch], axis=1)\n stds_[:,ch] = np.std(all_data[:,:,ch], axis=1)\n \ndf_mean = pd.DataFrame(data = means_)\ndf_std = pd.DataFrame(data = stds_)\n\n\n# In[8]:\n\n\ndf_std.hist()\nplt.show()\n\n\n# In[9]:\n\n\ndf_mean.hist() # before standardization\nplt.show()\n\n\n# Some channels have mean values near 1, most close to 0. Let's standardize them all using the utilities:\n\n# In[10]:\n\n\nX_train, X_test = utilities.standardize(X_train, X_test)\n\n\n# In[11]:\n\n\n# Check Mean value for each channel at each step\nall_data = np.concatenate((X_train,X_test), axis = 0)\nmeans_ = np.zeros((all_data.shape[0],all_data.shape[2]))\nstds_ = np.zeros((all_data.shape[0],all_data.shape[2]))\n\nfor ch in range(X_train.shape[2]):\n means_[:,ch] = np.mean(all_data[:,:,ch], axis=1)\n stds_[:,ch] = np.std(all_data[:,:,ch], axis=1)\n \ndf_mean = pd.DataFrame(data = means_)\ndf_std = pd.DataFrame(data = stds_)\n\n\n# In[13]:\n\n\ndf_mean.hist() # now, standardized\nplt.show()\n\n\n# Now, all the means are 0 and standard deviations are 1.\n# \n# Let's see how the signals look like for a given example:\n\n# In[24]:\n\n\n# Set up the figure\ndata_lenght = all_data.shape[1] # 128\n\n\nfig = plt.figure(figsize = (10,10)) \nfig.subplots_adjust(left=0, right=1, bottom=0, top=1, hspace=0.2, wspace=0.4)\n\nblah = all_data[0,:,0]\nprint(\"all_data[0,:,0] =>\",blah.shape)\nprint(\"all_data[0,:,0] =>\\n\",blah)\n\n# Plot ... 
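utilities.standardize itself isn't shown in this record, but the surrounding checks imply it normalizes each (sample, channel) time series over its 128 steps to zero mean and unit variance. A broadcasting sketch of that transform, assuming exactly that behavior; the random data stands in for the HAR windows:

import numpy as np

def standardize(x):
    # x: (samples, steps, channels); normalize each sample/channel
    # series over the time axis, as the histograms above then verify.
    mean = x.mean(axis=1, keepdims=True)
    std = x.std(axis=1, keepdims=True)
    return (x - mean) / std

x = np.random.default_rng(2).normal(3.0, 2.0, size=(4, 128, 9))
z = standardize(x)
print(np.allclose(z.mean(axis=1), 0), np.allclose(z.std(axis=1), 1))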
for each channel\nx = np.arange(data_lenght)\nfor i in range(9):\n ax = fig.add_subplot(3, 3, i + 1)\n l1 = ax.plot(x,all_data[0,:,i], color = \"blue\")\n\nplt.show()","sub_path":"har_exploration_data.py","file_name":"har_exploration_data.py","file_ext":"py","file_size_in_byte":3781,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"142049029","text":"#coding:utf-8\n#必要なもののimport \nimport base64\nimport json\nfrom requests import Request, Session\nfrom io import BytesIO\nfrom PIL import Image\nimport pandas as pd\nimport jaconv\nimport re\nimport cv2\nimport numpy as np\nfrom symspellpy.symspellpy import SymSpell, Verbosity\nimport pickle\nfrom pykakasi import kakasi\nimport Levenshtein\nfrom collections import OrderedDict\nimport itertools\n\n#---------------------------------------------------------------------------------\n#歪みを修正する。imageにはpathが入る。\ndef yugami(image):\n img = cv2.imread(image)\n #画像をグレースケール化\n img_gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)\n\n #閾値を180にして2値化\n threshold = 105\n ret, img_thresh = cv2.threshold(img_gray, threshold, 255, cv2.THRESH_BINARY)\n\n #輪郭を取り出している\n img_1, contours, hierarchy = cv2.findContours(img_thresh , cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n\n #mensekiリストに輪郭を追加していっている。\n menseki=[ ]\n\n for i in range(0, len(contours)):\n menseki.append([contours[i],cv2.contourArea(contours[i])])\n\n menseki.sort(key=lambda x: x[1], reverse=True)\n\n #一番面積が大きいものを取り出している。\n cnt = menseki[0][0]\n\n #輪郭のギザギザを無くしている\n epsilon = 0.1*cv2.arcLength(cnt,True)\n #approxに隅の四点の座標が入っている\n approx = cv2.approxPolyDP(cnt,epsilon,True)\n\n #ギザギザをなくした後の描画\n #img3=cv2.drawContours(img, approx, 0,(0, 0, 255),10)\n\n #輪郭の表をリストにして、順番を整理\n approx=approx.tolist()\n\n left = sorted(approx,key=lambda x:x[0]) [:2]\n right = sorted(approx, key=lambda x: x[0])[2:]\n \n left_down= sorted(left,key=lambda x:x[0][1]) [0]\n left_up= sorted(left,key=lambda x:x[0][1]) [1]\n\n right_down= sorted(right,key=lambda x:x[0][1]) [0]\n right_up= sorted(right,key=lambda x:x[0][1]) [1]\n\n\n #補正前の角の座標\n perspective1 = np.float32([left_down,right_down,right_up,left_up])\n #A4の場合の補正後の角の座標\n width = right_down[0][0]-left_down[0][0]\n height=width*2340//1654\n #perspective2 = np.float32([[0, 0], [1654, 0], [1654, 2340], [0, 2340]])\n perspective2 = np.float32([[0, 0],[width, 0],[width, height],[0, height]])\n\n #変換に必要な行列\n psp_matrix = cv2.getPerspectiveTransform(perspective1,perspective2)\n #変換後\n #img_psp = cv2.warpPerspective(img, psp_matrix, (1654, 2340))\n img_psp = cv2.warpPerspective(img, psp_matrix, (width, height))\n return img_psp\n #cv2.imwrite(\"yugami-modified2/\"+str(num)+\".png\",img_psp)\n\n#---------------------------------------------------------------------------------\n#一番単純なOCRコード 投げたarrayをOCRする。\n#画像をCloud Vision APIに投げる\ndef recognize_image1(input_image):#最後にstr_encode_fileに変える\n #pathからbase64にする場合\n def pil_image_to_base64(img_path):###ここは最後に消す\n pil_image = Image.open(img_path)\n buffered = BytesIO()\n pil_image.save(buffered, format=\"PNG\")\n str_encode_file = base64.b64encode(buffered.getvalue()).decode(\"utf-8\")\n return str_encode_file\n\n #arrayからbase64にする場合\n def array_to_base64(img_array):\n pil_image = Image.fromarray(np.uint8(img_array))\n buffered = BytesIO()\n pil_image.save(buffered, format=\"PNG\")\n str_encode_file = base64.b64encode(buffered.getvalue()).decode(\"utf-8\")\n return str_encode_file \n \n def get_fullTextAnnotation(json_data):\n text_dict = json.loads(json_data)\n try:\n text = 
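array_to_base64 in the OCR record above turns a NumPy array into a base64-encoded PNG for the Vision API payload. A standalone round-trip sketch of that encoding — PNG is lossless, so decoding recovers the array exactly; the demo image is random:

import base64
from io import BytesIO

import numpy as np
from PIL import Image

def array_to_base64(img_array):
    # Encode an (H, W[, 3]) uint8 array as a base64 PNG string, as above.
    buf = BytesIO()
    Image.fromarray(np.uint8(img_array)).save(buf, format="PNG")
    return base64.b64encode(buf.getvalue()).decode("utf-8")

def base64_to_array(s):
    return np.array(Image.open(BytesIO(base64.b64decode(s))))

img = np.random.default_rng(3).integers(0, 256, (8, 8, 3), dtype=np.uint8)
assert np.array_equal(base64_to_array(array_to_base64(img)), img)  # lossless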
text_dict[\"responses\"][0][\"fullTextAnnotation\"][\"text\"]\n return text\n except:\n print(None)\n return None\n\n #str_encode_file = pil_image_to_base64(input_image) \n str_encode_file = array_to_base64(input_image)# input_imageがarrayの時\n str_url = \"https://vision.googleapis.com/v1/images:annotate?key=\"\n str_api_key = \"AIzaSyDlRRYrHEdjParsfRmh96_3xfafOo1crWY\"\n str_headers = {'Content-Type': 'application/json'}\n str_json_data = {\n 'requests': [\n {\n 'image': {\n 'content': str_encode_file\n },\n 'features': [\n {\n 'type': \"DOCUMENT_TEXT_DETECTION\",\n 'maxResults': 1\n }\n ]\n }\n ]\n }\n\n obj_session = Session()\n obj_request = Request(\"POST\",\n str_url + str_api_key,\n data=json.dumps(str_json_data),\n headers=str_headers\n )\n obj_prepped = obj_session.prepare_request(obj_request)\n obj_response = obj_session.send(obj_prepped,\n verify=True,\n timeout=60\n )\n\n if obj_response.status_code == 200:\n text = get_fullTextAnnotation(obj_response.text)\n \n return text\n else:\n return \"error\"\n\ndef type_check(img):\n height=img.shape[0]\n width=img.shape[1]\n\n img=img[height//2: 5 * height // 8, width*2//5:width*3//5]\n #img = cv2.resize(img,(int(img.shape[1]/5),int(img.shape[0]/5)))\n gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)\n #gray = cv2.GaussianBlur(gray,(5,5),5)\n \n# plt.gray()\n# plt.imshow(gray)\n\n edges = cv2.Canny(gray,50,150,apertureSize = 3)\n\n linesH = cv2.HoughLinesP(edges, rho=1, theta=np.pi / 360, threshold=50, minLineLength=50, maxLineGap=10)\n try:\n if len(linesH)!=0:\n for line in linesH:\n x1, y1, x2, y2 = line[0]\n if (x1-x2)**2<(y1-y2)**2:\n return \"B\"\n else:\n return \"A\"\n \n else:\n return \"A\"\n except:\n return \"A\"\n\n\n \n#ピンボケ検出\ndef boke(image):\n return cv2.Laplacian(image, cv2.CV_64F).var()\ndef boke_check(image):\n if boke(image)<200:\n return True\n else:\n return False\n#---------------------------------------------------------------------","sub_path":"docker/back/django_project/service/ocr/UNUSED/ancient codes/vision1_main.py","file_name":"vision1_main.py","file_ext":"py","file_size_in_byte":6281,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"478764410","text":"import numpy as np\r\nimport pylab as plt\r\n#from cmemristor_gai import stair_memristor\r\n##unknown ##tau_n## ##nucleation time depends on voltage\r\ntau = [0.2,25,50]\r\n\r\n##unknown ##propagation time depends on voltage\r\ntau_p = 10 \r\n\r\n##unknown ##the area of each zone normalized by total juntion area\r\nsi = [0.3, 0.3, 0.4]\r\n\r\n\r\n#si10 = 1.0/9.0\r\n\r\ndef hfunc(t):\r\n if t >= 0:\r\n return 1\r\n else:\r\n return 0\r\n\r\ntime = np.arange(0,60+0.1,0.1) ##simulation time\r\ns = np.zeros(len(time))\r\n\r\ndw = -5*10*(np.exp(-60/10) - np.exp(-time/10))\r\n#dw = 48 - (time - 12*((time/60)**5))\r\nt0 = 10\r\n\r\n#mem = np.zeros(len(time))\r\nfor i,t in enumerate(dw):\r\n if (t + t0) < tau[1]:\r\n tau_n = tau[0]\r\n ht = hfunc(t + t0 - tau_n)\r\n s[i] = 1.0 - si[0]*ht*(1.0 - np.exp( - ((t+t0-tau_n)/tau_p)**2.0 ) )\r\n elif (t + t0) < tau[2]:\r\n tau_n = tau[1]\r\n ht = hfunc(t + t0 - tau_n)\r\n s[i] = (1-si[0]) - si[1]*ht*(1.0 - np.exp( - ((t+t0-tau_n)/tau_p)**2.0 ) )\r\n else:\r\n tau_n = tau[2]\r\n ht = hfunc(t + t0 - tau_n)\r\n s[i] = (1-si[0]-si[1]) - si[2]*ht*(1.0 - np.exp( - ((t+t0-tau_n)/tau_p)**2.0 ) )\r\n \r\n##s the ratio ## down-to-up switching\r\nron = 1.0#/3.0\r\nroff = 40.0 \r\nmem = ron*roff/(s*(ron-roff) + roff)\r\ngmem = 1/mem\r\n\r\n###################\r\n##simulation 
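The memristor script above applies the scalar step function hfunc inside a per-element loop. NumPy's np.heaviside vectorizes that step; a sketch of the zone-1 branch of the switching formula rewritten element-wise (tau values here are placeholders, not the script's fitted parameters):

import numpy as np

t = np.linspace(-5, 5, 11)
tau_n, tau_p = 1.0, 10.0

# Vectorized equivalent of hfunc(t - tau_n) applied in the loop above;
# the second argument is the value returned at exactly zero.
ht = np.heaviside(t - tau_n, 1.0)

s = 1.0 - 0.3 * ht * (1.0 - np.exp(-((t - tau_n) / tau_p) ** 2))
print(s)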
parameters\r\ndt = 0.025 ##steps\r\n\r\n##trace parameters\r\ntau = 20 ##larger tau, longer trace\r\n#time = np.arange(0, 60 + dt, dt)\r\nA = 1 ## amplitude\r\n\r\n##trace\r\ndef trace(det):\r\n tx = np.zeros(len(time))\r\n for i ,t in enumerate(time):\r\n if t >= det:\r\n tx[i] = A* np.exp((-t+det)/tau)\r\n return tx\r\n\r\n## cal the trace\r\ntx1 = trace(0)\r\n\r\n####################\r\n\r\nplt.figure()\r\nplt.plot(time, tx1,'r--', -time, -tx1, 'r--', linewidth = 2)\r\n#plt.plot(time,mem)\r\nplt.scatter(time,(5-mem)/max(5-mem),s=2)\r\nplt.scatter(-time,-(5-mem)/max(5-mem),s=2)\r\nplt.grid(True)\r\nplt.xlabel(r'$\\Delta t$',fontsize = 16)\r\nplt.ylabel(r'$\\xi(\\%)$',fontsize = 16)\r\nplt.xlim([-61,61])\r\nplt.ylim([-1.1,1.1])\r\n\r\nplt.figure()\r\nplt.plot(time,dw)\r\nplt.show()\r\n","sub_path":"change mem.py","file_name":"change mem.py","file_ext":"py","file_size_in_byte":2106,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"286522762","text":"import sys\r\nimport numpy as np\r\nimport math\r\nimport matplotlib.pyplot as plt\r\nimport argparse\r\n\r\nfrom PIL import Image\r\n\r\ndef main(k,img): \r\n for i, row in enumerate(img):\r\n for j, col in enumerate(row):\r\n if row[j]*255 >= k:\r\n row[j] = 255\r\n else:\r\n row[j] = 0\r\n \r\n newPxl = np.asarray(img).astype(np.uint32) \r\n newImage = Image.fromarray(newPxl*255)\r\n \r\n return newImage\r\n\r\ndef convert(img):\r\n grayScaleImg = np.zeros(img.shape)\r\n for i, row in enumerate(img):\r\n for j, col in enumerate(row):\r\n pixel = img[i,j,:]\r\n grayScaleImg[i,j] = 255 * (pixel[0] + pixel[1] + pixel[2]) / 3\r\n\r\n grayScale = np.asarray(grayScaleImg).astype(np.uint32) \r\n image = Image.fromarray(grayScale)\r\n \r\n return grayScale\r\n\r\nif __name__ == '__main__':\r\n inImage = sys.argv[1]\r\n outImage = sys.argv[2]\r\n k = int(sys.argv[3])\r\n img = plt.imread(inImage)\r\n if len(img.shape) == 3:\r\n img = convert(img)\r\n newImage = main(k,img)\r\n #newImage.show()\r\n newImage.save(outImage)\r\n","sub_path":"ask1.py","file_name":"ask1.py","file_ext":"py","file_size_in_byte":1105,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"156475496","text":"from unittest.mock import MagicMock\n\nfrom mpf.tests.MpfTestCase import MpfTestCase\n\n\nclass TestKickback(MpfTestCase):\n\n def getConfigFile(self):\n return 'config.yaml'\n\n def getMachinePath(self):\n return 'tests/machine_files/kickback/'\n\n def test_kickback_with_ball_save(self):\n self.machine.default_platform.set_pulse_on_hit_rule = MagicMock()\n self.mock_event(\"kickback_kickback_test_fired\")\n self.assertFalse(self.machine.ball_saves.kickback_save.enabled)\n\n # kickback is not enabled. 
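ask1.py above binarizes the image pixel-by-pixel in nested Python loops. The same thresholding is a single comparison in NumPy; a sketch assuming, as the script does, that plt.imread returned floats in [0, 1] that must be rescaled to [0, 255] before comparing against k:

import numpy as np

def binarize(img, k):
    # Scale [0, 1] floats to [0, 255] and threshold, as the loops do.
    return np.where(img * 255 >= k, 255, 0).astype(np.uint8)

img = np.random.default_rng(4).random((4, 4))
print(binarize(img, 128))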
nothing should happen\n self.hit_and_release_switch(\"s_kickback\")\n self.advance_time_and_run(.01)\n self.assertEventNotCalled(\"kickback_kickback_test_fired\")\n\n # enable kickback\n self.post_event(\"kickback_enable\")\n self.advance_time_and_run(.01)\n\n # should write a hw rule\n self.machine.default_platform.set_pulse_on_hit_rule.assert_called_once_with(\n self.machine.kickbacks.kickback_test.switch.get_configured_switch(),\n self.machine.kickbacks.kickback_test.coil.get_configured_driver()\n )\n\n # a hit should fire it\n self.hit_and_release_switch(\"s_kickback\")\n self.advance_time_and_run(.01)\n self.assertEventCalled(\"kickback_kickback_test_fired\")\n\n # ball save should be enabled just in case\n self.assertTrue(self.machine.ball_saves.kickback_save.enabled)\n\n # but disable after 6s\n self.advance_time_and_run(6.1)\n self.assertFalse(self.machine.ball_saves.kickback_save.enabled)\n\n # it only works once though\n self.mock_event(\"kickback_kickback_test_fired\")\n self.hit_and_release_switch(\"s_kickback\")\n self.advance_time_and_run(.01)\n self.assertEventNotCalled(\"kickback_kickback_test_fired\")\n","sub_path":"mpf/tests/test_Kickback.py","file_name":"test_Kickback.py","file_ext":"py","file_size_in_byte":1810,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"48409494","text":"import math\ndef get_primes(input_list):\n result_list = list()\n for element in input_list:\n if is_prime(element):\n result_list.append(element)\n return result_list\n\ndef is_prime(number):\n if number > 1:\n if number == 2:\n return True\n if number % 2 == 0:\n return False\n for current in range(3, int(math.sqrt(number)+1), 2):\n if number % current == 0:\n return False\n return True\n return False\n\nif __name__ == '__main__':\n input_list = [1,2,3,4,5,6,7,8,9,10]\n result_list = get_primes(input_list)\n print(result_list)\n","sub_path":"Python/PythonCookbook/DatastructuresAndAlgorithms/prime_numbers.py","file_name":"prime_numbers.py","file_ext":"py","file_size_in_byte":562,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"323338843","text":"# Console output utilities\n\nimport re\nimport sys\nimport threading\nimport time\n\n_ind_len = 0\n_ind = \"\"\n\n_print_lock = threading.Lock()\n\n_stdout = sys.stdout\n\ndef SetStdout(fo):\n\tglobal _stdout\n\t_stdout = fo\n\n\ndef P(o, ind = 0, prefix = None):\n\twith _print_lock:\n\t\tglobal _ind_len, _ind\n\t\tif ind > 0:\n\t\t\t_ind_len += ind\n\t\t\tfor i in range(ind):\n\t\t\t\t_ind += \" \"\n\n\t\tif _ind_len > 0:\n\t\t\t#print str(o).split(\"\\n\")\n\t\t\tlines = str(o).split(\"\\n\")\n\t\t\tfor i in range(len(lines)):\n\t\t\t\tif (i == len(lines) - 1) and (len(lines[i]) == 0):\n\t\t\t\t\tcontinue\n\t\t\t\tif prefix is None:\n\t\t\t\t\t_stdout.write(_ind + lines[i] + \"\\n\")\n\t\t\t\telse:\n\t\t\t\t\t_stdout.write(prefix + _ind + lines[i] + \"\\n\")\n\t\telse:\n\t\t\tif prefix is not None:\n\t\t\t\t_stdout.write(prefix)\n\t\t\t_stdout.write(str(o))\n\t\t\t_stdout.write(\"\\n\")\n\n\t\tif ind > 0:\n\t\t\t_ind_len -= ind\n\t\t\t_ind = _ind[: len(_ind) - ind]\n\n\n# No new-line\ndef Pnnl(o, ind = 0):\n\twith _print_lock:\n\t\tglobal _ind_len, _ind\n\t\tif ind > 0:\n\t\t\t_ind_len += ind\n\t\t\tfor i in range(ind):\n\t\t\t\t_ind += \" \"\n\n\t\tif _ind_len > 0:\n\t\t\t#print str(o).split(\"\\n\")\n\t\t\tlines = str(o).split(\"\\n\")\n\t\t\tfor i in range(len(lines)):\n\t\t\t\tif (i == len(lines) - 1) and (len(lines[i]) == 
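The primes record above filters a list through trial division up to sqrt(n), skipping even divisors. The same logic compresses to a comprehension plus all(); a sketch with a quick self-check:

import math

def is_prime(n):
    # Trial division up to sqrt(n), odd divisors only, as above.
    if n < 2:
        return False
    if n == 2:
        return True
    if n % 2 == 0:
        return False
    return all(n % d for d in range(3, int(math.sqrt(n)) + 1, 2))

def get_primes(xs):
    return [x for x in xs if is_prime(x)]

assert get_primes(range(1, 11)) == [2, 3, 5, 7]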
0):\n\t\t\t\t\tcontinue\n\t\t\t\t_stdout.write(_ind + lines[i])\n\t\t\t\t_stdout.flush()\n\t\telse:\n\t\t\t_stdout.write(o)\n\t\t\t_stdout.flush()\n\n\t\tif ind > 0:\n\t\t\t_ind_len -= ind\n\t\t\t_ind = _ind[: len(_ind) - ind]\n\n\n# Measure time\nclass MT:\n\tdef __init__(self, msg, print_time=True):\n\t\tself.msg = msg\n\t\tself.print_time = print_time\n\n\tdef __enter__(self):\n\t\tP(self.msg)\n\t\tglobal _ind_len, _ind\n\t\t_ind_len += 2\n\t\t_ind += \" \"\n\t\tif self.print_time:\n\t\t\tself.start_time = time.time()\n\t\treturn self\n\n\tdef __exit__(self, type, value, traceback):\n\t\tglobal _ind_len, _ind\n\t\tif self.print_time:\n\t\t\tdur = time.time() - self.start_time\n\t\t\tP(\"%.0f ms\" % (dur * 1000.0))\n\t\t_ind_len -= 2\n\t\t_ind = _ind[: len(_ind) - 2]\n\n\n# No new-line\nclass MTnnl:\n\tdef __init__(self, msg, print_time=True):\n\t\tself.msg = msg\n\t\tself.print_time = print_time\n\n\tdef __enter__(self):\n\t\tglobal _ind_len, _ind\n\t\tPnnl(self.msg)\n\t\t_ind_len += 2\n\t\t_ind += \" \"\n\t\tif self.print_time:\n\t\t\tself.start_time = time.time()\n\t\treturn self\n\n\tdef __exit__(self, type, value, traceback):\n\t\tglobal _ind_len, _ind\n\t\tif self.print_time:\n\t\t\tdur = time.time() - self.start_time\n\t\t\tP(\"%.0f ms\" % (dur * 1000.0))\n\t\t_ind_len -= 2\n\t\t_ind = _ind[: len(_ind) - 2]\n\n\ndef sys_stdout_write(msg):\n\twith _print_lock:\n\t\t_stdout.write(msg)\n\t\t_stdout.flush()\n\n\ndef ClearLine():\n\tsys.stdout.write(\"\\033[1K\") # Clear to the beginning of line\n\tsys.stdout.write(\"\\033[1G\") # Move the cursor to the beginning of the column\n\n\nclass Indent:\n\tdef __init__(self, msg):\n\t\tself.msg = msg\n\n\tdef __enter__(self):\n\t\tglobal _ind_len, _ind\n\t\tP(self.msg)\n\t\t_ind_len += 2\n\t\t_ind += \" \"\n\t\treturn self\n\n\tdef __exit__(self, type, value, traceback):\n\t\tglobal _ind_len, _ind\n\t\t_ind_len -= 2\n\t\t_ind = _ind[: len(_ind) - 2]\n\n\ndef Test():\n\tP(\"aa\")\n\n\twith MT(\"dkdkdk\"):\n\t\tP(1.5)\n\t\tP(True)\n\n\tP(\"aa\\nbb\\n\\n cc\\n\\n dd\")\n\tP(1)\n","sub_path":"lib/util/Cons.py","file_name":"Cons.py","file_ext":"py","file_size_in_byte":2955,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"510215946","text":"from django.shortcuts import get_object_or_404\nfrom rest_framework.decorators import api_view\nfrom rest_framework.response import Response\nfrom drf_yasg.utils import swagger_auto_schema\nfrom recommend.models import Item\nfrom ..models import OrderDetail\nfrom ..serializers import (\n ItemSerializer, OrderDetailSerializer,\n NewOrderSerializer, DeliveryInfoSerializer\n)\nfrom ..swagger.responses import ErrorResponse, SuccessResponse\nfrom ..swagger.swagger import Swagger\nfrom ..swagger.req_params import RequestBody\n\n\n@swagger_auto_schema(method='get', responses=Swagger.list_order_detail_response.RESPONSE)\n@api_view(['GET'])\ndef show_order_details(request, order_id):\n '''\n 사용자의 구매 내역 상세 정보를 조회하고 구매 내역을 추가하는 API\n\n ---\n ## `/api/order//`\n ## 요청 패러미터 \n - order_id : 구매 내역의 id값\n ## 요청 형식\n - 'application/json'\n ## 응답 내용\n - order_detail : 구매 내역 상세 정보 \n - item_info : 구매 아이템 상세 정보\n '''\n data = {}\n \n order = get_object_or_404(OrderDetail, pk=order_id)\n serializer = OrderDetailSerializer(order)\n data['order_detail'] = serializer.data\n \n item_info = []\n items = order.items.all()\n for item in items:\n item_serializer = ItemSerializer(item)\n item_info.append(item_serializer.data)\n data['item_info'] = item_info\n\n return Response(data, 
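# The `Cons.py` record above hand-rolls its MT timing wrapper; a minimal
# sketch of the same measure-a-block idea built on contextlib (the helper
# name `timed` is hypothetical, not part of that module).
import time
from contextlib import contextmanager

@contextmanager
def timed(label):
    start = time.time()
    try:
        yield
    finally:
        print("%s: %.0f ms" % (label, (time.time() - start) * 1000.0))

with timed("sleepy block"):
    time.sleep(0.05)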
status=SuccessResponse.detail_listed.STATUS_CODE)\n\n\n@swagger_auto_schema(method='post',\n responses=Swagger.post_new_order_response.RESPONSE,\n request_body=RequestBody.post_new_order_request.PARAMS)\n@api_view(['POST'])\ndef create_new_order(request):\n '''\n 사용자의 구매 내역 상세 정보를 조회하고 구매 내역을 추가하는 API\n\n ---\n ## `/api/order/`\n ## 요청 패러미터\n - items : 아이템 목록 (['asin', 'asin'] 형태)\n - total_price : 주문 총 가격\n - first_name : 사용자 이름\n - last_name : 사용자 성 \n - email : 사용자 이메일\n - address : 사용자의 배송지 주소\n - postal_code : 배송지 우편번호 \n - is_saving_address : 배송지 주소를 저장할 지에 대한 True/False 값\n ## 요청 형식\n - 'application/json'\n ## 응답 내용\n - new_order : 새롭게 등록된 구매 내역\n - delivery_info : 사용자의 배송지 정보 \n '''\n serializer = NewOrderSerializer(data=request.data)\n\n if serializer.is_valid():\n user = request.user\n new_order = OrderDetail.objects.create(\n user_id=user,\n total_price=serializer.data['total_price'],\n first_name=serializer.validated_data['first_name'],\n last_name=serializer.validated_data['last_name'],\n email=serializer.validated_data['email'],\n address=serializer.validated_data['address'],\n postal_code=serializer.validated_data['postal_code']\n )\n for asin in serializer.validated_data['items']:\n item = get_object_or_404(Item, pk=asin)\n new_order.items.add(item)\n new_order.save()\n\n if serializer.validated_data['is_saving_address'] == True:\n user.first_name = serializer.validated_data['first_name']\n user.last_name = serializer.validated_data['last_name']\n user.email = serializer.validated_data['email']\n user.address = serializer.validated_data['address']\n user.postal_code = serializer.validated_data['postal_code']\n user.save()\n\n new_order_response = OrderDetailSerializer(new_order)\n delivery_info_response = DeliveryInfoSerializer(user)\n data = {}\n data['new_order'] = new_order_response.data\n data['delivery_info'] = delivery_info_response.data\n return Response(data, status=SuccessResponse.item_added.STATUS_CODE)\n \n return Response(serializer.errors, status=ErrorResponse.data_not_valid.STATUS_CODE)","sub_path":"backend/mypage/views/order_views.py","file_name":"order_views.py","file_ext":"py","file_size_in_byte":4089,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"331853176","text":"'''\nCreated on Oct 14, 2015 @author: vkholin\nTHIS FILES CONTAINS:\n1) needs comments\n'''\nimport time\n\nfrom allElements import Locators_Class\nfrom commonMethods import Common_Methods_Class\nimport commonMethods\n\nclass Lab_Class(Common_Methods_Class):\n\n def verifyBasicElements(self):\n commonMethods.f.write(\"\\n____verifyBasicElements\")\n data = Lab_Class()\n com = Common_Methods_Class()\n locat = Locators_Class();\n \n# com.verifyElementPresent(driver, locat.lab_elements()[\"status_up_el\"]) \n com.compare(locat.lab_elements()[\"status_up_el\"], data.data_lab()[\"statusUp\"])\n# com.verifyElementPresent(driver, locat.lab_elements()[\"remoteDesk_wndw_hdr\"])\n com.compareTextElementFromDropdown(locat.lab_elements()[\"remoteDesk_viewer\"], data.data_lab()[\"viewerRT\"], )\n com.log('^^^DONE^^^ verifyBasicElements')\n \n def remoteDesktop(self):\n commonMethods.f.write(\"\\n____remoteDesktop\")\n data = Lab_Class()\n com = Common_Methods_Class()\n locat = Locators_Class();\n \n com.clickElement(locat.lab_elements()[\"remoteDesk_wndw_img\"])\n time.sleep(5)\n# com.verifyElementPresent(driver, locat.lab_elements()[\"remoteDesk_canvas\"])\n com.log('^^^DONE^^^ remoteDesktop')\n\n\n def data_lab(self):\n return {\"statusUp\": 
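# The `order_views.py` record above follows DRF's validate-then-create flow
# (is_valid -> validated_data -> objects.create); a stripped-down sketch of
# that flow with a plain Serializer, assuming an already configured
# Django/DRF project -- the fields here are illustrative, not the project's.
from rest_framework import serializers

class MiniOrderSerializer(serializers.Serializer):
    total_price = serializers.DecimalField(max_digits=10, decimal_places=2)
    email = serializers.EmailField()

s = MiniOrderSerializer(data={"total_price": "19.99", "email": "buyer@example.com"})
if s.is_valid():
    print(s.validated_data)  # cleaned, typed values ready for objects.create(...)
else:
    print(s.errors)          # field -> messages, returned with the error status code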
\"Up\",\n \"viewerRT\": \"ReadyTech Viewer\",\n }\n\n\n\n\n\n\n\n\n\n\n","sub_path":"RT_Projects/ILP_PY/MAT_acceptance_test/page_lab.py","file_name":"page_lab.py","file_ext":"py","file_size_in_byte":1423,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"406205818","text":"from flask import Flask, request, jsonify,render_template\nimport dtt_test\nimport diabetes_db\napp = Flask(__name__)\n\n\n@app.route('/get_predict_diabetes', methods=['GET','POST'])\ndef get_predict_diabetes():\n if request.method == 'POST':\n Preg = int(request.form['Pregnancies'])\n gluco = int(request.form['Glucose'])\n BP = int(request.form['BloodPressure'])\n ST = int(request.form['SkinThickness'])\n Ins = int(request.form['Insulin'])\n bmi = float(request.form['BMI'])\n diabetespred = float(request.form['DiabetesPedigreeFunction'])\n age = int(request.form['Age'])\n \n prediction = dtt_test.decision_tree().predict_diabeties(Preg,gluco,BP,ST,Ins,bmi,diabetespred,age)\n diabetes_db.save_diabetes_details(Preg,gluco,BP,ST,Ins,bmi,diabetespred,age)\n return \"The Predicted diabetes is {} \".format(prediction) \n\n\nif __name__ == \"__main__\":\n print(\"Starting Python Flask Server For Diabetes Prediction...\")\n app.run(debug = False)","sub_path":"run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":1013,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"402225330","text":"import pandas as pd\nimport os\nimport glob\nimport csv\n\ndef extract_columns_to_csv(data, columns=[0, 24, 34], name=\"extract.csv\", mode='a'):\n data.to_csv(name, columns=columns, mode=mode)\n\ndef create_single_csv_from_pdtb(path=\"pdtb_sentences.csv\"):\n \"\"\"Creates a single .csv containing all pair sentences in the pdtb database\n\n Args:\n path (str, optional): Path to the file being created. 
Defaults to \"pdtb_sentences.csv\".\n \"\"\"\n file_list = glob.glob(\"pdtb/*/*.pipe\")\n for f_path in file_list:\n # read as csv\n data = pd.read_csv(\n f_path,\n sep='|',\n header=None,\n dtype='string',\n error_bad_lines=False,\n engine='python'\n )\n # extract colmns to csv\n extract_columns_to_csv(data, name=path, mode='a')\n\ndef split_single_csv_into_relation_type_files(path):\n data_dict = {}\n with open(path, 'r') as f:\n csv_file = csv.reader(f, delimiter=',')\n for row in csv_file:\n if row != ['', '0', '24', '34']:\n # row[1], row[2], row[2]\n if row[1] not in data_dict.keys():\n data_dict[row[1]] = []\n else:\n data_dict[row[1]].append(row[2:])\n\n for section in data_dict.keys():\n path_to_file = \"pdtb_split/pdtb\" + str(section) + \".pipe.csv\"\n with open(path_to_file, 'w') as f:\n csv_file = csv.writer(f, delimiter='|')\n for row in data_dict[section]:\n csv_file.writerow(row)\n return data_dict\n","sub_path":"pdtb_sentence/pdtb_preprocess.py","file_name":"pdtb_preprocess.py","file_ext":"py","file_size_in_byte":1567,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"396786974","text":"import niGraphParser\nimport niGraph\n\nimport sys\nimport glob\nimport os\nimport random\nimport time\nimport re\nfrom matplotlib import pyplot\nimport networkx as nx\nimport networkx.algorithms.shortest_paths.dense as dense\nimport xml.etree.ElementTree as ET\nimport networkx.algorithms.simple_paths as nxPaths\n\ngraphsDir = \"..\\DataSets\"\n\n\n\ndef visit(node, sortedNodes, temp, perm, unmarked):\n if node in temp:\n raise Exception\n if node in unmarked:\n unmarked.remove(node)\n temp.add(node)\n for outEdge in (e for e in node.outEdges if not e.isFeedback):\n nextNode = outEdge.target\n visit(nextNode, sortedNodes, temp, perm, unmarked)\n temp.remove(node)\n perm.add(node)\n# sortedNodes.insert(0, node)\n sortedNodes.append(node)\n\n\ndef topologicalSort(graph):\n sortedNodes = []\n tempMarks = set()\n permMarks = set()\n unmarkedVertices = set(graph.getVertices())\n\n while len(unmarkedVertices):\n node = next(iter(unmarkedVertices))\n visit(node, sortedNodes, tempMarks, permMarks, unmarkedVertices)\n\n return sortedNodes\n\ndef longestPathFromTopolSort(sortedNodes):\n pathLength = {}\n for node in sortedNodes:\n pathLength[node] = 0\n\n for node in sortedNodes:\n for inEdge in (e for e in node.inEdges if not e.isFeedback):\n prevNode = inEdge.source\n\n pathLength[prevNode] = max(pathLength[prevNode], pathLength[node] + 1)\n\n return max(pathLength.values())\n\ndef createDOT(graph, filePath):\n fp = open(filePath, \"w\")\n fp.write(\"strict digraph {\\n\");\n for edge in graph.getEdges():\n fp.write(\"n\" + str(edge.source.vertexId) + \" -> n\" + str(edge.target.vertexId) + \"\\n\")\n fp.write(\"}\\n\")\n\ndef calculateDAGPolarPaths(graph):\n sortedVertices = topologicalSort(graph)\n\n possiblePaths = {};\n totalPaths = 0;\n for node in sortedVertices:\n #print (node.vertexId)\n if len(node.outEdges) == 0:\n #print (\"terminal\")\n possiblePaths[node] = 1;\n else:\n #print(\"non-terminal\");\n sum = 0;\n for edge in node.outEdges:\n if not edge.isFeedback:\n sum = sum + possiblePaths[edge.target]\n possiblePaths[node] = sum;\n if len(node.inEdges) == 0:\n totalPaths = totalPaths + sum;\n return totalPaths\n\ndef main():\n sys.setrecursionlimit(10000)\n# return\n\n graphs = glob.glob(os.path.join(graphsDir, \"*.graphml\"))\n\n graphs.sort(key = lambda x: int(re.match(\".*DelayGraph_(\\d+)\\.graphml\", x).group(1)))\n graphs = 
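# The `visit`/`topologicalSort` pair above is a recursive DFS (hence the
# sys.setrecursionlimit(10000) call in main); a sketch of Kahn's iterative
# algorithm over a plain adjacency dict, which avoids deep recursion. The
# dict-of-lists graph shape is an assumption, not the niGraph API.
from collections import deque

def kahn_toposort(adj):
    indeg = {u: 0 for u in adj}
    for u in adj:
        for v in adj[u]:
            indeg[v] = indeg.get(v, 0) + 1
    queue = deque(u for u in adj if indeg[u] == 0)
    order = []
    while queue:
        u = queue.popleft()
        order.append(u)
        for v in adj.get(u, ()):
            indeg[v] -= 1
            if indeg[v] == 0:
                queue.append(v)
    return order  # shorter than the node count iff the graph has a cycle

print(kahn_toposort({"a": ["b", "c"], "b": ["c"], "c": []}))  # -> ['a', 'b', 'c']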
graphs[23:24]\n\n graph = niGraphParser.parseGraphMlFile(graphs[0])\n createDOT(graph, \"graph0.dot\")\n\n sizes = []\n times = []\n\n fp = open(\"522r_asst1_results_paths_count.csv\",\"w\")\n\n for graphPath in graphs:\n graph = niGraphParser.parseGraphMlFile(graphPath)\n\n pathMatch = re.match(\"(.*)DelayGraph(_\\d+)\\.graphml\", graphPath)\n\n originalTargetPath = pathMatch.group(1) + \"OriginalGoals\" + \\\n pathMatch.group(2) + \".xml\"\n\n print(originalTargetPath)\n\n tree = ET.parse(originalTargetPath)\n root = tree.getroot()\n\n targetPeriod = int(root.find('TargetClockPeriodInPicoSeconds').text)\n\n startTime = time.time()\n\n D=nx.DiGraph()\n for edge in graph.getEdges():\n D.add_edge(edge.source.vertexId,edge.target.vertexId, weight=-edge.delay)\n\n #dist = defaultdict(lambda : defaultdict(lambda: float('inf')))\n D_floyd = dense.floyd_warshall(D)\n\n W=nx.DiGraph()\n for edge in graph.getEdges():\n weightValue = 1 if edge.source.isRegistered else 0\n W.add_edge(edge.source.vertexId,edge.target.vertexId, weight=weightValue)\n\n W_floyd = dense.floyd_warshall(W)\n\n pairExceeding = 0\n pairs = []\n for i,v1 in D_floyd.items():\n for j,v2 in v1.items():\n if v2 < - targetPeriod:\n startReg = 1 if graph.vertices[\"n\" + str(i)].isRegistered else 0\n if W_floyd[i][j] - startReg < 1:\n pairExceeding = pairExceeding + 1\n pairs.append((i,j))\n #for dist in floyd.values():\n # print (dist)\n #if dist < - targetPeriod:\n # pairExceeding = pairExceeding + 1\n\n print(\"pairs over: \" + str(pairExceeding))\n\n #allPossiblePaths = 0;\n #for i,j in pairs:\n # allPossiblePaths = allPossiblePaths + \\\n # len(list(nxPaths.all_simple_paths(W,i,j)))\n\n #print(\"possible paths over: \" + str(allPossiblePaths))\n #print (floyd)\n\n\n\n #totalPaths = len(list(nx.simple_cycles(D))) # calculateDAGPolarPaths(graph)\n totalPaths = pairExceeding\n durationTime = time.time() - startTime\n\n sizes.append(len(graph.getVertices()) + len(graph.getEdges()))\n times.append(durationTime)\n\n #longestPath = longestPathFromTopolSort(sortedVertices)\n\n fp.write(graphPath + \",\")\n fp.write(str(len(graph.vertices))+\",\")\n fp.write(str(len(graph.edges))+\",\")\n fp.write(str(len(graph.getVertices()) + len(graph.getEdges())) + \",\")\n fp.write(str(durationTime) + \",\")\n fp.write(str(totalPaths))\n fp.write(\"\\n\")\n print (graphPath, len(graph.vertices), len(graph.edges), totalPaths)\n #break\n fp.close()\n# print(\"Topological sort took \" + str(durationTime) + \" seconds\")\n\n# pyplot.loglog(sizes, times, \"o\")\n# pyplot.xscale(\"log\")\n# pyplot.yscale(\"log\")\n# pyplot.show()\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"py/522r_asst1.py","file_name":"522r_asst1.py","file_ext":"py","file_size_in_byte":5675,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"399094349","text":"# -*- coding: utf-8 -*-\n\n\"\"\"\n :copyright: (C) 2010-2013 by Contrail Consortium.\n\"\"\"\n\nfrom os.path import exists, join\nfrom os import remove\nfrom threading import Lock\nimport pickle\n\n#from conpaas.core.misc import get_ip_address\nfrom conpaas.core.agent import BaseAgent, AgentException\nfrom conpaas.services.mysql.agent import role\n\nfrom conpaas.core.https.server import HttpErrorResponse, HttpJsonResponse, \\\n FileUploadField\nfrom conpaas.core.expose import expose\n\n\nclass MySQLAgent(BaseAgent):\n\n def __init__(self, config_parser):\n BaseAgent.__init__(self, config_parser)\n self.config_parser = config_parser\n\n self.my_ip = 
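# `calculateDAGPolarPaths` above counts source-to-sink paths with dynamic
# programming over a children-first node order (which is what its
# topologicalSort produces, since nodes are appended after their subtree);
# the same idea on a plain adjacency dict, with the order assumed given.
def count_polar_paths(adj, sinks_first):
    paths_from = {}
    for node in sinks_first:  # every successor is processed before node
        outs = adj.get(node, [])
        paths_from[node] = 1 if not outs else sum(paths_from[v] for v in outs)
    sources = set(adj) - {v for outs in adj.values() for v in outs}
    return sum(paths_from[s] for s in sources)

adj = {"s": ["a", "b"], "a": ["t"], "b": ["t"], "t": []}
print(count_polar_paths(adj, ["t", "a", "b", "s"]))  # two paths: s-a-t, s-b-t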
config_parser.get('agent', 'MY_IP')\n self.VAR_TMP = config_parser.get('agent', 'VAR_TMP')\n self.VAR_CACHE = config_parser.get('agent', 'VAR_CACHE')\n self.VAR_RUN = config_parser.get('agent', 'VAR_RUN')\n\n self.master_file = join(self.VAR_TMP, 'master.pickle')\n self.slave_file = join(self.VAR_TMP, 'slave.pickle')\n\n self.master_lock = Lock()\n self.slave_lock = Lock()\n\n def _get(self, get_params, class_file, pClass):\n if not exists(class_file):\n return HttpErrorResponse(AgentException(\n AgentException.E_CONFIG_NOT_EXIST).message)\n try:\n fd = open(class_file, 'r')\n p = pickle.load(fd)\n fd.close()\n except Exception as e:\n ex = AgentException(AgentException.E_CONFIG_READ_FAILED,\n pClass.__name__, class_file, detail=e)\n self.logger.exception(ex.message)\n return HttpErrorResponse(ex.message)\n else:\n return HttpJsonResponse({'return': p.status()})\n\n def _create(self, post_params, class_file, pClass):\n if exists(class_file):\n return HttpErrorResponse(AgentException(\n AgentException.E_CONFIG_EXISTS).message)\n try:\n if type(post_params) != dict:\n raise TypeError()\n self.logger.debug('Creating class')\n p = pClass(**post_params)\n self.logger.debug('Created class')\n except (ValueError, TypeError) as e:\n ex = AgentException(AgentException.E_ARGS_INVALID, detail=str(e))\n self.logger.exception(e)\n return HttpErrorResponse(ex.message)\n except Exception as e:\n ex = AgentException(AgentException.E_UNKNOWN, detail=e)\n self.logger.exception(e)\n return HttpErrorResponse(ex.message)\n else:\n try:\n self.logger.debug('Opening file %s' % class_file)\n fd = open(class_file, 'w')\n pickle.dump(p, fd)\n fd.close()\n except Exception as e:\n ex = AgentException(AgentException.E_CONFIG_COMMIT_FAILED,\n detail=e)\n self.logger.exception(ex.message)\n return HttpErrorResponse(ex.message)\n else:\n self.logger.debug('Created class file')\n return HttpJsonResponse()\n\n def _stop(self, get_params, class_file, pClass):\n if not exists(class_file):\n return HttpErrorResponse(AgentException(\n AgentException.E_CONFIG_NOT_EXIST).message)\n try:\n try:\n fd = open(class_file, 'r')\n p = pickle.load(fd)\n fd.close()\n except Exception as e:\n ex = AgentException(AgentException.E_CONFIG_READ_FAILED,\n detail=e)\n self.logger.exception(ex.message)\n return HttpErrorResponse(ex.message)\n p.stop()\n remove(class_file)\n return HttpJsonResponse()\n except Exception as e:\n ex = AgentException(AgentException.E_UNKNOWN, detail=e)\n self.logger.exception(e)\n return HttpErrorResponse(ex.message)\n\n ###########################################################################\n # methods executed on a MySQL Master #\n ###########################################################################\n def _master_get_params(self, kwargs):\n ret = {}\n if 'master_server_id' not in kwargs:\n raise AgentException(AgentException.E_ARGS_MISSING,\n 'master_server_id')\n ret['master_server_id'] = kwargs.pop('master_server_id')\n if len(kwargs) != 0:\n raise AgentException(AgentException.E_ARGS_UNEXPECTED,\n kwargs.keys())\n ret['config'] = self.config_parser\n return ret\n\n def _slave_get_params(self, kwargs):\n ret = {}\n if 'slaves' not in kwargs:\n raise AgentException(AgentException.E_ARGS_MISSING, 'slaves')\n ret = kwargs.pop('slaves')\n\n if len(kwargs) != 0:\n raise AgentException(AgentException.E_ARGS_UNEXPECTED,\n kwargs.keys())\n\n return ret\n\n # TODO: clean code\n def _take_snapshot(self):\n if not exists(self.master_file):\n return HttpErrorResponse(AgentException(\n 
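# The agent above persists its role objects with pickle through bare
# open/load/close calls in text mode; a minimal round-trip sketch using
# `with` blocks and the binary modes pickle needs on Python 3 ('rb'/'wb').
import pickle

state = {"master_server_id": 1, "slaves": []}
with open("master.pickle", "wb") as fd:
    pickle.dump(state, fd)
with open("master.pickle", "rb") as fd:
    restored = pickle.load(fd)
print(restored == state)  # -> True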
AgentException.E_CONFIG_NOT_EXIST).message)\n try:\n fd = open(self.master_file, 'r')\n p = pickle.load(fd)\n ret = p.take_snapshot()\n fd.close()\n except Exception as e:\n ex = AgentException(AgentException.E_CONFIG_READ_FAILED,\n role.MySQLMaster.__name__,\n self.master_file,\n detail=e)\n self.logger.exception(ex.message)\n raise\n else:\n return ret\n\n def _set_password(self, username, password):\n if not exists(self.master_file):\n return HttpErrorResponse(AgentException(\n AgentException.E_CONFIG_NOT_EXIST).message)\n try:\n fd = open(self.master_file, 'r')\n p = pickle.load(fd)\n p.set_password(username, password)\n fd.close()\n except Exception as e:\n ex = AgentException(AgentException.E_CONFIG_READ_FAILED,\n role.MySQLMaster.__name__,\n self.master_file,\n detail=e)\n self.logger.exception(ex.message)\n raise\n\n def _register_slave(self, slave_ip):\n if not exists(self.master_file):\n return HttpErrorResponse(AgentException(\n AgentException.E_CONFIG_NOT_EXIST).message)\n try:\n fd = open(self.master_file, 'r')\n p = pickle.load(fd)\n p.register_slave(slave_ip)\n fd.close()\n except Exception as e:\n ex = AgentException(AgentException.E_CONFIG_READ_FAILED,\n role.MySQLMaster.__name__,\n self.master_file,\n detail=e)\n self.logger.exception(ex.message)\n raise\n\n def _load_dump(self, f):\n if not exists(self.master_file):\n return HttpErrorResponse(AgentException(\n AgentException.E_CONFIG_NOT_EXIST).message)\n try:\n fd = open(self.master_file, 'r')\n p = pickle.load(fd)\n p.load_dump(f)\n fd.close()\n except Exception as e:\n ex = AgentException(AgentException.E_CONFIG_READ_FAILED,\n role.MySQLMaster.__name__,\n self.master_file,\n detail=e)\n self.logger.exception(ex.message)\n raise\n\n @expose('POST')\n def create_master(self, kwargs):\n \"\"\"Create a replication master\"\"\"\n self.logger.debug('Creating master')\n try:\n kwargs = self._master_get_params(kwargs)\n self.logger.debug('master server id = %s'\n % kwargs['master_server_id'])\n except AgentException as e:\n return HttpErrorResponse(e.message)\n else:\n with self.master_lock:\n return self._create(kwargs, self.master_file, role.MySQLMaster)\n\n @expose('POST')\n def set_password(self, kwargs):\n \"\"\"Create a replication master\"\"\"\n self.logger.debug('Updating password')\n try:\n if 'username' not in kwargs:\n raise AgentException(AgentException.E_ARGS_MISSING, 'username')\n username = kwargs.pop('username')\n if 'password' not in kwargs:\n raise AgentException(AgentException.E_ARGS_MISSING, 'password')\n password = kwargs.pop('password')\n if len(kwargs) != 0:\n raise AgentException(AgentException.E_ARGS_UNEXPECTED,\n kwargs.keys())\n self._set_password(username, password)\n return HttpJsonResponse()\n except AgentException as e:\n return HttpErrorResponse(e.message)\n\n @expose('UPLOAD')\n def load_dump(self, kwargs):\n self.logger.debug('Uploading mysql dump ')\n self.logger.debug(kwargs)\n #TODO: archive the dump?\n if 'mysqldump_file' not in kwargs:\n return HttpErrorResponse(AgentException(\n AgentException.E_ARGS_MISSING, 'mysqldump_file').message)\n file = kwargs.pop('mysqldump_file')\n if not isinstance(file, FileUploadField):\n return HttpErrorResponse(AgentException(\n AgentException.E_ARGS_INVALID,\n detail='\"mysqldump_file\" should be a file').message)\n try:\n self._load_dump(file.file)\n except AgentException as e:\n return HttpErrorResponse(e.message)\n else:\n return HttpJsonResponse()\n\n @expose('POST')\n def create_slave(self, kwargs):\n '''\n Creates a slave. Steps:\n 1. 
do a mysqldump and record position\n 2. send the dump to the slave agent and let it\n start the mysql slave\n '''\n self.logger.debug('master in create_slave ')\n ret = self._take_snapshot()\n\n # TODO: why multiple keys?\n for position in ret.keys():\n master_log_file = ret[position]['binfile']\n master_log_pos = ret[position]['position']\n mysqldump_path = ret[position]['mysqldump_path']\n try:\n kwargs = self._slave_get_params(kwargs)\n for key in kwargs:\n # TODO: Why do I receive the slave_ip in unicode??\n slave = kwargs[key]\n self._register_slave(str(slave['ip']))\n from conpaas.services.mysql.agent import client\n client.setup_slave(str(slave['ip']),\n slave['port'],\n key,\n self.my_ip,\n master_log_file,\n master_log_pos,\n mysqldump_path)\n self.logger.debug('Created slave %s' % str(slave['ip']))\n return HttpJsonResponse()\n except AgentException as e:\n return HttpErrorResponse(e.message)\n\n ###########################################################################\n # methods executed on a MySQL Slave #\n ###########################################################################\n def _slave_get_setup_params(self, kwargs):\n ret = {}\n if 'mysqldump_file' not in kwargs:\n return HttpErrorResponse(AgentException(\n AgentException.E_ARGS_MISSING, 'mysqldump_file').message)\n file = kwargs.pop('mysqldump_file')\n if not isinstance(file, FileUploadField):\n return HttpErrorResponse(AgentException(\n AgentException.E_ARGS_INVALID,\n detail='\"mysqldump_file\" should be a file').message)\n ret['mysqldump_file'] = file.file\n\n if 'master_host' not in kwargs:\n return HttpErrorResponse(AgentException(\n AgentException.E_ARGS_MISSING, 'master_host').message)\n ret['master_host'] = kwargs.pop('master_host')\n\n if 'master_log_file' not in kwargs:\n return HttpErrorResponse(AgentException(\n AgentException.E_ARGS_MISSING, 'master_log_file').message)\n ret['master_log_file'] = kwargs.pop('master_log_file')\n\n if 'master_log_pos' not in kwargs:\n return HttpErrorResponse(AgentException(\n AgentException.E_ARGS_MISSING, 'master_log_pos').message)\n ret['master_log_pos'] = kwargs.pop('master_log_pos')\n\n if 'slave_server_id' not in kwargs:\n return HttpErrorResponse(AgentException(\n AgentException.E_ARGS_MISSING, 'slave_server_id').message)\n ret['slave_server_id'] = kwargs.pop('slave_server_id')\n\n if len(kwargs) != 0:\n return HttpErrorResponse(AgentException(\n AgentException.E_ARGS_UNEXPECTED, kwargs.keys()).message)\n\n ret['config'] = self.config_parser\n return ret\n\n @expose('UPLOAD')\n def setup_slave(self, kwargs):\n \"\"\"Create a replication Slave\"\"\"\n self.logger.debug('slave in setup_slave ')\n self.logger.debug(kwargs)\n #TODO: archive the dump?\n try:\n kwargs = self._slave_get_setup_params(kwargs)\n except AgentException as e:\n return HttpErrorResponse(e.message)\n else:\n with self.slave_lock:\n return self._create(kwargs, self.slave_file, role.MySQLSlave)\n\n # TODO: Update slave - if manager changes!\n","sub_path":"conpaas-services/src/conpaas/services/mysql/agent/internals.py","file_name":"internals.py","file_ext":"py","file_size_in_byte":13889,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"610137537","text":"\n\n#calss header\nclass _NIBS():\n\tdef __init__(self,): \n\t\tself.name = \"NIBS\"\n\t\tself.definitions = [u'a man who is in a position of authority or who thinks he is more important than he really is: ']\n\n\t\tself.parents = []\n\t\tself.childen = []\n\t\tself.properties = 
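# The `_*_get_params` helpers above all repeat the same pop-and-complain
# routine; a compact sketch of that idea as one reusable function (the
# helper name is hypothetical, not part of the agent's API).
def extract_required(kwargs, required):
    missing = [k for k in required if k not in kwargs]
    if missing:
        raise ValueError("missing arguments: %s" % ", ".join(missing))
    out = {k: kwargs.pop(k) for k in required}
    if kwargs:  # anything left over was not expected
        raise ValueError("unexpected arguments: %s" % ", ".join(kwargs))
    return out

print(extract_required({"master_host": "10.0.0.2", "master_log_pos": 107},
                       ["master_host", "master_log_pos"]))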
[]\n\t\tself.jsondata = {}\n\n\n\t\tself.specie = 'nouns'\n\n\n\tdef run(self, obj1 = [], obj2 = []):\n\t\treturn self.jsondata\n","sub_path":"xai/brain/wordbase/nouns/_nibs.py","file_name":"_nibs.py","file_ext":"py","file_size_in_byte":370,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"312162233","text":"from django.conf import settings\nfrom django.contrib.contenttypes.fields import GenericForeignKey\nfrom django.contrib.contenttypes.models import ContentType\nfrom django.contrib.sites.models import Site\nfrom django.db import models\nfrom django.urls import reverse\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.utils import timezone\n\nfrom tango_shared.models import BaseUserContentModel\n\nfrom .managers import CommentManager\n\n\nCOMMENT_MAX_LENGTH = getattr(settings, 'COMMENT_MAX_LENGTH', 3000)\n\n\nclass Comment(BaseUserContentModel):\n \"\"\"\n A user comment about some object.\n\n Unlike django.contrib.comments, we're not allowing unauthenticated users to comment\n We've learned better. So, no user_email field, etc.\n \"\"\"\n\n # Metadata about the comment\n ip_address = models.GenericIPAddressField(_('IP address'), blank=True, null=True)\n is_public = models.BooleanField(\n _('is public'),\n default=True,\n help_text=_('Uncheck this box to make the comment effectively disappear from the site.')\n )\n is_removed = models.BooleanField(\n _('is removed'),\n default=False,\n help_text=_(\"\"\"Check this box if the comment is inappropriate.\n A \"This comment has been removed\" message will be displayed instead.\"\"\")\n )\n site = models.ForeignKey(\n Site,\n on_delete=models.PROTECT,\n related_name='comment_site'\n )\n content_type = models.ForeignKey(\n ContentType,\n on_delete=models.PROTECT,\n verbose_name=_('content type'),\n related_name=\"contenttype_set_for_%(class)s\"\n )\n object_pk = models.TextField(_('object ID'))\n content_object = GenericForeignKey(ct_field=\"content_type\", fk_field=\"object_pk\")\n\n objects = CommentManager()\n\n class Meta:\n app_label = 'tango_comments'\n db_table = 'django_comments'\n ordering = ('post_date',)\n permissions = [(\"can_moderate\", \"Can moderate comments\")]\n verbose_name = _('comment')\n verbose_name_plural = _('comments')\n\n def __str__(self):\n return \"%s\" % (self.content_object)\n\n def get_content_object_url(self):\n \"\"\"\n Get a URL suitable for redirecting to the content object.\n \"\"\"\n return reverse(\n \"comments-url-redirect\",\n args=(self.content_type_id, self.object_pk)\n )\n\n def get_absolute_url(self, anchor_pattern=\"#c%(id)s\"):\n return self.get_content_object_url() + (anchor_pattern % self.__dict__)\n\n def get_as_text(self):\n \"\"\"\n Return this comment as plain text. Useful for emails.\n \"\"\"\n d = {\n 'user': self.user or self.name,\n 'date': self.post_date,\n 'comment': self.text,\n 'domain': self.site.domain,\n 'url': self.get_absolute_url()\n }\n return _('Posted by %(user)s at %(date)s\\n\\n%(comment)s\\n\\nhttp://%(domain)s%(url)s') % d\n\n\nclass CommentFlag(models.Model):\n \"\"\"\n Records a flag on a comment. This is intentionally flexible; right now, a\n flag could be:\n\n * A \"removal suggestion\" -- where a user suggests a comment for (potential) removal.\n\n * A \"moderator deletion\" -- used when a moderator deletes a comment.\n\n You can (ab)use this model to add other flags, if needed. 
However, by\n design users are only allowed to flag a comment with a given flag once;\n if you want rating look elsewhere.\n \"\"\"\n user = models.ForeignKey(\n settings.AUTH_USER_MODEL,\n on_delete=models.PROTECT,\n verbose_name=_('user'),\n related_name=\"comment_flag\"\n )\n comment = models.ForeignKey(\n Comment,\n on_delete=models.CASCADE,\n verbose_name=_('comment'),\n related_name=\"flags\"\n )\n flag = models.CharField(_('flag'), max_length=30, db_index=True)\n flag_date = models.DateTimeField(_('date'), default=None)\n\n # Constants for flag types\n SUGGEST_REMOVAL = \"removal suggestion\"\n MODERATOR_DELETION = \"moderator deletion\"\n MODERATOR_APPROVAL = \"moderator approval\"\n\n class Meta:\n app_label = 'tango_comments'\n db_table = 'django_comment_flags'\n unique_together = [('user', 'comment', 'flag')]\n verbose_name = _('comment flag')\n verbose_name_plural = _('comment flags')\n\n def __str__(self):\n return \"%s flag of comment ID %s by %s\" % \\\n (self.flag, self.comment_id, self.user.get_username())\n\n def save(self, *args, **kwargs):\n if self.flag_date is None:\n self.flag_date = timezone.now()\n super(CommentFlag, self).save(*args, **kwargs)\n","sub_path":"build/lib/tango_comments/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":4645,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"125531992","text":"import feast\nimport pandas as pd\nfrom sklearn import tree\nfrom sklearn.preprocessing import OrdinalEncoder\n\n\nclass CreditScoringModel:\n categorical_features = [\n \"person_home_ownership\",\n \"loan_intent\",\n \"city\",\n \"state\",\n \"location_type\",\n ]\n\n zipcode_features = [\n \"zipcode_features:city\",\n \"zipcode_features:state\",\n \"zipcode_features:location_type\",\n \"zipcode_features:tax_returns_filed\",\n \"zipcode_features:population\",\n \"zipcode_features:total_wages\",\n ]\n\n target = \"loan_status\"\n\n def __init__(self):\n # Load model\n self.classifier = tree.DecisionTreeClassifier()\n\n # Load ordinal encoder\n self.encoder = OrdinalEncoder()\n\n # Set up feature store\n self.fs = feast.FeatureStore(repo_path=\"feature_repo\")\n\n def train(self, loans):\n training_df = self._get_historical_zipcode_features(loans)\n\n self._fit_ordinal_encoder(training_df)\n self._apply_ordinal_encoding(training_df)\n\n train_X = training_df[\n training_df.columns.drop(self.target)\n .drop(\"event_timestamp\")\n .drop(\"created_timestamp\")\n .drop(\"loan_id\")\n ]\n train_X = train_X.reindex(sorted(train_X.columns), axis=1)\n train_Y = training_df.loc[:, self.target]\n self.classifier.fit(train_X[sorted(train_X)], train_Y)\n\n def _get_historical_zipcode_features(self, loans):\n return self.fs.get_historical_features(\n entity_df=loans, features=self.zipcode_features\n ).to_df()\n\n def _fit_ordinal_encoder(self, requests):\n self.encoder.fit(requests[self.categorical_features])\n\n def _apply_ordinal_encoding(self, requests):\n requests[self.categorical_features] = self.encoder.transform(\n requests[self.categorical_features]\n )\n\n def predict(self, request):\n # Get Zipcode features from Feast\n zipcode_features = self._get_online_zipcode_features(request)\n\n # Join features to request features\n features = request.copy()\n features.update(zipcode_features)\n features_df = pd.DataFrame.from_dict(features)\n\n # Apply ordinal encoding to categorical features\n self._apply_ordinal_encoding(features_df)\n\n # Sort columns\n features_df = 
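# The Comment model above points at arbitrary objects through the
# content_type + object_pk pair behind its GenericForeignKey; a sketch of
# how such comments are looked up for one target object, assuming a
# configured Django project with this app installed and Comment imported.
from django.contrib.contenttypes.models import ContentType

def comments_for(obj):
    ct = ContentType.objects.get_for_model(obj)
    # object_pk is a TextField, so the target pk is compared as a string.
    return Comment.objects.filter(content_type=ct, object_pk=str(obj.pk))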
features_df.reindex(sorted(features_df.columns), axis=1)\n\n # Make prediction\n features_df[\"prediction\"] = self.classifier.predict(features_df)\n\n # return result of credit scoring\n return features_df[\"prediction\"].iloc[0]\n\n def _get_online_zipcode_features(self, request):\n zipcode = request[\"zipcode\"][0]\n\n return self.fs.get_online_features(\n entity_rows=[{\"zipcode\": zipcode}], features=self.zipcode_features,\n ).to_dict()\n","sub_path":"credit_model.py","file_name":"credit_model.py","file_ext":"py","file_size_in_byte":2850,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"501485360","text":"# Zahl erraten (ganze Zahl zwischen 0 und 9)\r\n# ------------------------------------------\r\n# Zuerst: Import der Funktion randint() aus dem Modul \"random\":\r\nfrom random import randint\r\n\r\n# randint(a, b) erzeugt eine ganze Zufallszahl: a <= Zufallszahl <= b\r\nr = randint(0, 9)\r\n\r\n# Um das Programm zu testen lassen wir r ausgeben. \r\n# (Normalfall: \"auskommentiert\")\r\n# print('Zufallszahl:', r)\r\n\r\n# Die Anzahl Versuche werden mit dem \"Schleifenzaehler\" i gezaehlt\r\ni = 1 \r\n\r\nprint(\"Errate eine Zahl z, die vom Programm zufaellig erzeugt wurde.\")\r\nprint(\"z ist eine ganze Zahl zwischen 0 und 9.\")\r\nprint()\r\nz = input(\"Gib deine Vermutung ein: z = \")\r\nz = eval(z)\r\n\r\nwhile z != r:\r\n if z > r:\r\n print(\"Die eingegebene Zahl ist zu gross!\")\r\n else:\r\n print(\"Die eingegebene Zahl ist zu klein!\")\r\n z = input(\"Neuer Versuch: z = \")\r\n z = eval(z)\r\n i = i + 1 # Der Schleifenzaehler wird um 1 erhoeht\r\nprint(\"Erraten! Anzahl Versuche:\", i)\r\n","sub_path":"Quellen/python/python - peter krattenmacher/BSPzuAB_PY/BSP_zahlenraten.py","file_name":"BSP_zahlenraten.py","file_ext":"py","file_size_in_byte":961,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"229545949","text":"\"\"\"\r\nWritten by Bryant Yang\r\nVersion 1.0\r\nLast updated: August 22nd 2018\r\n\r\nVersion History:\r\n v1.01 - Changed table write locations due to database move and table names\r\n v1.0 - Created code and documented all code details\r\n\"\"\"\r\n\r\nfrom github import Github\r\nimport requests\r\nfrom datetime import datetime\r\nimport time\r\nimport urllib3\r\nimport sqlalchemy\r\nimport pymssql\r\nfrom sqlalchemy.orm import sessionmaker\r\nfrom addToSQL import issue_info, milestone_details,repo_info, issue_assignees,user_info, \\\r\n epic_repos,epic_repo_children_issues, issue_pipelines, issue_labels\r\n\r\n#Disables Warnings\r\nurllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)\r\n\r\n######Start of timing for the program\r\nstart_time = time.time()\r\nprint(datetime.now())\r\nsetTime = start_time\r\n\r\n#####SQL server connection configuration\r\nserver_properties= {\r\n 'server': 'SE116960.maple.fg.rbc.com',\r\n 'port': '1433',\r\n}\r\n\r\nconn = pymssql.connect(\r\n server=server_properties['server'],\r\n port=server_properties['port'],\r\n autocommit=True\r\n)\r\n\r\nengine = sqlalchemy.create_engine(\"mssql+pymssql://@SE116960.maple.fg.rbc.com/src_API_dev\")\r\n\r\n######Github Configuration setup\r\ngithubToken = \"ebe648f7046e2200cb75709a29c4397aae735e4b\" #This may need to be changed for specific users\r\ng = Github(base_url=\"https://rbcgithub.fg.rbc.com/api/v3\", login_or_token=githubToken, verify=False)\r\norg = g.get_organization(\"SBZ0\") #This is JUST our organization group in the RBC github\r\n\r\n#####Zenhub 
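# The CreditScoringModel above runs its categorical columns through
# sklearn's OrdinalEncoder before training and prediction; a minimal
# sketch of that fit/transform step on a one-column toy frame.
import pandas as pd
from sklearn.preprocessing import OrdinalEncoder

df = pd.DataFrame({"loan_intent": ["MEDICAL", "VENTURE", "MEDICAL"]})
enc = OrdinalEncoder()
df[["loan_intent"]] = enc.fit_transform(df[["loan_intent"]])
print(df["loan_intent"].tolist())  # -> [0.0, 1.0, 0.0] (categories sorted alphabetically)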
configuration setup\r\nzenhub_token = \"4915920923186adb62e91773cc43201963240d70e011eed71a28180be30abb33aacf6e5ff4097aae\" #This may need to be changed for specific users\r\nzenhub_headers = {\"X-Authentication_Token\":\"%s\" %zenhub_token}\r\nzenhub_endpoint = \"https://zenhubwm.fg.rbc.com/\"\r\n\r\nSession = sessionmaker(bind=engine)\r\nsession = Session()\r\n\r\nprint(\"Initialization Time:\", time.time()-start_time) #Estimated Initialization Time: 2s\r\nstr_today = datetime.today().strftime('%Y-%m-%d')\r\n\r\n#####################\r\n# DEFINED FUNCTIONS #\r\n#####################\r\n\r\n##### Tables written to by add_more_issue_info()\r\n# Git_Issue_Info ~1616 rows expected to be added\r\n# Git_Repo_Info ~104 rows expected to be added\r\n# Git_Zen_Issue_Pipelines ~1618 rows expected to be added\r\n# Git_Issue_Assignees ~1057 rows expected to be added\r\n# Git_Issue_Labels ~2618 rows expected to be added\r\ndef add_more_issue_info(): #20 secs to run, 40 secs to commit to SQL\r\n repo_ids = [] #Need the repo IDs for other queries in zenhub\r\n for y in org.get_repos():\r\n repo_ids.append(y.id)\r\n for x in y.get_issues(state = \"all\"):\r\n milestoneId = x.milestone.id if x.milestone != None else None\r\n add_item = issue_info(\r\n issue_no = x.number,\r\n repo_id = y.id,\r\n issue_id = x.id,\r\n issue_name = x.title,\r\n issue_url = x.url,\r\n issue_creator_id = x.user.id,\r\n issue_last_update = x.updated_at,\r\n issue_close_date = x.closed_at,\r\n issue_create_date = x.created_at,\r\n milestone_id = milestoneId,\r\n snap_date = str_today\r\n )\r\n try:\r\n session.add(add_item) #Adds a new row entry to issue_info\r\n# print(add_item) #Optional choice to print out details of row added to issue_info\r\n session.commit() #Commits add to the SQL server\r\n continue\r\n except sqlalchemy.exc.IntegrityError: #Deals with error if duplictate entry already exists\r\n session.rollback()\r\n continue\r\n finally:\r\n response = requests.get(\"https://zenhubwm.fg.rbc.com/p1/repositories/%d/issues/%d?access_token=%s\"\r\n %(y.id, x.number, zenhub_token),headers=zenhub_headers, verify=False)\r\n dat = response.json()\r\n add_item = issue_pipelines(\r\n issue_id = x.id,\r\n pipeline = dat.get(\"pipeline\").get(\"name\") if dat.get(\"pipeline\") != None else None,\r\n estimate = dat.get(\"estimate\").get(\"value\") if dat.get(\"estimate\") != None else None,\r\n snap_date = str_today\r\n )\r\n try:\r\n session.add(add_item) #Adds a new row entry to issue_pipelines\r\n# print(add_item) #Optional choice to print out details of row added to issue_pipelines\r\n session.commit() #Commits add to the SQL server\r\n except sqlalchemy.exc.IntegrityError: #Deals with error if duplictate entry already exists\r\n session.rollback()\r\n finally:\r\n for i in x.get_labels():\r\n add_item = issue_labels(\r\n\r\n label_name = i.name,\r\n label_color = i.color,\r\n issue_id = x.id,\r\n snap_date = str_today\r\n )\r\n try:\r\n session.add(add_item) #Adds a new row entry to issue_labels\r\n# print(add_item) #Optional choice to print out details of row added to issue_labels\r\n session.commit() #Commits add to the SQL server\r\n continue\r\n except sqlalchemy.exc.IntegrityError: #Deals with error if duplictate entry already exists\r\n session.rollback()\r\n continue\r\n\r\n for i in x.assignees:\r\n add_item = issue_assignees(\r\n issue_id = x.id,\r\n user_ids = i.id,\r\n snap_date = str_today\r\n )\r\n try:\r\n session.add(add_item) #Adds a new row entry to issue_assignees\r\n# print(add_item) #Optional choice to 
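# The script above interpolates its ZenHub token straight into each URL; a
# sketch of the same GET with the token passed via `params` and a
# placeholder value, so credentials stay out of format strings (endpoint
# shape taken from the record; verify=False mirrors its internal host).
import requests

def get_issue_data(repo_id, issue_no, token):
    url = "https://zenhubwm.fg.rbc.com/p1/repositories/%d/issues/%d" % (repo_id, issue_no)
    resp = requests.get(url, params={"access_token": token}, verify=False)
    resp.raise_for_status()  # surface HTTP errors instead of parsing bad JSON
    return resp.json()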
print out details of row added to issue_assignees\r\n session.commit() #Commits add to the SQL server\r\n continue\r\n except sqlalchemy.exc.IntegrityError: #Deals with error if duplictate entry already exists\r\n session.rollback()\r\n continue\r\n\r\n add_item = repo_info(\r\n repo_id = y.id,\r\n repo_name = y.name,\r\n repo_org = y.organization.name,\r\n repo_last_updated = y.updated_at,\r\n repo_create_date = y.created_at,\r\n snap_date = str_today\r\n )\r\n try:\r\n session.add(add_item) #Adds a new row entry to repo_info\r\n# print(add_item) #Optional choice to print out details of row added to repo_info\r\n session.commit() #Commits add to the SQL server\r\n continue\r\n except sqlalchemy.exc.IntegrityError: #Deals with error if duplictate entry already exists\r\n session.rollback()\r\n continue\r\n return repo_ids\r\n\r\n##### Tables written to by milestone_details_add()\r\n# Git_Milestone_Details ~66 rows expected to be added\r\ndef milestone_details_add():\r\n for y in org.get_repos():\r\n for x in y.get_milestones():\r\n add_item = milestone_details(\r\n milestone_id = x.id,\r\n milestone_name = x.title,\r\n milestone_create_date = x.created_at,\r\n milestone_due_date = x.due_on,\r\n snap_date = str_today\r\n )\r\n\r\n try:\r\n session.add(add_item) #Adds a new row entry to milestone_details\r\n# print(add_item) #Optional choice to print out the detials of row added to milestone_details\r\n session.commit() #Commits add to the SQL server\r\n continue\r\n except sqlalchemy.exc.IntegrityError: #Deals with error if duplicate entry already exists\r\n session.rollback()\r\n continue\r\n\r\n##### Tables written to by user_information_add()\r\n# Git_User_Info ~35 rows expected to be added\r\ndef user_information_add():\r\n for x in org.get_members():\r\n add_item = user_info(\r\n user_ids = x.id,\r\n user_names = x.name,\r\n snap_date = str_today\r\n )\r\n try:\r\n session.add(add_item) #Adds a new row entry to user_info\r\n# print(add_item) #Optional choice to print out the detials of row added to user_info\r\n session.commit() #Commits add to the SQL server\r\n continue\r\n except sqlalchemy.exc.IntegrityError: #Deals with error if duplicate entry already exists\r\n session.rollback()\r\n continue\r\n\r\n##### Tables written to by epics_add(repo_ids)\r\n# Git_Zen_Epic_Repos ~110 rows expected to be added\r\n# Git_Zen_Epic_Parent_Child_Issues ~325 rows expected to be added\r\ndef epics_add(repo_ids):\r\n for i in repo_ids:\r\n response = requests.get(\"https://zenhubwm.fg.rbc.com/p1/repositories/%d/epics?access_token=%s\"\r\n %(i, zenhub_token),headers=zenhub_headers, verify=False)\r\n\r\n dat = response.json()\r\n if dat.get(\"epic_issues\") is not None:\r\n for x in dat.get(\"epic_issues\"):\r\n add_item = epic_repos(\r\n repo_id = x.get(\"repo_id\"),\r\n issue_no = x.get(\"issue_number\"),\r\n snap_date = str_today\r\n )\r\n\r\n try:\r\n session.add(add_item) #Adds a new row entry to epic_repos\r\n # print(add_item) #Optional choice to print out the details of row added to epic_repos\r\n session.commit() #Commits add to the SQL server\r\n continue\r\n except sqlalchemy.exc.IntegrityError: #Deals with error if duplicate entry already exists\r\n session.rollback()\r\n continue\r\n finally:\r\n response = requests.get(\"https://zenhubwm.fg.rbc.com/p1/repositories/%d/epics/%d?access_token=%s\"\r\n %(i, x.get(\"issue_number\"), zenhub_token),headers=zenhub_headers, verify=False)\r\n\r\n dat = response.json()\r\n\r\n for q in dat.get(\"issues\"):\r\n add_item = 
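# Every insert in the script above repeats the same add/commit/rollback
# dance to skip duplicates; a sketch of that idiom folded into one helper
# (hypothetical name; `session` is the sessionmaker-produced session).
from sqlalchemy.exc import IntegrityError

def add_if_new(session, row):
    try:
        session.add(row)
        session.commit()
        return True
    except IntegrityError:  # duplicate key: keep the existing row
        session.rollback()
        return False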
epic_repo_children_issues(\r\n epic_parent_repo_id = x.get(\"repo_id\"),\r\n epic_parent_issue_no = x.get(\"issue_number\"),\r\n epic_child_repo_id = q.get(\"repo_id\"),\r\n epic_child_issue_no = q.get(\"issue_number\"),\r\n is_epic = q.get(\"is_epic\"),\r\n snap_date = str_today\r\n )\r\n try:\r\n session.add(add_item) #Adds a new row entry to epic_repo_children_issues\r\n # print(add_item) #Optional choice to print out the details of row added\r\n #to epic_repo_children_issues\r\n session.commit() #Commits add to the SQL server\r\n continue\r\n except sqlalchemy.exc.IntegrityError: #Deals with error if duplicate entry already exists\r\n session.rollback()\r\n continue\r\n\r\n####################################\r\n# FUNCTION CALLS & TIMING OF CALLS #\r\n####################################\r\n\r\nrepo_ids = add_more_issue_info()\r\nprint(\"Updated issue_info, repo_info, issue_pipelines, and issue_labels SQL Tables:\",\r\n time.time()-setTime) #Estimated Execution Time: 595s - 700s\r\nsetTime = time.time()\r\n\r\nmilestone_details_add()\r\nprint(\"Updated milestone_details SQL Table:\", time.time()-setTime) #Estimated Execution Time: 10s\r\nsetTime = time.time()\r\n\r\nuser_information_add()\r\nprint(\"Updated user_information_add SQL Table:\", time.time()-setTime)#Estimated Execution Time: 3s\r\nsetTime = time.time()\r\n\r\nepics_add(repo_ids)\r\nprint(\"Updated epics_repo, and epic_repo_children_issues SQL Tables:\",\r\n time.time()-setTime)#Estimated Execution Time: 30-40s\r\nsetTime = time.time()\r\n\r\nprint(\"Total Execution Time (Seconds):\", time.time() - start_time)#Estimated Total Time: 650s - 1500s\r\n","sub_path":"wmar-github-zenhub-pulls/Github_Issue_Add_Details.py","file_name":"Github_Issue_Add_Details.py","file_ext":"py","file_size_in_byte":13231,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"177769425","text":"\"\"\"\nModule that contains the command line app.\n\nWhy does this file exist, and why not put this in __main__?\n\n You might be tempted to import things from __main__ later, but that will\n cause problems: the code will get executed twice:\n\n - When you run `python -msmartass` python will execute\n ``__main__.py`` as a script. That means there won't be any\n ``smartass.__main__`` in ``sys.modules``.\n - When you import __main__ it will get executed again (as a module) because\n there's no ``smartass.__main__`` in ``sys.modules``.\n\n Also see (1) from http://click.pocoo.org/5/setuptools/#setuptools-integration\n\"\"\"\nimport logging\nfrom functools import wraps\n\nimport click\nimport pkg_resources\n\nfrom . 
import DumbProcessor, SmartProcessor\nfrom .clickutils import ClickContextObj\nfrom .fileutils import open_subfile, update_subfile\n\nLOGGER = logging.getLogger(__name__)\n\n\ndef _common_cli(func):\n default_log_level = logging.getLevelName(logging.INFO).lower()\n\n @click.option(\n '--log-level',\n type=click.Choice(\n [\n logging.getLevelName(lev).lower()\n for lev in [\n logging.DEBUG,\n logging.INFO,\n logging.WARNING,\n logging.ERROR,\n logging.CRITICAL,\n ]\n ]\n ),\n default=default_log_level,\n show_default=True,\n show_choices=True,\n help='log level displayed',\n )\n @click.option(\n '--backup/--no-backup',\n default=True,\n show_default=True,\n help='enable/disable creation of backup files',\n )\n @click.option(\n '--process-comments/--no-process-comments',\n default=False,\n show_default=True,\n help='enable/disable processing of comment events',\n )\n @click.option(\n '--skip-name',\n multiple=True,\n default=[],\n metavar='ACTOR_NAME',\n help=(\n 'lines by this actor (case insensitive, uses filename globbing) '\n 'will be skipped. May be passed multiple times.'\n ),\n )\n @click.option(\n '--skip-style',\n multiple=True,\n default=[],\n metavar='STYLE_NAME',\n help=(\n 'lines in this style (case insensitive, uses filename globbing) '\n 'will be skipped. May be passed multiple times.'\n ),\n )\n @click.version_option(\n pkg_resources.get_distribution(__name__.split('.')[0]).version\n )\n @click.argument(\n 'subfiles',\n nargs=-1,\n required=True,\n metavar='FILE',\n type=click.Path(\n exists=True, file_okay=True, dir_okay=False, writable=True\n ),\n )\n @click.pass_context\n @wraps(func)\n def wrapper(ctx, log_level, *args, **kwargs):\n obj = ctx.obj = ctx.obj or ClickContextObj()\n level = getattr(logging, log_level.upper())\n obj.log_level = level\n return ctx.invoke(func, *args, **kwargs)\n\n return wrapper\n\n\ndef _run_cli(\n processor_factory,\n backup,\n process_comments,\n skip_name,\n skip_style,\n subfiles,\n):\n\n processor_args = dict(\n process_comments=process_comments,\n names_to_skip=skip_name,\n styles_to_skip=skip_style,\n )\n processor = processor_factory(**processor_args)\n\n for subfile in subfiles:\n try:\n (subdoc, encoding, newline) = open_subfile(subfile)\n\n (\n total_events,\n events_processed,\n events_updated,\n ) = processor.process_document(subdoc, subfile)\n LOGGER.info(\n '%s: events=%d, processed=%d, updated=%d',\n subfile,\n total_events,\n events_processed,\n events_updated,\n )\n if events_updated:\n update_subfile(subfile, subdoc, encoding, newline, backup)\n except RuntimeError as err:\n LOGGER.error('%s: %s: %s', subfile, type(err).__name__, str(err))\n\n\n@click.command(no_args_is_help=True)\n@_common_cli\ndef smartass(*args, **kwargs):\n \"\"\"Smarten punctionation on ass subtitle files.\"\"\"\n _run_cli(SmartProcessor, *args, **kwargs)\n\n\n@click.command(no_args_is_help=True)\n@_common_cli\ndef dumbass(*args, **kwargs):\n \"\"\"Unsmarten punctuation on ass subtitle files.\"\"\"\n _run_cli(DumbProcessor, *args, **kwargs)\n","sub_path":"src/smartass/cli.py","file_name":"cli.py","file_ext":"py","file_size_in_byte":4425,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"336977103","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n\"\"\"\nMIT License\n\nCopyright (c) 2021-present Daniel [Mathtin] Shiko \nProject: Minecraft Discord Bot\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the 
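# `_common_cli` above stacks shared click options inside a decorator
# factory so two commands can reuse them; a minimal sketch of that pattern
# with a single shared --log-level option (command and option names here
# are illustrative only).
import click
from functools import wraps

def common_options(func):
    @click.option("--log-level", default="info", show_default=True)
    @wraps(func)
    def wrapper(log_level, *args, **kwargs):
        click.echo("log level: %s" % log_level)
        return func(*args, **kwargs)
    return wrapper

@click.command()
@common_options
def hello():
    click.echo("hello")

if __name__ == "__main__":
    hello()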
\"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n\"\"\"\n\n__author__ = \"DeadBlasoul\"\n\nimport logging\nimport os\nimport sys\nfrom typing import Dict, List, Type, Any\nfrom xml.etree import ElementTree\n\nfrom license import license_py_file\n\nlog = logging.getLogger('srv-gen')\n\nNoneType = type(None)\n\n\ndef indent(string: str, level: int = 1) -> str:\n return (' ' * 4) * level + string\n\n\ndef indent_all(lines: List[str], level: int = 1) -> List[str]:\n return [indent(line, level) for line in lines]\n\n\ndef gen_function(name: str, args: List[str], body: List[str], ret_type: Type[Any] = NoneType) -> List[str]:\n ret_type_name = ret_type.__name__ if ret_type is not NoneType else 'None'\n return [\n f'def {name}({\", \".join(args)}) -> {ret_type_name}:'\n ] + indent_all(body)\n\n\ndef gen_class_name(source_name: str) -> str:\n return 'X' + source_name.title().replace('-', '')\n\n\ndef gen_property_name(source_name: str) -> str:\n return source_name.upper().replace('-', '_')\n\n\ndef gen_header() -> str:\n return '''\\\n__author__ = None\n \nimport os.path\nfrom typing import Dict\nimport xml.etree.ElementTree as ET\nfrom .exceptions import MissingResourceException\n\n\ndef res_path(local_path: str):\n path = os.getenv('RESOURCE_PATH')\n return os.path.join(path, local_path)\n'''\n\n\nclass CodeGenerator(object):\n @property\n def code(self) -> str:\n \"\"\"Returns generated source code\"\"\"\n raise NotImplementedError()\n\n\nclass TypeViewGenerator(CodeGenerator):\n _strings: List[str]\n _code: List[str]\n _class_name: str\n _property_name: str\n _type_name: str\n\n def __init__(self, type_name: str, declaration: List[str]):\n self._type_name = type_name\n self._strings = declaration\n self._code = []\n self._class_name = gen_class_name(self._type_name)\n self._property_name = gen_property_name(self._type_name)\n\n def _generate_code(self) -> None:\n self._code = []\n self._gen_class_header()\n self._gen_get()\n for string_name in self._strings:\n self._gen_property(string_name)\n\n def _gen_class_header(self) -> None:\n self._code += [\n f'class {self._class_name}(object):',\n indent(f'_type_name = \"{self._type_name}\"'),\n ''\n ] + indent_all(\n gen_function(\n '__init__', args=['self', 'section'], body=['self._section = section']\n )\n ) + ['']\n\n def _gen_get(self) -> None:\n self._code += indent_all(\n gen_function(\n 'get', args=['self', 'string_name'], ret_type=str,\n body=['return self._section.get(self._type_name, string_name)']\n )\n ) + ['']\n\n def _gen_property(self, name: str) -> None:\n self._code += indent_all(\n ['@property'] +\n gen_function(\n gen_property_name(name), args=['self'], ret_type=str,\n body=[f'return 
self.get(\"{name}\")']\n )\n ) + ['']\n\n @property\n def code(self) -> str:\n if not self._code:\n self._generate_code()\n return '\\n'.join(self._code)\n\n @property\n def lines(self) -> List[str]:\n if not self._code:\n self._generate_code()\n return self._code\n\n @property\n def property_name(self):\n return self._property_name\n\n @property\n def class_name(self):\n return self._class_name\n\n\nclass SectionViewGenerator(CodeGenerator):\n _code: List[str]\n _types: List[TypeViewGenerator]\n _class_name: str\n _property_name: str\n _section_name: str\n _parent_class_name: str\n\n def __init__(self, parent_class_name: str, section_name: str, declaration: Dict[str, List[str]]):\n self._code = []\n self._section_name = section_name\n self._types = [TypeViewGenerator(k, v) for k, v in declaration.items()]\n self._class_name = SectionViewGenerator._gen_section_class_name(self._section_name)\n self._property_name = SectionViewGenerator._gen_section_property_name(self._section_name)\n self._parent_class_name = parent_class_name\n\n def _generate_code(self) -> None:\n self._code = []\n self._gen_class_name()\n for type_generator in self._types:\n self._code += indent_all(type_generator.lines)\n self._code.append('')\n self._gen_section_name()\n for type_generator in self._types:\n self._code.append(indent(f'{type_generator.property_name}: {type_generator.class_name}'))\n self._code.append('')\n self._gen_constructor()\n self._code.append('')\n self._gen_get()\n\n def _gen_class_name(self) -> None:\n self._code.append(f'class {self._class_name}(object):')\n\n def _gen_section_name(self) -> None:\n self._code.append(indent(f'_section_name = \"{self._section_name}\"'))\n\n @staticmethod\n def _gen_section_property_name(source_name) -> str:\n prop_name = gen_property_name(source_name)\n return prop_name[:-1] if prop_name.lower().endswith('s') else prop_name\n\n @staticmethod\n def _gen_section_class_name(source_name) -> str:\n prop_name = gen_class_name(source_name)\n return prop_name[:-1] if prop_name.lower().endswith('s') else prop_name\n\n def _gen_constructor(self) -> None:\n self._code += indent_all(\n gen_function(\n '__init__', args=['self', 'res'],\n body=['self._res = res'] + [\n f'self.{t.property_name} = {self._parent_class_name}.{self._class_name}.{t.class_name}(self)' for t\n in self._types]\n )\n )\n\n def _gen_get(self) -> None:\n self._code += indent_all(\n gen_function(\n 'get', args=['self', 'type_name', 'string_name'], ret_type=str,\n body=['return self._res.get(self._section_name, type_name, string_name)']\n )\n )\n\n @property\n def code(self) -> str:\n if not self._code:\n self._generate_code()\n return '\\n'.join(self._code)\n\n @property\n def lines(self) -> List[str]:\n if not self._code:\n self._generate_code()\n return self._code\n\n @property\n def property_name(self):\n return self._property_name\n\n @property\n def class_name(self):\n return self._class_name\n\n\nclass StringResourceViewGenerator(CodeGenerator):\n _code: List[str]\n _name: str\n _class_name: str\n _property_name: str\n _sections: List[SectionViewGenerator]\n\n def __init__(self, name: str, declaration: Dict[str, Dict[str, List[str]]]):\n self._code = []\n self._name = name\n self._class_name = gen_class_name(self._name)\n self._property_name = gen_property_name(self._name)\n self._sections = [SectionViewGenerator(self._class_name, k, v) for k, v in declaration.items()]\n\n def _generate_code(self) -> None:\n self._code = []\n\n self._gen_class_name()\n for section in self._sections:\n self._code += 
indent_all(section.lines)\n self._code.append('')\n self._gen_private_fields()\n for section in self._sections:\n self._code.append(indent(f'{section.property_name}: {section.class_name}'))\n self._code.append('')\n self._gen_constructor()\n self._code.append('')\n self._gen_switch_lang()\n self._code.append('')\n self._gen_get()\n self._code += ['', '']\n self._gen_instance()\n self._code.append('')\n\n def _gen_private_fields(self):\n self._code += indent_all([\n '_lang: str',\n '_root: ET.Element',\n '_path_cache: Dict[str, str]'\n ])\n\n def _gen_class_name(self) -> None:\n self._code.append(f'class {self._class_name}(object):')\n\n def _gen_constructor(self) -> None:\n self._code += indent_all(\n gen_function('__init__', args=['self', 'lang: str = \\'en\\''],\n body=[\n f'self._strings_path = res_path(\"{self._name}.xml\")',\n 'if not os.path.isfile(self._strings_path):',\n indent(f'raise MissingResourceException(self._strings_path, \"{self._name}.xml\")'),\n 'self._root = ET.parse(self._strings_path).getroot()'\n ] + [\n f'self.{s.property_name} = {self._class_name}.{s.class_name}(self)' for s in self._sections\n ] + [\n 'self.switch_lang(lang)'\n ])\n )\n\n def _gen_switch_lang(self):\n self._code += indent_all(\n gen_function('switch_lang', args=['self', 'lang: str'], body=['self._lang = lang', 'self._path_cache = {}'])\n )\n\n def _gen_get(self):\n self._code += indent_all(gen_function(\n 'get', args=['self', 'section_name', 'type_name', 'string_name'], ret_type=str,\n body=[\n 'path = \\'.\\'.join([section_name, type_name, string_name])',\n 'if path in self._path_cache:',\n indent('return self._path_cache[path]'),\n 'res = self._root.find(f\\'.//{section_name}/string[@lang=\"{self._lang}\"]'\n '[@type=\"{type_name}\"][@name=\"{string_name}\"]\\')',\n 'self._path_cache[path] = res.text if res is not None else path',\n 'return self._path_cache[path]'\n ]\n )\n )\n\n def _gen_instance(self):\n self._code.append(f'{self._property_name} = {self._class_name}()')\n\n @property\n def code(self) -> str:\n if not self._code:\n self._generate_code()\n return '\\n'.join(self._code)\n\n\ndef extract_distinct_declaration(xml_file_path) -> Dict[str, Dict[str, List[str]]]:\n if not os.path.isfile(xml_file_path):\n raise Exception(f'There is no resource file via path \"{xml_file_path}\"')\n\n log.info(f'Loading {xml_file_path}')\n root = ElementTree.parse(xml_file_path).getroot()\n\n declaration = {}\n for section in root:\n for resource in section:\n section_name = section.tag\n type_name = resource.attrib['type']\n resource_name = resource.attrib['name']\n\n if section_name not in declaration:\n declaration[section_name] = {}\n if type_name not in declaration[section_name]:\n declaration[section_name][type_name] = []\n if resource_name not in declaration[section_name][type_name]:\n declaration[section_name][type_name].append(resource_name)\n\n return declaration\n\n\ndef main(_):\n xml = 'res/strings.xml'\n output = 'src/util/resources.py'\n declaration = extract_distinct_declaration(xml)\n generator = StringResourceViewGenerator('strings', declaration)\n\n with open(output, 'w') as file:\n file.write(gen_header() + '\\n\\n' + generator.code)\n\n license_py_file(output)\n\n\nif __name__ == '__main__':\n main(sys.argv)\n","sub_path":"scripts/resource_view_generator.py","file_name":"resource_view_generator.py","file_ext":"py","file_size_in_byte":12251,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"118460801","text":"from arduscope import 
Arduscope, ArduscopeScreen\n\nwith Arduscope(port='/dev/ttyUSB0') as arduino:\n    arduino.frequency = 2000\n    arduino.pulse_width = 0.05\n    arduino.trigger_value = 2.5\n    arduino.amplitude = 5.0\n    arduino.n_channels = 2\n    arduino.trigger_channel = \"A0\"\n    arduino.trigger_offset = 0.0\n\n    arduino.start_acquire()\n    arduino.live_plot()\n\nscreen = arduino.last_screen\nx = screen.x\na0 = screen.channels[0]\na1 = screen.channels[1]\n\nscreen.save(file=\"prueba.csv\", overwrite=True)\n\nArduscopeScreen.load(file=\"prueba.csv\")\n","sub_path":"scripts/uso_basico.py","file_name":"uso_basico.py","file_ext":"py","file_size_in_byte":546,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"325759803","text":"# RoboDK API\nfrom robolink import *\n# Robot toolbox\nfrom robodk import *\nimport os\nimport time\nimport matlab\nimport matlab.engine\nimport sys\n\n\n# Create an exception when an error occurs and wait for a button to be pressed\ndef myexcepthook(type, value, traceback, oldhook=sys.excepthook):\n    oldhook(type, value, traceback)\n    input(\"Press RETURN. \")\nsys.excepthook = myexcepthook\n\n\nclass Interface:\n\n    # Constructor\n    def __init__(self):\n        # Set the file directories for the Matlab code and the RoboDK file\n        filedirectoryRoboDK = os.path.normpath(os.path.join(os.path.dirname(os.path.realpath(__file__)), os.pardir, 'RoboDK'))\n        filedirectoryMatlab = os.path.normpath(os.path.join(os.path.dirname(os.path.realpath(__file__)), os.pardir, 'Matlab'))\n\n        # Start the MATLAB engine, and cd to the location of the Matlab functions.\n        print('Starting Matlab engine')\n        self.eng = matlab.engine.start_matlab()\n        self.eng.cd(filedirectoryMatlab)\n\n        # Generate a Robolink object RDK. This object interfaces with RoboDK.\n        print('Starting RoboDK')\n        self.RDK = Robolink()\n\n        # Open RoboDK file\n        self.RoboDK_file = filedirectoryRoboDK + r'\\Assembly Cell.rdk'\n        # RDK.AddFile(RoboDK_file)\n        self.robot = self.RDK.Item('M-6IB', 2)\n\n        # Initialize all RoboDK programs\n        self.program_interior = self.RDK.Item('AssembleInterior')\n        self.program_top_blue = self.RDK.Item('Attach_Top_Cover_Blue')\n        self.program_top_red = self.RDK.Item('Attach_Top_Cover_Red')\n        self.program_detach = self.RDK.Item('Detach_Top')\n        self.program_final_step = self.RDK.Item('Final step')\n\n        # Run the program\n        self.run_program()\n\n    def run_program(self):\n        run_assembly = True\n        while run_assembly:\n            # Reset program\n            self.reset()\n            # Get user input\n            self.user_prompts()\n            # Place bottom cover in assembly fixture by using inverse kinematics\n            self.run_inverse_kinematics()\n            # Assemble interior of the phone\n            self.assemble_interior()\n            # Place the final product on a conveyor belt.\n            self.program_final_step.RunProgram()\n            while self.program_final_step.Busy():\n                time.sleep(.300)\n            # Ask the user if they want to produce another phone.\n            answer = input('Want another one? [yes/no]\\n')\n            run_assembly = (answer == 'yes')\n\n    # User input\n    def user_prompts(self):\n        # Lets you choose the color for the bottom and top cover\n        self.color_bottom = int(input('Choose a color for the bottom cover: \\n 0 = Red \\n 1 = Blue \\n'))\n        self.color_top = int(input('Choose a color for the top cover: \\n 0 = Red \\n 1 = Blue \\n'))\n        self.engraving_prompt = input('Do you want custom text? 
[yes/no]\\n')\n\n        # Notify user:\n        if self.engraving_prompt == 'yes':\n            self.text = input('Write custom text: ')\n            # If text is not upper case or too long, try again\n            while self.text.isupper() != True or len(self.text) > 5:\n                self.text = input(\n                    'Text should be upper case and less than 6 characters.\\nTry again: ')\n\n\n    def assemble_interior(self):\n        print('Placing interior...\\n')\n        self.program_interior.RunProgram()\n        while self.program_interior.Busy():\n            time.sleep(.300)\n\n        if self.color_top == 0:\n            # Calls the 'Assemble Red Phone' program\n            print('Placing top cover...\\n')\n            self.program_top_red.RunProgram()\n            while self.program_top_red.Busy():\n                time.sleep(.300)\n        elif self.color_top == 1:\n            # Calls the 'Assemble Blue Phone' program\n            print('Placing top cover...\\n')\n            self.program_top_blue.RunProgram()\n            while self.program_top_blue.Busy():\n                time.sleep(.300)\n        else:\n            print('ERROR:\\n')\n\n        print('Detaching top cover...\\n')\n        self.program_detach.RunProgram()\n        while self.program_detach.Busy():\n            time.sleep(.300)\n\n    def reset(self):\n        # Makes sure that all the parts are in the right places before starting the assembly\n        reset_program = self.RDK.Item('Reset')\n        reset_program.RunProgram()\n\n    # params text engraving or not\n    def engrave_run(self):\n        print('Engraving text...')\n        # Initialise program\n        gcodeGenerator = self.RDK.Item('Generate G-Code')\n        gcodeImport = self.RDK.Item('Import G-Code')\n        pickupPhone = self.RDK.Item('Attach_phone')\n        viaPoint = self.RDK.Item('Via_engraver')\n        # Generate g-code for engraving text\n        text = str(('%s' % self.text))\n        gcodeGenerator.RunProgram([('%s' % text)])\n        while gcodeGenerator.Busy():\n            time.sleep(.300)\n\n        # Run program to import g-code\n        gcodeImport.RunProgram()\n        while gcodeImport.Busy():\n            time.sleep(.300)\n\n        # Move to via point\n        self.robot.MoveJ(viaPoint)\n\n        # Run g-code\n        Milling = self.RDK.Item('Millingsettings', 8)\n        time.sleep(.300)\n        Milling.RunProgram()\n        while Milling.Busy():\n            time.sleep(.100)\n\n        # Delete auto-generated files\n        Milling.Delete()\n        Milling = self.RDK.Item('Millingsettings')\n        Milling.Delete()\n\n        # Move to via point\n        self.robot.MoveJ(viaPoint)\n\n    def run_inverse_kinematics(self):\n        if self.color_bottom == 0:\n            # Attach the red bottom cover\n            print('Picking up bottom cover...\\n')\n            self.eng.AttachBottomCoverRed(0.0)\n        elif self.color_bottom == 1:\n            # Attach the blue bottom cover\n            print('Picking up bottom cover...\\n')\n            self.eng.AttachBottomCoverBlue(0.0)\n        else:\n            print('ERROR:\\n')\n\n        # Run engraving\n        if self.engraving_prompt == 'yes':\n            self.engrave_run()\n\n        print('Putting down bottom cover...')\n        self.eng.DetachBottomCover(0.0)\n\n\n\nprogram = Interface()","sub_path":"Python/Interface.py","file_name":"Interface.py","file_ext":"py","file_size_in_byte":6158,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"598915764","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\n@Author : Thistledown\n@Contact : 120768091@qq.com\n@Software: PyCharm\n@File : ex44.py\n@Time : 2018/9/20 16:18\n\"\"\"\n'Inheritance VS. Composition'\n\n'What is inheritance' \\\n'> Inheritance describes a class getting most or even all of its functionality from its parent class' \\\n'There are 3 ways a parent class and a child class interact' \\\n'> 1. The child implicitly inherits methods from the parent' \\\n'> 2. The child overrides methods of the parent' \\\n'> 3. The child alters behavior before or after calling the parent'\n\nprint('-' * 10)\n'Implicit inheritance'\nclass Parent(object):\n    def implict(self):\n        print('PARENT implict()')\n\nclass Child(Parent):\n    pass\n\ndad = Parent()\nson = Child()\n\ndad.implict()\nson.implict()\n\nprint('-' * 10)\n'Override a method'\nclass Parent(object):\n    def override(self):\n        print('PARENT override()')\n\nclass Child(Parent):\n    def override(self):\n        print('CHILD override()')\n\ndad = Parent()\nson = Child()\n\ndad.override()\nson.override()\n\nprint('-' * 10)\n'Alter the parent before or after calling it'\n\nclass Parent(object):\n    def altered(self):\n        print('PARENT altered()')\n\nclass Child(Parent):\n    def altered(self):\n        print('CHILD, BEFORE PARENT altered()')\n        # The super() function calls a method of the parent (super) class\n        # super(Child, self).altered()\n        super().altered()  # In Python 3 this form can be used directly\n        print('CHILD, AFTER PARENT altered()')\n\ndad = Parent()\nson = Child()\n\ndad.altered()\nson.altered()\n\nprint('-' * 10)\n'Using all three in combination'\n\nclass Parent(object):\n    def override(self):\n        print('PARENT override()')\n\n    def implicit(self):\n        print('PARENT implicit()')\n\n    def altered(self):\n        print('PARENT altered()')\n\nclass Child(Parent):\n    def override(self):\n        print('CHILD override()')\n\n    def altered(self):\n        print('CHILD, BEFORE PARENT altered()')\n        super().altered()\n        print('CHILD, AFTER PARENT altered()')\n\ndad = Parent()\nson = Child()\n\ndad.implicit()\nson.implicit()\n\ndad.override()\nson.override()\n\ndad.altered()\nson.altered()\n\nprint('-' * 10)\n'The reason for using super()'\n\nclass BadStuff(object):\n    pass\n\nclass SuperFun(Child, BadStuff):\n    pass\n\n\nprint('-' * 10)\n'Composition'\n\nclass Other(object):\n    def override(self):\n        print('OTHER override()')\n\n    def implicit(self):\n        print('OTHER implicit()')\n\n    def altered(self):\n        print('OTHER altered()')\n\nclass Child(object):\n    def __init__(self):\n        self.other = Other()\n\n    def implicit(self):\n        self.other.implicit()\n\n    def override(self):\n        print('CHILD override()')\n\n    def altered(self):\n        print('CHILD, BEFORE OTHER altered()')\n        self.other.altered()\n        print('CHILD, AFTER OTHER altered()')\n\nson = Child()\n\nson.implicit()\nson.override()\nson.altered()\n","sub_path":"ch3/ex44.py","file_name":"ex44.py","file_ext":"py","file_size_in_byte":2763,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"251421143","text":"import os\r\n\r\nos.chdir(\"Text input\")\r\n\r\ndef move(coords, d):\r\n    x, y, z = coords\r\n    assert x + y + z == 0 and d in [\"e\", \"w\", \"se\", \"sw\", \"ne\", \"nw\"]\r\n    if d == \"e\":\r\n        return (x + 1, y - 1, z)\r\n    if d == \"w\":\r\n        return (x - 1, y + 1, z)\r\n    if d == \"ne\":\r\n        return (x + 1, y, z - 1)\r\n    if d == \"nw\":\r\n        return (x, y + 1, z - 1)\r\n    if d == \"se\":\r\n        return (x, y - 1, z + 1)\r\n    if d == \"sw\":\r\n        return (x - 1, y, z + 1)\r\n\r\n\r\ndef parse_line(l):\r\n    steps = []\r\n    l = list(l.strip())\r\n    while len(l) > 0:\r\n        c = l.pop(0)\r\n        if c not in 'we':\r\n            c += l.pop(0)\r\n        steps.append(c)\r\n    return steps\r\n\r\n\r\ndef get_neighbors(coords):\r\n    return [move(coords, d) for d in [\"e\", \"w\", \"se\", \"sw\", \"ne\", \"nw\"]]\r\n\r\n\r\n# part 1\r\n\r\ntiles_are_flipped = dict()\r\n\r\nINPUT_FILE = \"lobby_layout.txt\"\r\n\r\nfor line in open(INPUT_FILE):\r\n    steps = parse_line(line)\r\n    coord = (0,0,0)\r\n    for step in steps:\r\n        coord = move(coord, step)\r\n    is_flipped = tiles_are_flipped.get(coord, False)\r\n    tiles_are_flipped[coord] = not is_flipped\r\n\r\nprint(sum(v for v in tiles_are_flipped.values() if 
v))\r\n","sub_path":"advent_of_code_day_24.py","file_name":"advent_of_code_day_24.py","file_ext":"py","file_size_in_byte":1168,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"635695369","text":"import datetime\n\nfrom django.test import TestCase\nfrom django.utils.timezone import make_aware\n\nfrom ..admin import PatronAdmin\nfrom ..filters import PendingRewardsFilter\nfrom ..models import Patron, Payment, Reward\n\n\nclass FiltersTestCase(TestCase):\n def setUp(self):\n self.patron_1 = Patron.objects.create(\n name=\"Jan Kowalski\",\n email=\"jan@kowalski.extra\",\n since=make_aware(datetime.datetime(2016, 1, 20))\n )\n self.reward_1 = Reward.objects.create(\n name=\"gte_10\",\n value=11\n )\n self.payment_1 = Payment.objects.create(\n status=Payment.STATUS.PROCESSED,\n completed=False,\n reward=self.reward_1,\n patron=self.patron_1,\n pledge=\"20.00\",\n month=datetime.date(2016, 1, 20),\n )\n\n def test_pending_rewards(self):\n filter = PendingRewardsFilter(None, {'pending_rewards': 'true'},\n Patron, PatronAdmin)\n patrons = filter.queryset(None, Patron.objects.all())\n assert len(patrons) == 0\n\n Payment.objects.create(\n status=Payment.STATUS.PROCESSED,\n completed=False,\n reward=self.reward_1,\n patron=self.patron_1,\n pledge=\"20.00\",\n month=datetime.date(2016, 2, 20),\n )\n\n Payment.objects.create(\n status=Payment.STATUS.PROCESSED,\n completed=False,\n reward=self.reward_1,\n patron=self.patron_1,\n pledge=\"20.00\",\n month=datetime.date(2016, 3, 20),\n )\n\n patrons = filter.queryset(None, Patron.objects.all())\n assert len(patrons) == 1\n assert patrons[0].pk == self.patron_1.pk\n","sub_path":"patreonmanager/tests/test_filters.py","file_name":"test_filters.py","file_ext":"py","file_size_in_byte":1770,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"334587513","text":"class HalvingGame(object):\n def __init__(self, N):\n self.N = N\n\n def startState(self):\n return (1, self.N)\n\n def isEnd(self, state):\n player, number = state\n return number == 0\n\n def utility(self, state):\n player, number = state\n assert number == 0\n return player * float(\"inf\")\n\n def player(self, state):\n player, number = state\n return player\n\n def actions(self, state):\n return ['-', '/']\n\n def succ(self, state, action):\n player, number = state\n if action == '-':\n return -player, number - 1\n elif action == '/':\n return -player, number // 2\n\n\ndef humanPolicy(game, state):\n while True:\n action = input(\"Input your choice\")\n if action in game.actions(state):\n return action\n\n\ndef minimaxPolicy(game, state):\n def recurse(state):\n if game.isEnd(state):\n return game.utility(state), 'none'\n choices = [(recurse(game.succ(state, action))[0], action) for action in game.actions(state)]\n if game.player(state) == 1:\n return max(choices)\n elif game.player(state) == -1:\n return min(choices)\n\n\n value, action = recurse(state)\n print('minimax says action = {}, value = {}'.format(action, value))\n return action\n\n\n# policies = {1: humanPolicy, -1: humanPolicy}\n# policies = {1: minimaxPolicy, -1: minimaxPolicy}\npolicies = {1: humanPolicy, -1: minimaxPolicy}\ngame = HalvingGame(N=15)\nstate = game.startState()\n\nwhile not game.isEnd(state):\n print(\"=\" * 10, state)\n player = game.player(state)\n policy = policies[player]\n action = policy(game, state)\n state = game.succ(state, action)\n\nprint(\"utility = 
{}\".format(game.utility(state)))\n","sub_path":"AI/HalvingGame.py","file_name":"HalvingGame.py","file_ext":"py","file_size_in_byte":1780,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"602638193","text":"#!/usr/bin/env python3\n# -*- coding:utf-8 -*-\nimport os\nfrom configparser import ConfigParser\n\nfrom base.Logger import ApiLogger\n\n\nclass ConfigManager(object):\n '''\n 指定配置文件默认目录 默认为 /config/*.ini\n 配置文件:mdconfig.ini 对应方法:ConfigManager().mdconfig(section, option)\n 根据obj.fileKeys可以验证文件是否被加载(无后缀)\n '''\n\n def __init__(self, path=None):\n self.logger = ApiLogger.getLoger()\n self.path = path\n self.fileKeys = []\n if not self.path:\n proDir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n self.path = proDir + '\\\\config\\\\'\n self._readConfigs()\n\n '''\n 根据配置文件目录加载 *.ini 文件\n 根据文件名动态设置方法\n '''\n\n def _readConfigs(self):\n configFiles = os.listdir(self.path)\n # 遍历config目录下的所有 *.ini 文件\n for configFile in configFiles:\n filename, fileType = configFile.split(\".\")[0:2]\n # 判断文件名是否合法\n if fileType != \"ini\":\n self.logger.warning(\"配置文件读取失败:%s\" % configFile)\n continue\n self.fileKeys.append(filename)\n # 创建配置解析器\n conf = ConfigParser()\n conf.read(\"%s%s\" % (self.path, configFile))\n\n # 为对象动态添加方法\n def run(conf):\n return lambda section, option: conf.get(section, option)\n\n setattr(self, filename, run(conf))\n\n\n'''\n# 测试代码\nif __name__ == '__main__':\n configManager = ConfigManager()\n # 获取config.ini文件的DEFAULT->TIMEOUT\n print(configManager.config(\"DEFAULT\", \"TIMEOUT\"))\n # 获取mdconfig.ini文件的DEFAULT->TIMEOUT\n print(configManager.mdconfig(\"DEFAULT\", \"TIMEOUT\"))\n # 查看哪些文件被加载(不带后缀,也就是方法名)\n print(configManager.fileKeys)\n'''\n","sub_path":"base/ConfigManager.py","file_name":"ConfigManager.py","file_ext":"py","file_size_in_byte":1998,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"399842291","text":"\"\"\"\nVarious functions that use the Optimizer class to\ndo some common tasks.\n\"\"\"\nimport os\nimport torch\nimport numpy as np\n\nfrom losses import *\nfrom utils import *\nfrom functools import partial\nfrom optimizer import Optimizer\nfrom skimage.io import imread, imsave\nfrom torchvision.models import (alexnet, resnet50, vgg16)\n\ndef visualize(network, layer, idx, img_shape=(3, 224, 224),\n init_range=(0, 1), max_iter=400, lr=1, sigma=0,\n debug=False):\n \"\"\"\n Perform standard Deep Dream-style visualization on the\n network.\n \n Parameters:\n\n network: the network to be run, a torch module\n\n layer: the layer of the network that the desired neuron\n to visualize is part of, also a torch module\n\n idx: a tuple of indexes into the output of the given\n layer (like (0,0,0,0) for a BCHW conv layer) that\n extracts the desired neuron\n\n img_shape: a tuple specifying the shape of the images the\n network takes in, in CHW form (a batch dimension is\n expected by Optimizer and so is automatically added)\n\n init_range: the range of values to randomly initialize\n the image to\n\n max_iter: the maximum number of iterations to run\n the optimization for\n\n lr: the 'learning rate' (the multiplier of the gradient\n as it's added to the image at each step; \n\n sigma: the standard deviation (or list of stddevs)of \n a Gaussian filter that smooths the image each iteration,\n standard for inception loop-style visualization\n\n debug: prints loss at every iteration if true, useful for\n finding the right learning rateo\n \n Returns:\n\n 
optimized image\n loss for the last iteration\n \"\"\"\n # partial application, since the index can't be passed in optimizer code\n loss_func = partial(specific_output_loss, idx=idx)\n optimizer = Optimizer(network, layer, loss_func)\n\n # set the 'target' optimization to be very high -- just want\n # to increase it as much as possible\n # since optimizer is actually gradient descent, make it negative\n # TODO: allow selection of populations, not just single neurons\n target_shape = (1,) \n target = torch.ones(*target_shape) * 100\n target = target.cuda()\n\n # now start optimization\n rand_img = torch_rand_range(img_shape, init_range).unsqueeze(0).cuda()\n return optimizer.optimize(rand_img, target, max_iter=max_iter,\n lr=lr, sigma=sigma, debug=debug)\n\ndef gen_one_image(network, layer, image, noise_level, \n loss_func, constant_area=0, max_iter=1000,\n lr=np.linspace(10, 0.5, 1000), sigma=0, grayscale=False,\n debug=False):\n \"\"\"\n Generate a single modified stimulus from a source image.\n (This function is primarily for use by other wrappers).\n\n Parameters:\n\n layer: the actual layer object, part of the network, that\n you're extracting features from for the generation\n\n image: a single image, in BCHW format, on the same device\n as the network (for now just GPU)\n\n grayscale: whether or not the optimization should be done in\n grayscale (enforcing the RGB channels stay the same)\n\n other arguments are same as std_generate\n \"\"\"\n # constant_area's default is actually dependent on image\n # so 0 there is just a non-None placeholder\n # set to the center (max_dim / 5) pixels by default\n if constant_area == 0:\n h_center = int(image.shape[2] / 2)\n w_center = int(image.shape[3] / 2)\n\n h_span = int(image.shape[2] / 10)\n w_span = int(image.shape[3] / 10)\n\n constant_area = (h_center - h_span, h_center + h_span,\n w_center - w_span, w_center + w_span)\n\n with torch.no_grad():\n acts = []\n hook = layer.register_forward_hook(\n lambda m,i,o: acts.append(o))\n\n _ = network(image)\n\n act = acts[0]\n hook.remove()\n\n noisy_act = add_noise(act, noise_level)\n\n optimizer = Optimizer(network, layer, loss_func)\n\n new_img, loss = optimizer.optimize(image, noisy_act,\n constant_area=constant_area, max_iter=max_iter,\n lr=lr, sigma=sigma, clip_image=True, \n grayscale=grayscale, debug=debug)\n\n return new_img.detach().cpu().numpy().transpose(0, 2, 3, 1).squeeze(), loss\n\n\ndef std_generate(net_name, lay_idx, images, noise_level, \n constant_area=0, max_iter=1000,\n lr=np.linspace(10, 0.5, 1000), sigma=0, debug=False,\n alpha=2, beta=2, lambda_a=0, lambda_b=0):\n \"\"\"\n Standard stimulus generation, using torchvision models.\n\n Parameters:\n\n net_name: whether to use alexnet, resnet50, or vgg16\n\n lay_idx: the layer you want (counting convolutional and linear\n layers; e.g. 
resnet50 has about 50)\n\n    images: a directory containing only images, the ones\n        the network will run on\n\n    noise_level: standard deviation for the gaussian noise\n        to add to the intermediate representation\n\n    constant_area: the area of the image to keep constant at\n        each iteration ((h1, h2, w1, w2) indices), defaults to\n        the center 20%\n\n    max_iter: the maximum number of iterations to run,\n        set to a reasonable default\n\n    lr: the 'learning rate', the multiplier of the gradient\n        when added to the image; it can vary a lot depending on\n        the scale of the image pixel values\n\n    sigma: the standard deviation of a gaussian used to smooth\n        the generated image at each timestep, as regularization\n        (0 means no smoothing)\n\n    alpha: the value of alpha for the alpha-norm loss term\n\n    beta: the value of beta for the total variation loss term\n\n    lambda_a: the weight for the alpha-norm loss term\n\n    lambda_b: the weight for the beta-norm loss term\n\n    Returns:\n\n    nothing, but in a new directory (net_name + images), a\n    modified form of each image is saved\n    \"\"\"\n    network = {\n        'alexnet' : alexnet,\n        'resnet50' : resnet50,\n        'vgg16' : vgg16,\n    }[net_name](pretrained=True).cuda()\n\n    # treat each Conv2d or Linear module as a single layer\n    layers = [l for l in get_atomic_layers(network)\n        if isinstance(l, torch.nn.modules.conv.Conv2d)\n        or isinstance(l, torch.nn.modules.linear.Linear)]\n    layer = layers[lay_idx]\n\n\n    # make a directory for the new images if it doesn't already exist\n    try:\n        os.mkdir(f\"modified_{images}\")\n    except FileExistsError:\n        # it already exists; just add to it\n        pass\n\n    loss_func = partial(standard_loss, alpha=alpha, beta=beta,\n        lambda_a = lambda_a, lambda_b = lambda_b)\n\n    if isinstance(images, str):\n        # assume it's a directory of only images\n        # TODO: be more flexible here\n        files = os.listdir(images)\n\n        for fname in files:\n            img, grayscale = load_img_torchvision(images + '/' + fname)\n            img = img.cuda()\n\n            new_img, loss = gen_one_image(network, layer,\n                img, noise_level, loss_func,\n                constant_area, max_iter, lr, sigma, grayscale,\n                debug)\n\n            imsave(fname=f\"modified_{images}/{net_name}_{lay_idx}_{noise_level}_{fname}\", arr=new_img)\n    else:\n        # TODO: maybe make this work with a list of already-loaded images\n        raise NotImplementedError\n","sub_path":"optimization-generation-master/wrappers.py","file_name":"wrappers.py","file_ext":"py","file_size_in_byte":7230,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"26800380","text":"#coding:utf-8\nimport warnings\nwarnings.filterwarnings(\"ignore\")\nimport re\nimport ssl\nimport jieba\t# Chinese word segmentation package\nimport numpy\t# numpy computation package\nimport codecs\t# codecs provides an open() that takes an encoding and decodes to unicode on read\nimport pandas as pd\nimport urllib.request\nfrom bs4 import BeautifulSoup as bs\n\nimport matplotlib.pyplot as plt\n#%matplotlib inline\n\nimport matplotlib\nmatplotlib.rcParams['figure.figsize'] = (10.0,5.0)\nfrom wordcloud import WordCloud\n\n# Parse the now-playing page\ndef getNowPlayingMovie_list():\n\tcontext = ssl._create_unverified_context()\n\tresp = urllib.request.urlopen('https://movie.douban.com/cinema/nowplaying/hangzhou/', context=context)\n\thtml_data = resp.read().decode('utf-8')\n\n\tsoup = bs(html_data,'html.parser')\n\tnowplaying_movie = soup.find_all('div',id='nowplaying')\n\tnowplaying_movie_list = nowplaying_movie[0].find_all('li', class_='list-item')\n\n\tnowplaying_list = []\n\tfor item in nowplaying_movie_list:\n\t\tnowplaying_dict = {}\n\t\tnowplaying_dict['id'] = item['data-subject']\n\t\tfor tag_img_item in item.find_all('img'):\n\t\t\tnowplaying_dict['name'] = tag_img_item['alt']\n\t\t\tnowplaying_list.append(nowplaying_dict)\n\treturn nowplaying_list\n\n# Crawl the comment pages\ndef getCommentsById(movieId,pageNum):\n\tcontext = ssl._create_unverified_context()\n\teachCommentList = []\n\tif pageNum > 0:\n\t\tstart = (pageNum-1) * 20\n\telse:\n\t\treturn False\n\n\trequrl = 'https://movie.douban.com/subject/'+ movieId + '/comments?start='+ str(start) +'&limit=20'\n\tprint(requrl)\n\n\tresp = urllib.request.urlopen(requrl, context=context)\n\thtml_data = resp.read().decode('utf-8')\n\tsoup = bs(html_data, 'html.parser')\n\tcomment_div_lists = soup.find_all('div',class_='comment')\n\n\tfor item in comment_div_lists:\n\t\tif item.find_all('p')[0].string is not None:\n\t\t\teachCommentList.append(item.find_all('p')[0].string)\n\n\treturn eachCommentList\n\ndef main():\n\tcommentList = []\n\tNowPlayingMovie_list = getNowPlayingMovie_list()\n\t# Loop to fetch the first 3 pages of comments for the first movie\n\tfor i in range(3):\n\t\tnum = i + 1\n\t\tcommentList_temp = getCommentsById(NowPlayingMovie_list[0]['id'], num)\n\t\tcommentList.append(commentList_temp)\n\n\t# Convert the list data into one string\n\tcomments = ''\n\tfor k in range(len(commentList)):\n\t\tcomments = comments + (str(commentList[k]).strip())\n\n\t# Use a regular expression to keep only Chinese characters (strips punctuation)\n\tpattern = re.compile(r'[\\\\u4e00-\\\\u9fa5]+')\n\tfilterdata = re.findall(pattern, comments)\n\tclean_comments = ''.join(filterdata)\n\n\t# Use jieba for Chinese word segmentation\n\tsegment = jieba.lcut(clean_comments)\n\twords_df = pd.DataFrame({'segment':segment})\n\n\t# Remove stopwords\n\tstopwords = pd.read_csv(\"./stopwords.txt\",index_col=False,quoting=3,sep=\"\\\\t\",names=['stopword'],encoding='utf-8')\n\twords_df = words_df[~words_df.segment.isin(stopwords.stopword)]\n\n\t# Count word frequencies\n\twords_stat=words_df.groupby(by=['segment'])['segment'].agg({\"count\":numpy.size})\n\twords_stat=words_stat.reset_index().sort_values(by=[\"count\"], ascending=False)\n\n\t# Render the word cloud\n\twordcloud=WordCloud(font_path=\"simhei.ttf\", background_color=\"white\", max_font_size=80)\n\tword_frequence = {x[0]:x[1] for x in words_stat.head(1000).values}\n\tword_frequence_list = []\n\tfor key in word_frequence:\n\t\ttemp = (key,word_frequence[key])\n\t\tword_frequence_list.append(temp)\n\n\twordcloud=wordcloud.fit_words(dict(word_frequence_list))\n\tplt.imshow(wordcloud)\n\tplt.axis(\"off\")\n\tplt.show()\n# Main entry point\nmain()\n","sub_path":"movie_spider.py","file_name":"movie_spider.py","file_ext":"py","file_size_in_byte":3395,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"73046877","text":"import requests\r\nimport core\r\nimport re\r\nimport time\r\nfrom Wappalyzer import WebPage\r\nimport GetMessage\r\nfrom WebLogicScan import WebLogicScan\r\nfrom Init import app\r\nfrom exts import db\r\nfrom models import BugList\r\nfrom Init import redispool\r\nfrom POCScan import selfpocscan2\r\n'''\r\nCollect basic information for the input URL:\r\n    1. Web fingerprint / technology identification (Finger)\r\n    2. Status code (Status)\r\n    3. Title (Title)\r\n    4. Scan timestamp (Date)\r\n    5. Response headers (response)\r\n    6. Open-port information\r\n'''\r\n\r\n\r\nclass GetBaseMessage():\r\n    def __init__(self, url, attackurl, rep):\r\n        self.domain = url\r\n        self.redispool = redispool\r\n        self.url = attackurl\r\n        self.rep = rep\r\n\r\n    def GetStatus(self):\r\n        redispool.append(\"runlog\", \"Fetching HTTP status code for {}\\n\".format(self.url))\r\n        print(\"Fetching HTTP status code for {}\".format(self.url))\r\n        try:\r\n            return str(self.rep.status_code)\r\n        except Exception as e:\r\n            print(e)\r\n            return \"None\"\r\n\r\n    def GetTitle(self):\r\n        redispool.append(\"runlog\", \"Fetching page title for {}!\\n\".format(self.url))\r\n        print(\"Fetching page title!\")\r\n        if self.rep != None:\r\n            try:\r\n                title = re.findall('<title>(.*?)</title>', self.rep.text)[0]\r\n                return title\r\n            except Exception as e:\r\n                print(e)\r\n                return None\r\n        return None\r\n\r\n    def GetDate(self):\r\n        redispool.append(\"runlog\", \"Getting current system time for {}!\\n\".format(self.url))\r\n        print(\"Getting current system time!\")\r\n        return str(time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime()))\r\n\r\n    def GetResponseHeader(self):\r\n        redispool.append(\"runlog\", \"Fetching response headers for {}!\\n\".format(self.url))\r\n        print(\"Fetching response headers!\")\r\n        context = \"\"\r\n        try:\r\n            for key, val in self.rep.headers.items():\r\n                context += (key + \": \" + val + \"\\r\\n\")\r\n            return context\r\n        except Exception as e:\r\n            print(e)\r\n            return context\r\n\r\n    def GetFinger(self):\r\n        redispool.append(\"runlog\", \"Fingerprinting site technologies for {}!\\n\".format(self.url))\r\n        print(\"Fingerprinting site technologies!\")\r\n        try:\r\n            finger = WebPage(self.url, self.rep).info()\r\n            return finger\r\n        except Exception as e:\r\n            print(e)\r\n            return \"Unknown\"\r\n\r\n    def PortScan(self):\r\n        redispool.append(\"runlog\", \"Port scanning target {}!\\n\".format(self.url))\r\n        print(\"Port scanning target!\")\r\n        if \"/\" in self.domain:\r\n            host = self.domain.split(\"/\")[0]\r\n        else:\r\n            host = self.domain\r\n        print(host)\r\n        try:\r\n            return GetMessage.PortScan(host)\r\n        except Exception as e:\r\n            print(e)\r\n            return \"Unknown\"\r\n\r\n    def SenDir(self):\r\n        redispool.append(\"runlog\", \"Probing sensitive directories and files for {}!\\n\".format(self.url))\r\n        print(\"Probing sensitive directories and files!\")\r\n        try:\r\n            return GetMessage.SenFileScan(self.domain, self.url)\r\n        except Exception as e:\r\n            print(e)\r\n            return \"None\"\r\n\r\n    def WebLogicScan(self):\r\n        redispool.append(\"runlog\", \"Running WebLogic vulnerability checks for {}!\\n\".format(self.url))\r\n        print(\"Running WebLogic vulnerability checks!\")\r\n        try:\r\n            results = WebLogicScan.run(self.domain)\r\n            with app.app_context():\r\n                for result in results:\r\n                    vulnerable, bugurl, bugname, bugdetail = result\r\n                    if vulnerable:\r\n                        bug = BugList(oldurl=self.domain, bugurl=bugurl, bugname=bugname,\r\n                                      buggrade=redispool.hget('bugtype', bugname),\r\n                                      payload=bugurl, bugdetail=bugdetail)\r\n                        redispool.pfadd(redispool.hget('bugtype', bugname), bugurl)\r\n                        redispool.pfadd(bugname, bugurl)\r\n                        db.session.add(bug)\r\n                        db.session.commit()\r\n        except Exception as e:\r\n            print(e)\r\n            pass\r\n\r\n    def AngelSwordMain(self):\r\n        redispool.append(\"runlog\", \"Running the built-in POC vulnerability checks for {}!\\n\".format(self.url))\r\n        print(\"Running the built-in POC vulnerability checks!\")\r\n        try:\r\n            selfpocscan2.AngelSwordMain(self.url)\r\n        except Exception as e:\r\n            print(e)\r\n            pass\r\n\r\n\r\n\r\nif __name__ == '__main__':\r\n    # redispool=redis.ConnectionPool(host='127.0.0.1',port=6379, decode_responses=True)\r\n    # redispool = redis.Redis(connection_pool=ImportToRedis.redisPool)\r\n    try:\r\n        rep = requests.get(url=\"http://127.0.0.1/\", headers=core.GetHeaders(), timeout=10)\r\n        test = GetBaseMessage(\"127.0.0.1\", \"http://127.0.0.1\", rep)\r\n        print(test.GetDate())\r\n        # test.AngelSwordMain()\r\n        # print(test.GetStatus())\r\n        # print(test.GetTitle())\r\n        # print(test.GetResponseHeader())\r\n        # print(test.GetFinger())\r\n        # print(test.PortScan())\r\n        # print(test.SenDir())\r\n    except Exception as e:\r\n        print(e)\r\n        print(\">>>>>>>>> timeout\", \"cyan\")\r\n        pass\r\n\r\n","sub_path":"BaseMessage.py","file_name":"BaseMessage.py","file_ext":"py","file_size_in_byte":5311,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"268357536","text":"#!/usr/bin/env python\n__author__ = 'wparkinson'\nfrom osgeo import gdal\n\n# 
My Write Image Function --------------------------------------------\ndef writeTiff(name, cols, rows, bands, type, geoTransform, proj, data):\n    # Fetch and register the GTiff driver before creating the output dataset\n    driver = gdal.GetDriverByName('GTiff')\n    driver.Register()\n    outDataset = driver.Create(name, cols, rows, bands, type)\n    outDataset.SetGeoTransform(geoTransform)\n    outDataset.SetProjection(proj)\n    for i in range(0,bands):\n        outBand = outDataset.GetRasterBand(i+1)\n        if (bands==1):\n            outBand.WriteArray(data)\n        else:\n            outBand.WriteArray(data[i])\n    outDataset = None  # close file\n\n\n# My Write Image Function from Template Image --------------------------------------------\ndef writeTiffTemplate(name, template, data, type):\n    # Setup raster writing variables --------------------------------------------\n    driver = gdal.GetDriverByName('GTiff')\n    driver.Register()\n    cols = template.RasterXSize\n    rows = template.RasterYSize\n    bands = template.RasterCount\n    geoTransform = template.GetGeoTransform()\n    proj = template.GetProjection()\n\n    outDataset = driver.Create(name, cols, rows, bands, type)\n    outDataset.SetGeoTransform(geoTransform)\n    outDataset.SetProjection(proj)\n    for i in range(0,bands):\n        outBand = outDataset.GetRasterBand(i+1)\n        if (bands==1):\n            outBand.WriteArray(data)\n        else:\n            outBand.WriteArray(data[i])\n    outDataset = None  # close file\n\n# GDT_Unknown = 0, GDT_Byte = 1, GDT_UInt16 = 2, GDT_Int16 = 3,\n# GDT_UInt32 = 4, GDT_Int32 = 5, GDT_Float32 = 6, GDT_Float64 = 7,\n# GDT_CInt16 = 8, GDT_CInt32 = 9, GDT_CFloat32 = 10, GDT_CFloat64 = 11\n","sub_path":"writeGeoTiff.py","file_name":"writeGeoTiff.py","file_ext":"py","file_size_in_byte":1621,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"309697396","text":"from odoo import api, fields, models\n\n\nclass CurrencyRate(models.Model):\n    _inherit = \"res.currency.rate\"\n\n    value = fields.Float('Value', default=1.0)\n\n    @api.onchange('value')\n    def calculate_currency(self):\n        if self.value:  # guard against division by zero\n            self.rate = 1 / self.value\n\n    @api.onchange('rate')\n    def calculate_rate(self):\n        if self.rate:  # guard against division by zero\n            self.value = 1 / self.rate\n","sub_path":"ln10_co_etet/models/res_currency.py","file_name":"res_currency.py","file_ext":"py","file_size_in_byte":345,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"524415827","text":"#-------------------------------------------------------------------------------\n# $Id$\n#\n# Project: EOxServer <http://eoxserver.org>\n# Authors: Fabian Schindler <fabian.schindler@eox.at>\n#\n#-------------------------------------------------------------------------------\n# Copyright (C) 2013 EOX IT Services GmbH\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell \n# copies of the Software, and to permit persons to whom the Software is \n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all\n# copies of this Software or works derived from this Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n#-------------------------------------------------------------------------------\n\n\nfrom lxml import etree\n\nfrom django.contrib.gis.geos import Polygon\nfrom django.utils.timezone import now\n\nfrom eoxserver.core.config import get_eoxserver_config\nfrom eoxserver.core.util.timetools import isoformat\nfrom eoxserver.backends.access import retrieve\nfrom eoxserver.contrib.osr import SpatialReference\nfrom eoxserver.resources.coverages.models import (\n RectifiedStitchedMosaic, ReferenceableDataset\n)\nfrom eoxserver.resources.coverages.formats import getFormatRegistry\nfrom eoxserver.resources.coverages import crss, models\nfrom eoxserver.services.gml.v32.encoders import GML32Encoder, EOP20Encoder\nfrom eoxserver.services.ows.component import ServiceComponent, env\nfrom eoxserver.services.ows.common.config import CapabilitiesConfigReader\nfrom eoxserver.services.ows.common.v20.encoders import OWS20Encoder\nfrom eoxserver.services.ows.wcs.v20.util import (\n nsmap, ns_xlink, ns_gml, ns_wcs, ns_eowcs,\n OWS, GML, GMLCOV, WCS, CRS, EOWCS, SWE, INT, SUPPORTED_INTERPOLATIONS\n)\n\n\nclass WCS20CapabilitiesXMLEncoder(OWS20Encoder):\n def encode_capabilities(self, sections, coverages_qs=None,\n dataset_series_qs=None):\n conf = CapabilitiesConfigReader(get_eoxserver_config())\n\n all_sections = \"all\" in sections\n caps = []\n if all_sections or \"serviceidentification\" in sections:\n caps.append(\n OWS(\"ServiceIdentification\",\n OWS(\"Title\", conf.title),\n OWS(\"Abstract\", conf.abstract),\n OWS(\"Keywords\", *[\n OWS(\"Keyword\", keyword) for keyword in conf.keywords\n ]),\n OWS(\"ServiceType\", \"OGC WCS\", codeSpace=\"OGC\"),\n OWS(\"ServiceTypeVersion\", \"2.0.1\"),\n OWS(\"Profile\", \"http://www.opengis.net/spec/WCS_application-profile_earth-observation/1.0/conf/eowcs\"),\n OWS(\"Profile\", \"http://www.opengis.net/spec/WCS_application-profile_earth-observation/1.0/conf/eowcs_get-kvp\"),\n OWS(\"Profile\", \"http://www.opengis.net/spec/WCS_service-extension_crs/1.0/conf/crs\"),\n OWS(\"Profile\", \"http://www.opengis.net/spec/WCS/2.0/conf/core\"),\n OWS(\"Profile\", \"http://www.opengis.net/spec/WCS_protocol-binding_get-kvp/1.0/conf/get-kvp\"),\n OWS(\"Profile\", \"http://www.opengis.net/spec/WCS_protocol-binding_post-xml/1.0/conf/post-xml\"),\n OWS(\"Profile\", \"http://www.opengis.net/spec/GMLCOV/1.0/conf/gml-coverage\"),\n OWS(\"Profile\", \"http://www.opengis.net/spec/GMLCOV/1.0/conf/multipart\"),\n OWS(\"Profile\", \"http://www.opengis.net/spec/GMLCOV/1.0/conf/special-format\"),\n OWS(\"Profile\", \"http://www.opengis.net/spec/GMLCOV_geotiff-coverages/1.0/conf/geotiff-coverage\"),\n OWS(\"Profile\", \"http://www.opengis.net/spec/WCS_geotiff-coverages/1.0/conf/geotiff-coverage\"),\n OWS(\"Profile\", \"http://www.opengis.net/spec/WCS_service-model_crs-predefined/1.0/conf/crs-predefined\"),\n OWS(\"Profile\", \"http://www.opengis.net/spec/WCS_service-extension_interpolation/1.0/conf/interpolation\"),\n OWS(\"Profile\", \"http://www.opengis.net/spec/WCS_service-extension_range-subsetting/1.0/conf/record-subsetting\"),\n OWS(\"Profile\", \"http://www.opengis.net/spec/WCS_service-extension_scaling/1.0/conf/scaling\"),\n OWS(\"Fees\", conf.fees),\n OWS(\"AccessConstraints\", conf.access_constraints)\n )\n )\n\n if 
all_sections or \"serviceprovider\" in sections:\n caps.append(\n OWS(\"ServiceProvider\",\n OWS(\"ProviderName\", conf.provider_name),\n self.encode_reference(\"ProviderSite\", conf.provider_site),\n OWS(\"ServiceContact\",\n OWS(\"IndividualName\", conf.individual_name),\n OWS(\"PositionName\", conf.position_name),\n OWS(\"ContactInfo\",\n OWS(\"Phone\",\n OWS(\"Voice\", conf.phone_voice),\n OWS(\"Facsimile\", conf.phone_facsimile)\n ),\n OWS(\"Address\",\n OWS(\"DeliveryPoint\", conf.delivery_point),\n OWS(\"City\", conf.city),\n OWS(\"AdministrativeArea\", conf.administrative_area),\n OWS(\"PostalCode\", conf.postal_code),\n OWS(\"Country\", conf.country),\n OWS(\"ElectronicMailAddress\", conf.electronic_mail_address)\n ),\n self.encode_reference(\n \"OnlineResource\", conf.onlineresource\n ),\n OWS(\"HoursOfService\", conf.hours_of_service),\n OWS(\"ContactInstructions\", conf.contact_instructions)\n ),\n OWS(\"Role\", conf.role)\n )\n )\n )\n\n\n if all_sections or \"operationsmetadata\" in sections:\n component = ServiceComponent(env)\n versions = (\"2.0.0\", \"2.0.1\")\n get_handlers = component.query_service_handlers(\n service=\"WCS\", versions=versions, method=\"GET\"\n )\n post_handlers = component.query_service_handlers(\n service=\"WCS\", versions=versions, method=\"POST\"\n )\n all_handlers = sorted(\n set(get_handlers + post_handlers),\n key=lambda h: (getattr(h, \"index\", 10000), h.request)\n )\n\n operations = []\n for handler in all_handlers:\n methods = []\n if handler in get_handlers:\n methods.append(\n self.encode_reference(\"Get\", conf.http_service_url)\n )\n if handler in post_handlers:\n post = self.encode_reference(\"Post\", conf.http_service_url)\n post.append(\n OWS(\"Constraint\",\n OWS(\"AllowedValues\",\n OWS(\"Value\", \"XML\")\n ), name=\"PostEncoding\"\n )\n )\n methods.append(post)\n\n operations.append(\n OWS(\"Operation\",\n OWS(\"DCP\",\n OWS(\"HTTP\", *methods)\n ),\n # apply default values as constraints\n *[\n OWS(\"Constraint\",\n OWS(\"NoValues\"),\n OWS(\"DefaultValue\", str(default)),\n name=name\n ) for name, default\n in getattr(handler, \"constraints\", {}).items()\n ],\n name=handler.request\n )\n )\n caps.append(OWS(\"OperationsMetadata\", *operations))\n\n\n if all_sections or \"servicemetadata\" in sections:\n service_metadata = WCS(\"ServiceMetadata\")\n\n # get the list of enabled formats from the format registry\n formats = filter(\n lambda f: f, getFormatRegistry().getSupportedFormatsWCS()\n )\n service_metadata.extend(\n map(lambda f: WCS(\"formatSupported\", f.mimeType), formats)\n )\n\n # get a list of supported CRSs from the CRS registry\n supported_crss = crss.getSupportedCRS_WCS(format_function=crss.asURL)\n extension = WCS(\"Extension\")\n service_metadata.append(extension)\n extension.extend(\n map(lambda c: CRS(\"crsSupported\", c), supported_crss)\n )\n\n base_url = \"http://www.opengis.net/def/interpolation/OGC/1/\"\n\n extension.append(\n INT(\"InterpolationMetadata\", *[\n INT(\"InterpolationSupported\",\n base_url + supported_interpolation\n ) for supported_interpolation in SUPPORTED_INTERPOLATIONS\n ])\n )\n\n caps.append(service_metadata)\n\n inc_contents = all_sections or \"contents\" in sections\n inc_coverage_summary = inc_contents or \"coveragesummary\" in sections\n inc_dataset_series_summary = inc_contents or \"datasetseriessummary\" in sections\n inc_contents = inc_contents or inc_coverage_summary or inc_dataset_series_summary\n\n if inc_contents:\n contents = []\n\n if inc_coverage_summary:\n coverages = 
[]\n\n # reduce data transfer by only selecting required elements\n # TODO: currently runs into a bug\n #coverages_qs = coverages_qs.only(\n # \"identifier\", \"real_content_type\"\n #)\n\n for coverage in coverages_qs:\n coverages.append(\n WCS(\"CoverageSummary\",\n WCS(\"CoverageId\", coverage.identifier),\n WCS(\"CoverageSubtype\", coverage.real_type.__name__)\n )\n )\n contents.extend(coverages)\n\n if inc_dataset_series_summary:\n dataset_series_set = []\n\n # reduce data transfer by only selecting required elements\n # TODO: currently runs into a bug\n #dataset_series_qs = dataset_series_qs.only(\n # \"identifier\", \"begin_time\", \"end_time\", \"footprint\"\n #)\n\n for dataset_series in dataset_series_qs:\n minx, miny, maxx, maxy = dataset_series.extent_wgs84\n\n dataset_series_set.append(\n EOWCS(\"DatasetSeriesSummary\",\n OWS(\"WGS84BoundingBox\",\n OWS(\"LowerCorner\", \"%f %f\" % (miny, minx)),\n OWS(\"UpperCorner\", \"%f %f\" % (maxy, maxx)),\n ),\n EOWCS(\"DatasetSeriesId\", dataset_series.identifier),\n GML(\"TimePeriod\",\n GML(\"beginPosition\", isoformat(dataset_series.begin_time)),\n GML(\"endPosition\", isoformat(dataset_series.end_time)),\n **{ns_gml(\"id\"): dataset_series.identifier + \"_timeperiod\"}\n )\n )\n )\n\n contents.append(WCS(\"Extension\", *dataset_series_set))\n\n caps.append(WCS(\"Contents\", *contents))\n\n root = WCS(\"Capabilities\", *caps, version=\"2.0.1\", updateSequence=conf.update_sequence)\n return root\n\n def get_schema_locations(self):\n return nsmap.schema_locations\n\n\n\n\nclass GMLCOV10Encoder(GML32Encoder):\n def __init__(self, *args, **kwargs):\n self._cache = {}\n\n def get_gml_id(self, identifier):\n if identifier[0].isdigit():\n return \"gmlid_%s\" % identifier\n return identifier\n\n def encode_grid_envelope(self, low_x, low_y, high_x, high_y):\n return GML(\"GridEnvelope\",\n GML(\"low\", \"%d %d\" % (low_x, low_y)),\n GML(\"high\", \"%d %d\" % (high_x, high_y))\n )\n\n def encode_rectified_grid(self, size, extent, sr, grid_name):\n size_x, size_y = size\n minx, miny, maxx, maxy = extent\n srs_name = sr.url\n \n swap = crss.getAxesSwapper(sr.srid)\n frmt = \"%.3f %.3f\" if sr.IsProjected() else \"%.8f %.8f\"\n labels = (\"x\", \"y\") if sr.IsProjected() else (\"long\", \"lat\")\n\n axis_labels = \" \".join(swap(*labels))\n origin = frmt % swap(minx, maxy)\n x_offsets = frmt % swap((maxx - minx) / float(size_x), 0)\n y_offsets = frmt % swap(0, (miny - maxy) / float(size_y))\n\n return GML(\"RectifiedGrid\",\n GML(\"limits\",\n self.encode_grid_envelope(0, 0, size_x - 1, size_y - 1)\n ),\n GML(\"axisLabels\", axis_labels),\n GML(\"origin\",\n GML(\"Point\",\n GML(\"pos\", origin),\n **{\n ns_gml(\"id\"): self.get_gml_id(\"%s_origin\" % grid_name),\n \"srsName\": srs_name\n }\n )\n ),\n GML(\"offsetVector\", x_offsets, srsName=srs_name),\n GML(\"offsetVector\", y_offsets, srsName=srs_name),\n **{\n ns_gml(\"id\"): self.get_gml_id(grid_name),\n \"dimension\": \"2\"\n }\n )\n\n def encode_referenceable_grid(self, size, sr, grid_name):\n size_x, size_y = size\n swap = crss.getAxesSwapper(sr.srid)\n labels = (\"x\", \"y\") if sr.IsProjected() else (\"long\", \"lat\")\n axis_labels = \" \".join(swap(*labels))\n\n return GML(\"ReferenceableGrid\",\n GML(\"limits\",\n self.encode_grid_envelope(0, 0, size_x - 1, size_y - 1)\n ),\n GML(\"axisLabels\", axis_labels),\n **{\n ns_gml(\"id\"): self.get_gml_id(grid_name),\n \"dimension\": \"2\"\n }\n )\n\n def encode_domain_set(self, coverage, srid=None, size=None, extent=None,\n 
rectified=True):\n        grid_name = \"%s_grid\" % coverage.identifier\n        srs = SpatialReference(srid) if srid is not None else None\n\n        if rectified:\n            return GML(\"domainSet\",\n                self.encode_rectified_grid(\n                    size or coverage.size, extent or coverage.extent,\n                    srs or coverage.spatial_reference, grid_name\n                )\n            )\n        else:\n            return GML(\"domainSet\",\n                self.encode_referenceable_grid(\n                    size or coverage.size, srs or coverage.spatial_reference,\n                    grid_name\n                )\n            )\n\n    def encode_bounded_by(self, extent, sr=None):\n        minx, miny, maxx, maxy = extent\n        sr = sr or SpatialReference(4326)\n        swap = crss.getAxesSwapper(sr.srid)\n        labels = (\"x\", \"y\") if sr.IsProjected() else (\"long\", \"lat\")\n        axis_labels = \" \".join(swap(*labels))\n        axis_units = \"m m\" if sr.IsProjected() else \"deg deg\"\n        frmt = \"%.3f %.3f\" if sr.IsProjected() else \"%.8f %.8f\"\n        # Make sure values are outside of actual extent\n        if sr.IsProjected():\n            minx -= 0.0005\n            miny -= 0.0005\n            maxx += 0.0005\n            maxy += 0.0005\n        else:\n            minx -= 0.000000005\n            miny -= 0.000000005\n            maxx += 0.000000005\n            maxy += 0.000000005\n\n        return GML(\"boundedBy\",\n            GML(\"Envelope\",\n                GML(\"lowerCorner\", frmt % swap(minx, miny)),\n                GML(\"upperCorner\", frmt % swap(maxx, maxy)),\n                srsName=sr.url, axisLabels=axis_labels, uomLabels=axis_units,\n                srsDimension=\"2\"\n            )\n        )\n\n    # cached range types and nil value sets\n    def get_range_type(self, pk):\n        cached_range_types = self._cache.setdefault(models.RangeType, {})\n        try:\n            return cached_range_types[pk]\n        except KeyError:\n            cached_range_types[pk] = models.RangeType.objects.get(pk=pk)\n            return cached_range_types[pk]\n\n    def get_nil_value_set(self, pk):\n        cached_nil_value_set = self._cache.setdefault(models.NilValueSet, {})\n        try:\n            return cached_nil_value_set[pk]\n        except KeyError:\n            try:\n                cached_nil_value_set[pk] = models.NilValueSet.objects.get(\n                    pk=pk\n                )\n                return cached_nil_value_set[pk]\n            except models.NilValueSet.DoesNotExist:\n                return ()\n\n    def encode_nil_values(self, nil_value_set):\n        return SWE(\"nilValues\",\n            SWE(\"NilValues\",\n                *[SWE(\"nilValue\", nil_value.raw_value, reason=nil_value.reason\n                ) for nil_value in nil_value_set]\n            )\n        )\n\n    def encode_field(self, band):\n        return SWE(\"field\",\n            SWE(\"Quantity\",\n                SWE(\"description\", band.description),\n                self.encode_nil_values(\n                    self.get_nil_value_set(band.nil_value_set_id)\n                ),\n                SWE(\"uom\", code=band.uom),\n                SWE(\"constraint\",\n                    SWE(\"AllowedValues\",\n                        SWE(\"interval\", \"%s %s\" % band.allowed_values),\n                        SWE(\"significantFigures\", str(band.significant_figures))\n                    )\n                ),\n                definition=band.definition\n            ),\n            name=band.name\n        )\n\n    def encode_range_type(self, range_type):\n        return GMLCOV(\"rangeType\",\n            SWE(\"DataRecord\",\n                *[self.encode_field(band) for band in range_type]\n            )\n        )\n\n\nclass WCS20CoverageDescriptionXMLEncoder(GMLCOV10Encoder):\n    def encode_coverage_description(self, coverage):\n        if issubclass(coverage.real_type, ReferenceableDataset):\n            rectified = False\n        else:\n            rectified = True\n\n        return WCS(\"CoverageDescription\",\n            self.encode_bounded_by(coverage.extent_wgs84),\n            WCS(\"CoverageId\", coverage.identifier),\n            self.encode_domain_set(coverage, rectified=rectified),\n            self.encode_range_type(self.get_range_type(coverage.range_type_id)),\n            WCS(\"ServiceParameters\",\n                WCS(\"CoverageSubtype\", coverage.real_type.__name__)\n            ),\n            **{ns_gml(\"id\"): self.get_gml_id(coverage.identifier)}\n        )\n\n    def encode_coverage_descriptions(self, coverages):\n        return WCS(\"CoverageDescriptions\", *[\n            
self.encode_coverage_description(coverage)\n for coverage in coverages\n ])\n\n def get_schema_locations(self):\n return {ns_wcs.uri: ns_wcs.schema_location}\n\n\nclass WCS20EOXMLEncoder(WCS20CoverageDescriptionXMLEncoder, EOP20Encoder, OWS20Encoder):\n def encode_eo_metadata(self, coverage, request=None, subset_polygon=None):\n data_items = list(coverage.data_items.filter(\n semantic=\"metadata\", format=\"eogml\"\n ))\n if len(data_items) >= 1:\n with open(retrieve(data_items[0])) as f:\n earth_observation = etree.parse(f).getroot()\n\n if subset_polygon:\n try:\n feature = earth_observation.xpath(\n \"om:featureOfInterest\", namespaces=nsmap\n )[0]\n feature[0] = self.encode_footprint(\n coverage.footprint.intersection(subset_polygon),\n coverage.identifier\n )\n except IndexError:\n pass # no featureOfInterest\n\n else:\n earth_observation = self.encode_earth_observation(\n coverage, subset_polygon=subset_polygon\n )\n\n if not request:\n lineage = None\n\n elif request.method == \"GET\":\n lineage = EOWCS(\"lineage\",\n EOWCS(\"referenceGetCoverage\",\n self.encode_reference(\"Reference\",\n request.build_absolute_uri().replace(\"&\", \"&\"),\n False\n )\n ), GML(\"timePosition\", isoformat(now()))\n )\n elif request.method == \"POST\": # TODO: better way to do this\n lineage = EOWCS(\"lineage\",\n EOWCS(\"referenceGetCoverage\",\n OWS(\"ServiceReference\",\n OWS(\"RequestMessage\",\n etree.parse(request).getroot()\n ), **{ns_xlink(\"href\"): request.build_absolute_uri().replace(\"&\", \"&\")}\n )\n ), GML(\"timePosition\", isoformat(now()))\n )\n\n return GMLCOV(\"metadata\",\n GMLCOV(\"Extension\",\n EOWCS(\"EOMetadata\",\n earth_observation,\n *[lineage] if lineage is not None else []\n )\n )\n )\n\n def encode_coverage_description(self, coverage, srid=None, size=None, extent=None, footprint=None):\n source_mime = None\n for data_item in coverage.data_items.filter(semantic__startswith=\"bands\"):\n if data_item.format:\n source_mime = data_item.format\n break\n\n if source_mime:\n source_format = getFormatRegistry().getFormatByMIME(source_mime)\n # map the source format to the native one\n native_format = getFormatRegistry().mapSourceToNativeWCS20(source_format)\n elif issubclass(coverage.real_type, RectifiedStitchedMosaic):\n # use the default format for RectifiedStitchedMosaics\n native_format = getFormatRegistry().getDefaultNativeFormat()\n else:\n # TODO: improve if no native format availabe\n native_format = None\n\n if extent:\n poly = Polygon.from_bbox(extent)\n poly.srid = srid\n extent = poly.transform(4326).extent\n sr = SpatialReference(4326)\n else:\n extent = coverage.extent\n sr = coverage.spatial_reference\n\n rectified = False if issubclass(coverage.real_type, ReferenceableDataset) else True\n\n return WCS(\"CoverageDescription\",\n self.encode_bounded_by(extent, sr),\n WCS(\"CoverageId\", coverage.identifier),\n self.encode_eo_metadata(coverage),\n self.encode_domain_set(coverage, srid, size, extent, rectified),\n self.encode_range_type(self.get_range_type(coverage.range_type_id)),\n WCS(\"ServiceParameters\",\n WCS(\"CoverageSubtype\", coverage.real_type.__name__),\n WCS(\"nativeFormat\", native_format.mimeType if native_format else \"\")\n ),\n **{ns_gml(\"id\"): self.get_gml_id(coverage.identifier)}\n )\n\n def encode_range_set(self, reference, mime_type):\n return GML(\"rangeSet\",\n GML(\"File\",\n GML(\"rangeParameters\",\n **{\n ns_xlink(\"arcrole\"): \"fileReference\",\n ns_xlink(\"href\"): reference,\n ns_xlink(\"role\"): mime_type\n }\n ),\n 
GML(\"fileReference\", reference),\n GML(\"fileStructure\"),\n GML(\"mimeType\", mime_type)\n )\n )\n\n def calculate_contribution(self, footprint, contributions, subset_polygon=None):\n if subset_polygon:\n footprint = footprint.intersection(subset_polygon)\n\n for contribution in contributions:\n footprint = footprint.difference(contribution)\n contributions.append(footprint)\n return footprint\n\n\n def encode_contributing_datasets(self, coverage, subset_polygon=None):\n eo_objects = coverage.eo_objects\n if subset_polygon:\n if subset_polygon.srid != 4326:\n subset_polygon = subset_polygon.transform(4326, True)\n\n eo_objects = eo_objects.filter(\n footprint__intersects=subset_polygon\n )\n\n # iterate over all subsets in reverse order to get the\n eo_objects = eo_objects.order_by(\"-begin_time\")\n actual_contributions = []\n all_contributions = []\n for eo_object in eo_objects:\n contribution = self.calculate_contribution(\n eo_object.footprint, all_contributions, subset_polygon\n )\n if not contribution.empty and contribution.num_geom > 0:\n actual_contributions.append((eo_object, contribution))\n\n return EOWCS(\"datasets\", *[\n EOWCS(\"dataset\",\n WCS(\"CoverageId\", eo_object.identifier),\n EOWCS(\"contributingFootprint\",\n self.encode_footprint(\n contribution, eo_object.identifier\n )\n )\n )\n for eo_object, contribution in reversed(actual_contributions)\n ])\n\n def alter_rectified_dataset(self, coverage, request, tree, subset_polygon=None):\n return EOWCS(\"RectifiedDataset\", *(\n tree.getchildren() + [\n self.encode_eo_metadata(coverage, request, subset_polygon)\n ]\n ), **tree.attrib)\n\n def alter_rectified_stitched_mosaic(self, coverage, request, tree, subset_polygon=None):\n return EOWCS(\"RectifiedStitchedMosaic\", *(\n tree.getchildren() + [\n self.encode_eo_metadata(coverage, request, subset_polygon),\n self.encode_contributing_datasets(coverage, subset_polygon)\n ]\n ), **tree.attrib)\n\n def encode_referenceable_dataset(self, coverage, range_type, reference,\n mime_type, subset=None):\n # handle subset\n dst_srid = coverage.srid\n\n if not subset:\n # whole area - no subset\n domain_set = self.encode_domain_set(coverage, rectified=False)\n eo_metadata = self.encode_eo_metadata(coverage)\n extent = coverage.extent\n sr = SpatialReference(dst_srid)\n\n else:\n # subset is given\n srid, size, extent, footprint = subset\n srid = srid if srid is not None else 4326\n\n domain_set = self.encode_domain_set(\n coverage, srid, size, extent, False\n )\n eo_metadata = self.encode_eo_metadata(\n coverage, subset_polygon=footprint\n )\n\n # get the WGS84 extent\n poly = Polygon.from_bbox(extent)\n poly.srid = srid\n if srid != dst_srid:\n poly.transform(dst_srid)\n extent = poly.extent\n sr = SpatialReference(srid)\n\n return EOWCS(\"ReferenceableDataset\",\n self.encode_bounded_by(extent, sr),\n domain_set,\n self.encode_range_set(reference, mime_type),\n self.encode_range_type(range_type),\n eo_metadata,\n **{\n ns_gml(\"id\"): self.get_gml_id(coverage.identifier)\n }\n )\n\n def encode_dataset_series_description(self, dataset_series):\n return EOWCS(\"DatasetSeriesDescription\",\n self.encode_bounded_by(dataset_series.extent_wgs84),\n EOWCS(\"DatasetSeriesId\", dataset_series.identifier),\n self.encode_time_period(\n dataset_series.begin_time, dataset_series.end_time,\n \"%s_timeperiod\" % dataset_series.identifier\n ),\n **{ns_gml(\"id\"): self.get_gml_id(dataset_series.identifier)}\n )\n\n def encode_dataset_series_descriptions(self, dataset_series_set):\n return 
EOWCS(\"DatasetSeriesDescriptions\", *[\n self.encode_dataset_series_description(dataset_series)\n for dataset_series in dataset_series_set\n ])\n\n def encode_eo_coverage_set_description(self, dataset_series_set, coverages,\n number_matched=None,\n number_returned=None):\n if number_matched is None:\n number_matched = len(coverages) + len(dataset_series_set)\n if number_returned is None:\n number_returned = len(coverages) + len(dataset_series_set)\n\n root = EOWCS(\"EOCoverageSetDescription\",\n numberMatched=str(number_matched),\n numberReturned=str(number_returned)\n )\n\n if coverages:\n root.append(self.encode_coverage_descriptions(coverages))\n if dataset_series_set:\n root.append(self.encode_dataset_series_descriptions(\n dataset_series_set\n ))\n\n return root\n\n def get_schema_locations(self):\n return {ns_eowcs.uri: ns_eowcs.schema_location}\n","sub_path":"eoxserver/services/ows/wcs/v20/encoders.py","file_name":"encoders.py","file_ext":"py","file_size_in_byte":29953,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"357457705","text":"\"\"\"\njudge whether the three line length can make a triangle.\nif can make it, calculate the area and total length.\n\"\"\"\nimport math\n\na = float(input(\"a = \"))\nb = float(input(\"b = \"))\nc = float(input(\"c = \"))\n\nif a+b > c and b+c > a and a+c > b:\n total_length = a+b+c\n print(\"The total length: %f\" % total_length )\n\n p=(total_length)/2\n area = math.sqrt(p * (p-a) * (p-b) * (p-c))\n print(\"The area: %f\" % area)\nelse:\n print(\"a, b, c can not make a triangle.\")","sub_path":"Day01-15/Day03/my_code/p4. triangle.py","file_name":"p4. triangle.py","file_ext":"py","file_size_in_byte":474,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"200243349","text":"import json\nimport urllib.request\nimport pygame\nfrom pygame.locals import *\nimport sys\nimport os\nimport random\nimport classes\n\nRIOT_API_KEY = \"\" # You need to insert your own profile key to correctly run this program\nLEAGUE_CURRENT_VERSION = \"\"\ndebug = False\n\ndef get_champion_dict_from_championID(championID, datatype=\"all\"):\n if type(championID) != \"\":\n championID = str(championID)\n opener = urllib.request.FancyURLopener({})\n url = \"https://global.api.pvp.net/api/lol/static-data/{}/v1.2/champion/{}\".format(\"euw\", championID) + \"?champData=\" + datatype + \"&api_key=\" + RIOT_API_KEY\n f = opener.open(url)\n champion_data = f.read().decode(\"utf-8\")\n return json.loads(champion_data)\n\ndef get_champion_pool_dict():\n try:\n \topener = urllib.request.FancyURLopener({})\n \turl = \"https://global.api.pvp.net/api/lol/static-data/{}/v1.2/champion?api_key=\".format(\"euw\") + RIOT_API_KEY\n \tf = opener.open(url)\n \tchampion_data = f.read().decode(\"utf-8\")\n \treturn json.loads(champion_data)\n except:\n \tprint(\"Error! 
Absent or invalid RIOT_API_KEY\")\n \tsys.exit(0)\n\ndef load_champs():\n # For specific champion\n \"\"\"\n champion_dict = get_champion_dict_from_championID(129)\n for key, value in champion_dict.items():\n print(key, value)\n \"\"\"\n champion_list = []\n pool = get_champion_pool_dict()\n\n for key in pool:\n if key == \"version\":\n global LEAGUE_CURRENT_VERSION\n LEAGUE_CURRENT_VERSION = pool[key]\n if debug:\n print(\"Current League of Legends version:\", pool[key])\n if key == \"data\":\n for champion in pool[key]:\n if len(champion) > 1:\n if debug:\n print(\"Champion\", champion, \"added to the list!\")\n championID = pool[key][champion][\"id\"]\n champion_title = pool[key][champion][\"title\"]\n champion_list.append((championID, champion, champion_title))\n\n champion_list.sort()\n\n if debug:\n print(\"Champions sorted!\")\n for championID, champion, champion_title in champion_list:\n print(championID, champion + \", \" + champion_title)\n print(\"Number of champions reached:\", len(champion_list))\n\ndef load_champs_images():\n opener = urllib.request.FancyURLopener({})\n url = \"https://global.api.pvp.net/api/lol/static-data/euw/v1.2/champion?champData=image&api_key=\" + RIOT_API_KEY\n f = opener.open(url)\n champion_data = f.read().decode(\"utf-8\")\n pool = json.loads(champion_data)\n\n champion_images = []\n for image in pool[\"data\"]:\n champion_images.append([pool[\"data\"][image][\"name\"], pool[\"data\"][image][\"image\"][\"full\"]])\n\n for val in champion_images:\n # print(val)\n val.append(\"http://ddragon.leagueoflegends.com/cdn/\" + LEAGUE_CURRENT_VERSION + \"/img/champion/\" + val[1])\n\n return champion_images\n\ndef load_images_to_pygame(image_db):\n db = []\n db_alreadydownloaded = []\n if not os.path.exists(os.getcwd() + \"\\\\images\\\\champion_icons\\\\\"):\n os.makedirs(os.getcwd() + \"\\\\images\\\\champion_icons\\\\\")\n for p, d, f in os.walk(os.getcwd() + \"\\\\images\\\\champion_icons\\\\\"):\n for file in f:\n db_alreadydownloaded.append(file)\n if debug:\n print(\"Downloading images if necessary...\")\n for val in image_db:\n # print(val)\n if val[1] in db_alreadydownloaded:\n if debug:\n print(\"SKIPPED -\", val[1], \"already downloaded!\")\n else:\n f = open(os.getcwd() + \"\\\\images\\\\champion_icons\\\\\" + val[1], \"wb\")\n f.write(urllib.request.urlopen(val[2]).read())\n f.close()\n # print(val[1])\n image = classes.Champion(val[0], os.getcwd() + \"\\\\images\\\\champion_icons\\\\\" + val[1], 1, 1)\n db.append([val[0], image])\n return db\n\ndef make_champion_pool(db):\n already_picked = []\n i = 5\n ris = []\n while i != 0:\n pick = db[random.randint(0, len(championID_images_db) - 1)]\n if debug:\n print(pick)\n champ = pick[1]\n if champ not in already_picked:\n ris.append(champ)\n already_picked.append(champ)\n i -= 1\n global current_champion_image1\n global current_champion_image2\n global current_champion_image3\n global current_champion_image4\n global current_champion_image5\n current_champion_image1 = ris[0]\n current_champion_image2 = ris[1]\n current_champion_image3 = ris[2]\n current_champion_image4 = ris[3]\n current_champion_image5 = ris[4]\n\nif __name__ == \"__main__\":\n\n # Caricamento di tutti i dati campione e file\n load_champs()\n pygame.init()\n\n championID_images_db = load_images_to_pygame(load_champs_images())\n\n make_champion_pool(championID_images_db)\n\n SCREENWIDTH = 854\n SCREENHEIGHT = 480\n app_window = pygame.display.set_mode((SCREENWIDTH, SCREENHEIGHT))\n 
pygame.display.set_icon(pygame.image.load(os.getcwd() + \"\\\\images\\\\favicon.ico\"))\n\n background_image = classes.Champion(\"Background Image\", os.getcwd() + \"\\\\images\\\\backgroundimage.jpg\", 1, 1)\n\n logo = classes.Champion(\"Logo\", os.getcwd() + \"\\\\images\\\\logo.png\", 0.4, 0.4)\n\n box_shadow = classes.Champion(\"Box Shadow\", os.getcwd() + \"\\\\images\\\\box-shadow.png\", 1, 1)\n\n mastery7 = classes.Champion(\"Mastery 7\", os.getcwd() + \"\\\\images\\\\mastery7.png\", 1, 1)\n mastery7.going = \"up\"\n mastery7.x, mastery7.y = SCREENWIDTH - 20 - mastery7.width, SCREENHEIGHT - 21 - mastery7.height\n\n space_label = classes.Champion(\"Press space to change!\", os.getcwd() + \"\\\\images\\\\champion_icons\\\\Annie.png\", 1, 1)\n\n zac_lulu = classes.Champion(\"Zac and Lulu\", os.getcwd() + \"\\\\images\\\\zaclulu.png\", 0.35, 0.35)\n\n info_label = classes.Champion(\"Info Label\", os.getcwd() + \"\\\\images\\\\label.png\", 1, 1)\n\n pygame.mixer.music.load(os.getcwd() + \"\\\\music\\\\theme.mp3\")\n pygame.mixer.music.set_volume(0.3)\n pygame.mixer.music.play(loops=-1)\n\n pygame.display.set_caption(\"Random Champion Pick - Current Version: Beta v5 - Developer: Marcioz\")\n game_quit = False\n clock = pygame.time.Clock()\n FPS = 60\n frame_counter = 0\n while not game_quit:\n\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n game_quit = True\n break\n if event.type == pygame.KEYDOWN:\n if event.key == K_F4:\n game_quit = True\n break\n if event.key == K_SPACE:\n make_champion_pool(championID_images_db)\n\n if mastery7.going == \"down\":\n if mastery7.y + mastery7.height >= SCREENHEIGHT - 20:\n mastery7.y = SCREENHEIGHT - 20 - mastery7.height\n mastery7.going = \"up\"\n else:\n mastery7.y += (1/FPS) * 4 * clock.get_time()\n elif mastery7.going == \"up\":\n if mastery7.y <= SCREENHEIGHT // 1.4:\n mastery7.y = SCREENHEIGHT // 1.4\n mastery7.going = \"down\"\n else:\n mastery7.y -= (1/FPS) * 4 * clock.get_time()\n\n # pygame.Surface.fill(app_window, (122, 122, 122))\n pygame.Surface.blit(app_window, background_image.sprite, (-250, -200))\n pygame.Surface.blit(app_window, box_shadow.sprite, (0, 0))\n\n pygame.Surface.blit(app_window, zac_lulu.sprite, (480, SCREENHEIGHT - zac_lulu.height))\n\n pygame.Surface.blit(app_window, space_label.rendered_name, (480, SCREENHEIGHT - 30))\n\n pygame.Surface.blit(app_window, info_label.sprite, (20, SCREENHEIGHT - 20 - info_label.height))\n\n pygame.Surface.blit(app_window, current_champion_image1.sprite, (20 + current_champion_image1.x, current_champion_image1.y))\n pygame.Surface.blit(app_window, current_champion_image1.rendered_name, (20 + current_champion_image1.x + 3, current_champion_image1.y + current_champion_image1.height + 2))\n\n pygame.Surface.blit(app_window, current_champion_image2.sprite, (20 + current_champion_image2.width, current_champion_image2.y))\n pygame.Surface.blit(app_window, current_champion_image2.rendered_name, (20 + current_champion_image2.width + 3, current_champion_image2.y + current_champion_image2.height + 2))\n\n pygame.Surface.blit(app_window, current_champion_image3.sprite, (20 + 2*current_champion_image3.width, current_champion_image3.y))\n pygame.Surface.blit(app_window, current_champion_image3.rendered_name, (20 + 2*current_champion_image3.width + 3, current_champion_image3.y + current_champion_image3.height + 2))\n\n pygame.Surface.blit(app_window, current_champion_image4.sprite, (20 + 3*current_champion_image4.width, current_champion_image4.y))\n pygame.Surface.blit(app_window, 
current_champion_image4.rendered_name, (20 + 3*current_champion_image4.width + 3, current_champion_image4.y + current_champion_image4.height + 2))\n\n pygame.Surface.blit(app_window, current_champion_image5.sprite, (20 + 4*current_champion_image5.width, current_champion_image5.y))\n pygame.Surface.blit(app_window, current_champion_image5.rendered_name, (20 + 4*current_champion_image5.width + 3, current_champion_image5.y + current_champion_image5.height + 2))\n\n pygame.Surface.blit(app_window, mastery7.sprite, (mastery7.x, mastery7.y))\n\n pygame.Surface.blit(app_window, logo.sprite, (SCREENWIDTH - 20 - logo.width, 40))\n\n clock.tick(FPS)\n pygame.display.update()\n\n pygame.quit()\n sys.exit(0)\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":9541,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"454817846","text":"# ------------ Signature Types ------------\nSIGNATURE_TYPE_NO_PREPEND = 0\nSIGNATURE_TYPE_DECIMAL = 1\nSIGNATURE_TYPE_HEXADECIMAL = 2\n\n# ------------ Market Statistic Day Types ------------\nMARKET_STATISTIC_DAY_ONE = '1'\nMARKET_STATISTIC_DAY_SEVEN = '7'\nMARKET_STATISTIC_DAY_THIRTY = '30'\n\n# ------------ Order Types ------------\nORDER_TYPE_LIMIT = 'LIMIT'\nORDER_TYPE_STOP = 'STOP'\nORDER_TYPE_TRAILING_STOP = 'TRAILING_STOP'\nORDER_TYPE_TAKE_PROFIT = 'TAKE_PROFIT'\n\n# ------------ Order Side ------------\nORDER_SIDE_BUY = 'BUY'\nORDER_SIDE_SELL = 'SELL'\n\n# ------------ Time in Force Types ------------\nTIME_IN_FORCE_GTT = 'GTT'\nTIME_IN_FORCE_FOK = 'FOK'\nTIME_IN_FORCE_IOC = 'IOC'\n\n# ------------ Position Status Types ------------\nPOSITION_STATUS_OPEN = 'OPEN'\nPOSITION_STATUS_CLOSED = 'CLOSED'\nPOSITION_STATUS_LIQUIDATED = 'LIQUIDATED'\n\n# ------------ Order Status Types ------------\nORDER_STATUS_PENDING = 'PENDING'\nORDER_STATUS_OPEN = 'OPEN'\nORDER_STATUS_FILLED = 'FILLED'\nORDER_STATUS_CANCELED = 'CANCELED'\nORDER_STATUS_UNTRIGGERED = 'UNTRIGGERED'\n\n# ------------ Transfer Status Types ------------\nTRANSFER_STATUS_PENDING = 'PENDING'\nTRANSFER_STATUS_CONFIRMED = 'CONFIRMED'\nTRANSFER_STATUS_QUEUED = 'QUEUED'\nTRANSFER_STATUS_CANCELED = 'CANCELED'\nTRANSFER_STATUS_UNCONFIRMED = 'UNCONFIRMED'\n\n# ------------ Account Action Types ------------\nACCOUNT_ACTION_DEPOSIT = 'DEPOSIT'\nACCOUNT_ACTION_WITHDRAWAL = 'WITHDRAWAL'\n\n# ------------ Markets ------------\nMARKET_BTC_USD = 'BTC-USD'\nMARKET_ETH_USD = 'ETH-USD'\nMARKET_LINK_USD = 'LINK-USD'\n\n# ------------ Assets ------------\nASSET_USDC = 'USDC'\nASSET_BTC = 'BTC'\nASSET_ETH = 'ETH'\nASSET_LINK = 'LINK'\nCOLLATERAL_ASSET = ASSET_USDC\n\n# ------------ Synthetic Assets by Market ------------\nSYNTHETIC_ASSET_MAP = {\n MARKET_BTC_USD: ASSET_BTC,\n MARKET_ETH_USD: ASSET_ETH,\n MARKET_LINK_USD: ASSET_LINK,\n}\n\n# ------------ Asset IDs ------------\nASSET_ID_MAP = {\n ASSET_USDC: int(\n '0x24d6ea88d53b68601dcf03b3f204cbe829d3689194f823bd6a7f74292c22334',\n 16,\n ),\n ASSET_BTC: 0,\n ASSET_ETH: 1,\n ASSET_LINK: 2,\n}\nCOLLATERAL_ASSET_ID = ASSET_ID_MAP[COLLATERAL_ASSET]\n\n# ------------ Asset Resolution (Quantum Size) ------------\n#\n# The asset resolution is the number of quantums (Starkware units) that fit\n# within one \"human-readable\" unit of the asset. For example, if the asset\n# resolution for BTC is 1e10, then the smallest unit representable within\n# Starkware is 1e-10 BTC, i.e. 
1/100th of a satoshi.\n#\n# For the collateral asset (USDC), the chosen resolution corresponds to the\n# base units of the ERC-20 token. For the other, synthetic, assets, the\n# resolutions are chosen such that prices relative to USDC are close to one.\nASSET_RESOLUTION = {\n ASSET_USDC: '1e6',\n ASSET_BTC: '1e10',\n ASSET_ETH: '1e8',\n ASSET_LINK: '1e7',\n}\n","sub_path":"dydx3/constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":2822,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"419000775","text":"from django.shortcuts import render, redirect, reverse, get_object_or_404\nfrom django.views.generic import CreateView, UpdateView, DeleteView, ListView, DetailView\nfrom django.http import HttpResponse, HttpResponseRedirect\nfrom . models import Major, Student, StudentModelForm, MajorModelForm\nfrom . forms import MajorForm, StudentForm\nimport json\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.core.serializers import serialize\n\n# student_list = ListView.as_view(model=Student)\n# major_list = ListView.as_view(model=Major)\n\n\nclass StudentListView(ListView):\n model = Student\n paginate_by = 6\n template_name = 'stdmj/student_list.html'\n\n def get_context_data(self, **kwargs):\n context = super(StudentListView, self).get_context_data(**kwargs)\n paginator = context['paginator']\n page_numbers_range = 5 # Display only 5 page numbers\n max_index = len(paginator.page_range)\n\n page = self.request.GET.get('page')\n current_page = int(page) if page else 1\n\n start_index = int((current_page - 1) / page_numbers_range) * page_numbers_range\n end_index = start_index + page_numbers_range\n if end_index >= max_index:\n end_index = max_index\n\n page_range = paginator.page_range[start_index:end_index]\n context['page_range'] = page_range\n return context\n\nclass MajorListView(ListView):\n model = Major\n paginate_by = 6\n template_name = 'stdmj/major_list.html'\n\n def get_context_data(self, **kwargs):\n context = super(MajorListView, self).get_context_data(**kwargs)\n paginator = context['paginator']\n page_numbers_range = 5 # Display only 5 page numbers\n max_index = len(paginator.page_range)\n\n page = self.request.GET.get('page')\n current_page = int(page) if page else 1\n\n start_index = int((current_page - 1) / page_numbers_range) * page_numbers_range\n end_index = start_index + page_numbers_range\n if end_index >= max_index:\n end_index = max_index\n\n page_range = paginator.page_range[start_index:end_index]\n context['page_range'] = page_range\n return context\n\nstudent_list = StudentListView.as_view()\nmajor_list = MajorListView.as_view()\n\nstudent_detail = DetailView.as_view(model=Student)\nmajor_detail = DetailView.as_view(model=Major)\n\nstudent_delete = DeleteView.as_view(model=Student, success_url='/stdmj/')\nmajor_delete = DeleteView.as_view(model=Major, success_url='/stdmj/mj')\n\n\ndef student_new(request):\n if request.method=='GET':\n form = StudentForm()\n else:\n form = StudentForm(request.POST, request.FILES)\n if form.is_valid():\n Student.objects.create(**form.cleaned_data)\n return redirect(reverse(\"stdmj:student_list\"))\n return render(request, 'stdmj/student_form.html', {'form':form})\n\ndef major_new(request):\n if request.method=='GET':\n form = MajorForm()\n else:\n form = MajorForm(request.POST, request.FILES)\n if form.is_valid():\n Major.objects.create(**form.cleaned_data)\n return redirect(reverse(\"stdmj:major_list\"))\n return render(request, 
'stdmj/major_form.html', {'form':form})\n\ndef student_edit(request, pk):\n student = get_object_or_404(Student, studentID=pk)\n if request.method == 'POST':\n form = StudentModelForm(request.POST, request.FILES, instance=student)\n if form.is_valid():\n student.save()\n return redirect('/stdmj/')\n else:\n form = StudentModelForm(instance=student)\n return render(request, 'stdmj/student_form.html', {'form':form}) \n\ndef major_edit(request, pk):\n major = get_object_or_404(Major, major_id=pk)\n if request.method == 'POST':\n form = MajorModelForm(request.POST, request.FILES, instance=major)\n if form.is_valid():\n major.save()\n return redirect('/stdmj/mj')\n else:\n form = MajorModelForm(instance=major)\n return render(request, 'stdmj/major_form.html', {'form':form})\n\n@csrf_exempt\ndef searchData(request):\n data = request.POST['name']\n student = Student.objects.filter(name__contains=data)\n print(student)\n return render(request, 'stdmj/student_list_table.html', {'student_list':student})\n\n@csrf_exempt\ndef searchData2(request):\n data2 = request.POST['mjname']\n major = Major.objects.filter(major_title__contains=data2)\n print(major)\n return render(request, 'stdmj/major_list_table.html', {'major_list':major})\n\ndef upload_csv(request):\n data = {}\n if \"GET\" == request.method:\n return render(request, \"stdmj/upload_csv.html\", data)\n # if not GET, then proceed\n \n csv_file = request.FILES[\"csv_file\"]\n if not csv_file.name.endswith('.csv'):\n message.error(request, '파일이 csv 타입이 아닙니다.')\n return HttpResponseRedirect(reverse(\"stdmj:upload_csv\"))\n \n file_data = csv_file.read().decode(\"utf-8\")\t\n lines = file_data.split(\"\\n\")\n for line in lines:\n fields = line.split(\",\")\n data_dict = {}\n data_dict[\"studentID\"] = fields[0]\n data_dict[\"name\"] = fields[1]\n data_dict[\"major\"] = fields[2]\n data_dict[\"phone\"] = fields[3]\n data_dict[\"address\"] = fields[4]\n data_dict[\"hobby\"] = fields[5]\n data_dict[\"skill\"] = fields[6]\n\n \n form = StudentForm(data_dict)\n if form.is_valid():\n Student.objects.create(**form.cleaned_data)\n # form.save()\n return redirect(reverse(\"stdmj:student_list\"))\n\ndef upload_csv2(request):\n data = {}\n if \"GET\" == request.method:\n return render(request, \"stdmj/upload_csv2.html\", data)\n # if not GET, then proceed\n \n csv_file = request.FILES[\"csv_file\"]\n if not csv_file.name.endswith('.csv'):\n message.error(request, '파일이 csv 타입이 아닙니다.')\n return HttpResponseRedirect(reverse(\"stdmj:upload_csv2\"))\n \n file_data = csv_file.read().decode(\"utf-8\")\t\n lines = file_data.split(\"\\n\")\n for line in lines:\n fields = line.split(\",\")\n data_dict = {}\n data_dict[\"major_id\"] = fields[0]\n data_dict[\"major_title\"] = fields[1]\n \n form = MajorForm(data_dict)\n if form.is_valid():\n Major.objects.create(**form.cleaned_data)\n # form.save()\n return redirect(reverse(\"stdmj:major_list\"))\n ","sub_path":"stdmj/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":6452,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"309974663","text":"import argparse\nfrom glob import glob\nimport os\nimport pandas as pd\nfrom expanalysis.experiments import processing\n\ndef get_exp_DVs(use_group_fun=True, group_kwargs=None, out_dir=None):\n file_dir = os.path.dirname(__file__)\n # calculate DVs\n if group_kwargs is None:\n group_kwargs = {}\n exp_DVs = {}\n for task_data in glob(os.path.join(file_dir, '../behavioral_data/processed/group_data/*.csv')):\n df = 
pd.read_csv(task_data)\n exp_id = df.experiment_exp_id.unique()[0]\n print(exp_id)\n if out_dir:\n group_kwargs['outfile'] = os.path.join(out_dir, exp_id)\n DVs, valence, description = processing.calc_exp_DVs(df, \n use_group_fun=use_group_fun,\n group_kwargs=group_kwargs)\n exp_DVs[exp_id] = DVs\n if out_dir:\n DVs.to_pickle(os.path.join(out_dir, exp_id+'_DVs.pkl'))\n DV_df = pd.DataFrame()\n for name, DV in exp_DVs.items():\n if DV is not None:\n DV.columns = [name+'_%s' % i for i in DV.columns]\n DV_df = pd.concat([DV_df, DV], axis=1) \n return DV_df\n\nif __name__ =='__main__':\n # parse arguments\n parser = argparse.ArgumentParser()\n parser.add_argument('--no_group', action='store_false')\n # HDDM params\n parser.add_argument('--out_dir', default=None)\n parser.add_argument('--hddm_samples', default=20000, type=int)\n parser.add_argument('--hddm_burn', default=10000, type=int)\n parser.add_argument('--hddm_thin', default=1, type=int)\n parser.add_argument('--no_parallel', action='store_false')\n parser.add_argument('--num_cores', default=None, type=int)\n parser.add_argument('--mode', default=None, type=str)\n \n args = parser.parse_args()\n out_dir = args.out_dir\n use_group = args.no_group\n # HDDM variables\n hddm_samples = args.hddm_samples\n hddm_burn= args.hddm_burn\n hddm_thin= args.hddm_thin\n parallel = args.no_parallel\n num_cores = args.num_cores\n # mode for motor selective stop signal\n mode = args.mode\n \n #calculate DVs\n group_kwargs = {'parallel': parallel,\n 'num_cores': num_cores,\n 'samples': hddm_samples,\n 'burn': hddm_burn,\n 'thin': hddm_thin}\n \n DV_df = get_exp_DVs(use_group, group_kwargs, out_dir)\n if out_dir is not None:\n DV_df.to_pickle(os.path.join(out_dir, 'fmri_DVs.pkl'))\n \n\n","sub_path":"behavioral_data_prep/extract_DVs.py","file_name":"extract_DVs.py","file_ext":"py","file_size_in_byte":2498,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"176625960","text":"from PIL import Image, ImageFile\nimport bs4\nimport requests\nimport urllib\nfrom io import StringIO\n\n\n\nim = Image.open(\"C:\\\\Code\\\\Python\\\\Wall-Fresh\\\\test.jpg\")\nwidth , height = im.size\n#print(width)\n#print(height)\n\nurl = \"https://i.redd.it/x0n5toj5j1o11.jpg\"\n\ndef getsizes(uri):\n # get file size *and* image size (None if not known)\n file = urllib.request.urlopen(uri)\n size = file.headers.get(\"content-length\")\n if size: size = int(size)\n p = ImageFile.Parser()\n while 1:\n data = file.read(1024)\n if not data:\n break\n p.feed(data)\n if p.image:\n print(p.image.size)\n width = p.image.size[0]\n height = p.image.size[1]\n #print(width)\n #print(height)\n file.close()\n if(height > width or height < 1080 or width < 1920):\n return False\n return True\n \n \n\n\n\nif(getsizes(url)):\n print(\"gotem\")\n'''\nsize,width,height = getsizes(url)\nprint(size)\nprint(width)\nprint(height)\n'''\n\n\n\n\n# (10965, (179, 188))\n'''\nfile = StringIO(str(urllib.request.urlopen(url).read()))\nim2 = Image.open(file)\nwidth,height = im.size\nprint(width)\nprint(height)\n'''","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1205,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"375965111","text":"from battle.businesslogic.effects.Effect import Effect\nimport random\n\n\nclass RandomizeDeckEffect(Effect):\n \"\"\"\n Randomizes order of cards in deck.\n This effect randomizes cards order til it's changed, there is no randomization which 
does nothing.\n \"\"\"\n\n def on_activation(self, target, turns_queue):\n deck = target.deck\n\n original_cards_order = [i for i in range(deck.size())]\n new_cards_order = [i for i in range(deck.size())]\n\n # Randomize cards order til it's changed.\n while new_cards_order == original_cards_order:\n random.shuffle(new_cards_order)\n\n # Create tuple of BattleCard objects in new order\n cards_in_new_order = tuple([deck.lookup(ind) for ind in new_cards_order])\n\n deck.create_cards_queue(cards_in_new_order)\n","sub_path":"WMIAdventure/backend/WMIAdventure_backend/battle/businesslogic/effects/RandomizeDeckEffect.py","file_name":"RandomizeDeckEffect.py","file_ext":"py","file_size_in_byte":808,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"475491078","text":"# Copyright 2015-2018 Cisco Systems, Inc.\n# All rights reserved.\n\nfrom datetime import datetime, timedelta\nimport random\nimport string\nimport uuid\nimport hashlib\n\nimport six\nimport prettytable\n\ndef timestamp():\n \"\"\"return current time\n \"\"\"\n return datetime.now().__str__()\n\n\ndef timedelta_for_human(seconds):\n \"\"\"convert seconds to day,hour,min,second\n \"\"\"\n sec = timedelta(seconds=int(seconds))\n try:\n d = datetime(1, 1, 1) + sec\n except OverflowError:\n return \"0d0h0m0s\"\n return \"%dd%dh%dm%ds\" % (d.day - 1, d.hour, d.minute, d.second)\n\n\ndef humantimestr_to_seconds(timestr):\n tmp = timestr.split('d')\n day = tmp[0]\n tmp = tmp[1].split('h')\n hour = tmp[0]\n tmp = tmp[1].split('m')\n _min = tmp[0]\n tmp = tmp[1].split('s')\n sec = tmp[0]\n deltatime = timedelta(days=int(day), hours=int(hour), minutes=int(_min), seconds=int(sec))\n seconds = deltatime.total_seconds()\n return seconds\n\n\ndef name_generator(length=10):\n \"\"\"generate random name\n \"\"\"\n return ''.join(random.sample(string.ascii_lowercase + string.digits, length))\n\n\ndef uuid_generator():\n \"\"\"generate uuid string\n \"\"\"\n return uuid.uuid4().hex\n\n\ndef hash_generator(node_str):\n \"\"\"return hash value\n \"\"\"\n return hashlib.sha224(node_str.encode('utf-8')).hexdigest()\n\n\ndef prefix_dict_str(value):\n \"\"\"dict to string format\n \"\"\"\n return ','.join([\"%s:%s\" % (k, value[k])for k in sorted(value.keys())])\n\ndef print_list(objs, fields, exclude_unavailable=False):\n \"\"\"Prints a list of objects.\n @param objs: Objects to print\n @param fields: Fields on each object to be printed\n @param exclude_unavailable: Boolean to decide if unavailable fields are\n removed\n \"\"\"\n rows = []\n removed_fields = []\n for o in objs:\n row = []\n for field in fields:\n if field in removed_fields:\n continue\n if isinstance(o, dict) and field in o:\n data = o[field]\n else:\n if not hasattr(o, field) and exclude_unavailable:\n removed_fields.append(field)\n continue\n else:\n data = getattr(o, field, '')\n if data is None:\n data = '-'\n if isinstance(data, six.string_types) and \"\\r\" in data:\n data = data.replace(\"\\r\", \" \")\n row.append(data)\n rows.append(row)\n\n for f in removed_fields:\n fields.remove(f)\n\n pt = prettytable.PrettyTable((f for f in fields), caching=False)\n pt.align = 'l'\n for row in rows:\n pt.add_row(row)\n print(pt)\n","sub_path":"cxcomm/cxcomm/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2683,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"83426846","text":"import numpy\n\nimport chainer\nfrom chainer.backends import cuda\nfrom chainer import 
function_node\nfrom chainer import utils\nfrom chainer.utils import type_check\n\nif cuda.cudnn_enabled:\n    cudnn = cuda.cudnn\n    libcudnn = cuda.cuda.cudnn\n    _mode = libcudnn.CUDNN_ACTIVATION_TANH\n\n\nclass BinaryActivation(function_node.FunctionNode):\n\n    \"\"\"binary activation function.\"\"\"\n\n    def check_type_forward(self, in_types):\n        type_check.expect(in_types.size() == 1)\n        type_check.expect(in_types[0].dtype.kind == 'f')\n\n    def forward_cpu(self, x):\n        y = utils.force_array(numpy.where(x[0] >= 0.0,\n                                          1.0,\n                                          -1.0)).astype(numpy.float32)\n        self.retain_inputs((0,))\n        self._use_cudnn = False\n        return y,\n\n    def forward_gpu(self, x):\n        y = cuda.elementwise('T x', 'T y',\n                             'y = (x >= 0.0) - (x < 0.0)',\n                             'binary_activation')(x)\n        self.retain_inputs((0,))\n        self._use_cudnn = False\n        return y,\n\n    def backward(self, indexes, grad_outputs):\n        x = self.get_retained_inputs()[0].data\n        gy = grad_outputs[0]\n        return BinaryActivationGrad(x).apply((gy,))\n\n\nclass BinaryActivationGrad(function_node.FunctionNode):\n\n    def __init__(self, x):\n        super(BinaryActivationGrad, self).__init__()\n        self.x = x\n\n    def forward_cpu(self, inputs):\n        gy, = inputs\n        # straight-through estimator: pass the gradient only where |x| < 1\n        grad_mask = numpy.logical_and(self.x < 1.0, self.x > -1.0)\n        return utils.force_array(gy * grad_mask).astype(numpy.float32),\n\n    def forward_gpu(self, inputs):\n        gy, = inputs\n        gx = cuda.elementwise('T x, T gy', 'T gx',\n                              'gx = gy * ((x < 1.0) and (x > -1.0))',\n                              'binary_activation_bwd')(self.x, gy)\n        return gx,\n\n\ndef binary_activation(x):\n    \"\"\"binary activation function.\"\"\"\n    return BinaryActivation().apply((x,))[0]\n","sub_path":"lib/functions/activation/binary_activation.py","file_name":"binary_activation.py","file_ext":"py","file_size_in_byte":2047,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"331645846","text":"from collections import defaultdict\n\n\ndef main():\n    with open(\"input\", \"r\") as file:\n        data = [line.strip() for line in file.readlines()]\n\n    # transpose: column i of the input becomes row i\n    data = [[row[i] for row in data] for i in range(len(data[0]))]\n    message = []\n    for line in data:\n        frequency = defaultdict(lambda: 0)\n        for letter in line:\n            frequency[letter] += 1\n\n        maxLetter, _ = max(frequency.items(), key=lambda x: x[1])\n        message.append(maxLetter)\n\n    print(\"\".join(message))\n\n\nif __name__ == \"__main__\":\n    main()\n","sub_path":"2016/day06/task1.py","file_name":"task1.py","file_ext":"py","file_size_in_byte":529,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"145116030","text":"# -*- coding:utf-8 -*-\n\"\"\"\nCreated on 01/25/2018\n@author: Nicolas Thiebaut\n@email: nicolas@visage.jobs\n\"\"\"\nfrom logging import Formatter, getLogger, StreamHandler, WARNING\n\nLOG_FORMAT = (\n    \"%(asctime)s [%(levelname)s]: %(message)s in %(pathname)s:%(lineno)d\")\n\nDEFAULT_LOG_LEVEL = WARNING\n\n\nDEFAULT_HANDLER = StreamHandler()\nDEFAULT_HANDLER.setFormatter(Formatter(LOG_FORMAT))\n\npackage_logger = getLogger(\"zeugma\")\npackage_logger.setLevel(DEFAULT_LOG_LEVEL)\npackage_logger.addHandler(DEFAULT_HANDLER)\n","sub_path":"zeugma/logger.py","file_name":"logger.py","file_ext":"py","file_size_in_byte":508,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"459950333","text":"from datetime import datetime\nfrom django.test import TestCase\nfrom django.utils import timezone\nfrom graphene.test import Client\n\nfrom server.schema import schema\nfrom 
server.models import Team, Match, TeamMatch, MLModel, Prediction\n\n\nclass TestSchema(TestCase):\n def setUp(self):\n self.maxDiff = None\n self.client = Client(schema)\n\n home_team = Team(name=\"Richmond\")\n home_team.save()\n away_team = Team(name=\"Melbourne\")\n away_team.save()\n\n match_datetime = timezone.make_aware(datetime(2018, 5, 5))\n new_match = Match(start_date_time=match_datetime, round_number=5)\n new_match.save()\n match_datetime = timezone.make_aware(datetime(2014, 5, 5))\n old_match = Match(start_date_time=match_datetime, round_number=7)\n old_match.save()\n\n (TeamMatch(team=home_team, match=new_match, at_home=True, score=150).save())\n (TeamMatch(team=away_team, match=new_match, at_home=False, score=100).save())\n (TeamMatch(team=home_team, match=old_match, at_home=True, score=150).save())\n (TeamMatch(team=away_team, match=old_match, at_home=False, score=100).save())\n\n ml_model = MLModel(name=\"test_model\")\n ml_model.save()\n\n new_prediction = Prediction(\n match=new_match,\n ml_model=ml_model,\n predicted_winner=home_team,\n predicted_margin=50,\n )\n new_prediction.save()\n old_prediction = Prediction(\n match=old_match,\n ml_model=ml_model,\n predicted_winner=away_team,\n predicted_margin=50,\n )\n old_prediction.save()\n\n def assert_correct_results(self, results, expected_results):\n # graphene returns OrderedDicts instead of dicts, which makes asserting\n # on results a little more complicated\n for idx, result in enumerate(results):\n expected_result = expected_results[idx]\n\n self.assertEqual(dict(result[\"match\"]), expected_result[\"match\"])\n self.assertEqual(dict(result[\"mlModel\"]), expected_result[\"mlModel\"])\n self.assertEqual(result[\"isCorrect\"], expected_result[\"isCorrect\"])\n\n def test_predictions(self):\n expected_predictions = [\n {\n \"match\": {\"roundNumber\": 5, \"year\": 2018},\n \"mlModel\": {\"name\": \"test_model\"},\n \"isCorrect\": True,\n },\n {\n \"match\": {\"roundNumber\": 7, \"year\": 2014},\n \"mlModel\": {\"name\": \"test_model\"},\n \"isCorrect\": False,\n },\n ]\n\n executed = self.client.execute(\n \"\"\"\n query QueryType {\n predictions {\n match { roundNumber, year },\n mlModel { name },\n isCorrect\n }\n }\n \"\"\"\n )\n\n self.assert_correct_results(\n executed[\"data\"][\"predictions\"], expected_predictions\n )\n\n with self.subTest(year=2018):\n executed = self.client.execute(\n \"\"\"\n query QueryType {\n predictions(year: 2018) {\n match { roundNumber, year },\n mlModel { name },\n isCorrect\n }\n }\n \"\"\"\n )\n\n self.assert_correct_results(\n executed[\"data\"][\"predictions\"], expected_predictions[:1]\n )\n","sub_path":"backend/server/tests/unit/test_schema.py","file_name":"test_schema.py","file_ext":"py","file_size_in_byte":3526,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"220084533","text":"\"\"\"Given k, find the geometric sum i.e.\n1 + 1/2 + 1/4 + 1/8 + ... 
+ 1/(2^k) \nusing recursion.\"\"\"\n\ndef geometricSum(k):\n    if k == 0:\n        return 1\n\n    TotalSum = (1/2**k) + geometricSum(k-1)\n    return TotalSum\n\n\nk = int(input())\nans = geometricSum(k)\nprint(ans)","sub_path":"geometricSum.py","file_name":"geometricSum.py","file_ext":"py","file_size_in_byte":291,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"39975907","text":"from django.conf.urls import patterns, url\nfrom book import views\n\nurlpatterns = patterns('',\n    url(r'^$', views.IndexView.as_view(), name='index'),\n    url(r'^new/$', views.edit_item, name='new_item'),\n    url(r'^batch_modify/$', views.batch_modify, name='batch_modify'),\n    url(r'^analysis/$', views.AnalysisListView.as_view(), name='analysis_list'),\n    url(r'^analysis/(?P<title>.+)/$', views.AnalysisView.as_view(), name='analysis'),\n    url(r'^account/(?P<title>.+)/$', views.AccountView.as_view(), name='account'),\n    url(r'^account/(?P<title>.+)/update$', views.update_account, name='account_update'),\n    url(r'^(?P<pk>.+)/$', views.ItemView.as_view(), name='item'),\n    url(r'^(?P<iid>.+)/edit$', views.edit_item, name='edit_item'),\n    url(r'^tags/(?P<pk>.+)/$', views.TagView.as_view(), name='tag'),\n)\n","sub_path":"book/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":796,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"228106159","text":"#!/usr/bin/env python\n\n\"\"\"\n    fetch_trajectory_demo.py - Version 0.1 2014-01-14\n    \n    Send a trajectory to the FollowJointTrajectoryAction server for the Fetch robot\n    \n    Created for the Pi Robot Project: http://www.pirobot.org\n    Copyright (c) 2014 Patrick Goebel. All rights reserved.\n\n    This program is free software; you can redistribute it and/or modify\n    it under the terms of the GNU General Public License as published by\n    the Free Software Foundation; either version 2 of the License, or\n    (at your option) any later version.\n    \n    This program is distributed in the hope that it will be useful,\n    but WITHOUT ANY WARRANTY; without even the implied warranty of\n    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  
See the\n GNU General Public License for more details at:\n \n http://www.gnu.org/licenses/gpl.html\n\"\"\"\n\nimport rospy\nimport actionlib\n\nfrom control_msgs.msg import FollowJointTrajectoryAction, FollowJointTrajectoryGoal\nfrom trajectory_msgs.msg import JointTrajectory, JointTrajectoryPoint\n\nclass TrajectoryDemo():\n def __init__(self):\n rospy.init_node('trajectory_demo')\n \n # Set to True to move back to the starting configurations\n reset = rospy.get_param('~reset', False)\n \n # Set to False to wait for arm to finish before moving head\n sync = rospy.get_param('~sync', True)\n \n # Which joints define the arm?\n arm_joints = ['shoulder_pan_joint',\n 'shoulder_lift_joint',\n 'upperarm_roll_joint',\n 'elbow_flex_joint',\n 'forearm_roll_joint',\n 'wrist_flex_joint',\n 'wrist_roll_joint']\n \n # Which joints define the head?\n head_joints = ['head_pan_joint', 'head_tilt_joint']\n \n # Which joint defines the torso?\n torso_joints = ['torso_lift_joint']\n \n if reset:\n # Set the arm back to the tucked position\n arm_goal = [-1.3901, 1.3439, -2.8327, -1.8119, 0.0, -1.6571, 0.0]\n \n # Re-center the head\n head_goal = [0.0, 0.0]\n \n # Bring the toros back down\n torso_goal = [0.0]\n else:\n # Set a goal configuration for the arm\n arm_goal = [-1.0, 0, 0, -1.0, 0, 0, 0]\n \n # Set a goal configuration for the head\n head_goal = [-0.85, -0.25]\n \n # Move the torso up\n torso_goal = [0.35]\n \n # Connect to the arm trajectory action server\n rospy.loginfo('Waiting for arm trajectory controller...')\n \n arm_client = actionlib.SimpleActionClient('arm_controller/follow_joint_trajectory', FollowJointTrajectoryAction)\n \n arm_client.wait_for_server()\n \n rospy.loginfo('...connected.')\n \n # Connect to the head trajectory action server\n rospy.loginfo('Waiting for head trajectory controller...')\n \n head_client = actionlib.SimpleActionClient('head_controller/follow_joint_trajectory', FollowJointTrajectoryAction)\n \n head_client.wait_for_server()\n \n rospy.loginfo('...connected.')\n \n # Connect to the torso trajectory action server\n rospy.loginfo('Waiting for headtorso trajectory controller...')\n \n torso_client = actionlib.SimpleActionClient('torso_controller/follow_joint_trajectory', FollowJointTrajectoryAction)\n \n torso_client.wait_for_server()\n \n rospy.loginfo('...connected.') \n \n # Create a single-point arm trajectory with the arm_goal as the end-point\n arm_trajectory = JointTrajectory()\n arm_trajectory.joint_names = arm_joints\n arm_trajectory.points.append(JointTrajectoryPoint())\n arm_trajectory.points[0].positions = arm_goal\n arm_trajectory.points[0].velocities = [0.0 for i in arm_joints]\n arm_trajectory.points[0].accelerations = [0.0 for i in arm_joints]\n arm_trajectory.points[0].time_from_start = rospy.Duration(5.0)\n \n # Send the trajectory to the arm action server\n rospy.loginfo('Moving the arm to goal position...')\n \n # Create an empty trajectory goal\n arm_goal = FollowJointTrajectoryGoal()\n \n # Set the trajectory component to the goal trajectory created above\n arm_goal.trajectory = arm_trajectory\n \n # Specify zero tolerance for the execution time\n arm_goal.goal_time_tolerance = rospy.Duration(0.0)\n \n # Send the goal to the action server\n arm_client.send_goal(arm_goal)\n \n if not sync:\n # Wait for up to 5 seconds for the motion to complete \n arm_client.wait_for_result(rospy.Duration(5.0))\n \n # Create a single-point head trajectory with the head_goal as the end-point\n head_trajectory = JointTrajectory()\n head_trajectory.joint_names = 
head_joints\n head_trajectory.points.append(JointTrajectoryPoint())\n head_trajectory.points[0].positions = head_goal\n head_trajectory.points[0].velocities = [0.0 for i in head_joints]\n head_trajectory.points[0].accelerations = [0.0 for i in head_joints]\n head_trajectory.points[0].time_from_start = rospy.Duration(5.0)\n \n # Send the trajectory to the head action server\n rospy.loginfo('Moving the head to goal position...')\n \n head_goal = FollowJointTrajectoryGoal()\n head_goal.trajectory = head_trajectory\n head_goal.goal_time_tolerance = rospy.Duration(0.0)\n \n # Send the goal\n head_client.send_goal(head_goal)\n \n if not sync:\n # Wait for up to 5 seconds for the motion to complete \n head_client.wait_for_result(rospy.Duration(5.0))\n \n # Create a single-point torso trajectory with the torso_goal as the end-point\n torso_trajectory = JointTrajectory()\n torso_trajectory.joint_names = torso_joints\n torso_trajectory.points.append(JointTrajectoryPoint())\n torso_trajectory.points[0].positions = torso_goal\n torso_trajectory.points[0].velocities = [0.0 for i in torso_joints]\n torso_trajectory.points[0].accelerations = [0.0 for i in torso_joints]\n torso_trajectory.points[0].time_from_start = rospy.Duration(5.0)\n \n # Send the trajectory to the head action server\n rospy.loginfo('Moving the head to goal position...')\n \n torso_goal = FollowJointTrajectoryGoal()\n torso_goal.trajectory = torso_trajectory\n torso_goal.goal_time_tolerance = rospy.Duration(0.0)\n \n # Send the goal\n torso_client.send_goal(torso_goal)\n \n # Wait for up to 8 seconds for the motion to complete \n torso_client.wait_for_result(rospy.Duration(8.0))\n \n rospy.loginfo('...done')\n \nif __name__ == '__main__':\n try:\n TrajectoryDemo()\n except rospy.ROSInterruptException:\n pass\n ","sub_path":"rbx2_gazebo/scripts/fetch_trajectory_demo.py","file_name":"fetch_trajectory_demo.py","file_ext":"py","file_size_in_byte":7110,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"143978988","text":"#Debug.on(3)\nDebug.off()\n\nswitchApp(\"Safari\")\nr = App.focusedWindow()\nimgStop = \"imgStop.png\"\n \nstopAppeared = r.onAppear(imgStop)\nr.observeInBackground(FOREVER)\nwhile not r.isObserving(): wait(0.3)\n\nr.hover()\nwhile r.isObserving():\n wheel(WHEEL_UP, 10)\n wait(1)\n\nm = r.getEvent(stopAppeared).getMatch()\nhover(m)\nm.highlight(2)\n","sub_path":"sikuli_StuffContainer/testScripts/testObserve/testStopWheel.sikuli/testStopWheel.py","file_name":"testStopWheel.py","file_ext":"py","file_size_in_byte":331,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"102804185","text":"import csv\n\n#s,s2=input(\"mera,didaskwn\\n\").split(',')\n#s= input('dwse 1')\n#s2= input('dwse 2')\n# open csv file for reading\nwith open('programma.csv','r',newline='',encoding='utf-8') as ifp:\n\twith open('out.csv','w',newline='') as ofp:\n\t\tir = csv.reader(ifp)\n\t\twriter = csv.writer(ofp)\n\t\t#create csv reader object\n\t\n\n\t\t#read first row (headers)\n\t\thdrow = next(ir)\n\n\t\t#iterate over table rows in csv file\n\t\tfor i,row in enumerate(ir):\n\t\t\tfor j,item in enumerate(row):\n\n\t\t\t\tif hdrow [j] == 'ΕΝΑΡΞΗ' or hdrow[j] == 'ΛΗΞΗ' or hdrow [j] == 'ΗΜΕΡΑ':\n\t\t\t\t\twriter.writerow (['b:'+str(i+1),hdrow[j],'l:' + item])\n\t\t\t\telse: \n\t\t\t\t\twriter.writerow (['b:'+str(i+1),hdrow[j],'u:' + item])\n\t\t\t\t#writer.writerow([i+1,hdrow[j],item])\n\t\t\t#each row is a list of stringsd\n\t\t\t#(table column 
values for this row)\n\t\t\n\t\t\t#do something with each row here...\n","sub_path":"Lab 2/blu.py","file_name":"blu.py","file_ext":"py","file_size_in_byte":848,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"546662508","text":"import numpy as np\nimport matplotlib.pyplot as plt \nimport seaborn as sns\n\nimport warnings\nwarnings.filterwarnings(\"ignore\")\n\nimport tensorflow as tf\nfrom tensorflow import keras\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.layers import Dense, Dropout, LSTM, Embedding, Bidirectional, Concatenate, Flatten \nfrom tensorflow.keras.callbacks import TensorBoard, ModelCheckpoint\nfrom tensorflow.keras.preprocessing.sequence import pad_sequences\nfrom tensorflow.keras.preprocessing.text import Tokenizer\nfrom tensorflow.keras.models import load_model\n\nfrom sklearn.preprocessing import PowerTransformer, OneHotEncoder\nfrom sklearn.model_selection import train_test_split\nimport pandas as pd\n\n\nimport requests\nimport zipfile\nimport io \n\nimport dill\n\n\nclass RankDrugRev:\n\t\"\"\" this class instantiates objects that can preprocess and model usefulness of reviews on drugs\n\tacross a vroad range of drugs and conditions \"\"\"\n\n\tdata_url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/00462/drugsCom_raw.zip'\n\n\tdef __init__(self, num_words = 5000, max_len = 2000, len_limit = None):\n\t\tself.num_words = num_words\n\t\tself.len_limit = len_limit\n\t\tself.max_len = max_len\n\n\t\tself._km_fitted = False\n\t\tself._seq_fitted = False\n\n\n\n\tdef load_data(self):\n\t\t\"\"\" applying this function on self results in loading and storing data from online source, \n\t\tas pandas dataframe \"\"\"\n\n\t\tr = requests.get(RankDrugRev.data_url, verify = False)\n\t\tz = zipfile.ZipFile(io.BytesIO(r.content))\n\t\tdf_train = pd.read_csv(io.BytesIO(z.read(\"drugsComTrain_raw.tsv\")),sep = '\\t', parse_dates = [5])\n\t\tdf_train.rename(columns={\"Unnamed: 0\": \"id\"}, inplace = True)\n\n\t\tdf_test = pd.read_csv(io.BytesIO(z.read(\"drugsComTest_raw.tsv\")),sep = '\\t', parse_dates = [5])\n\t\tdf_test.rename(columns={\"Unnamed: 0\": \"id\"}, inplace = True)\n\t\n\t\tself.raw_data_train = df_train\n\t\tself.raw_data_test = df_test\n\n\t\tself._raw_data_loaded = True\n\n\tdef fit_preprocess(self):\n\t\t\"\"\" applying this function on self fits a preprocessing pipline on the train dataset loaded\n\t\tby executing \\\"load_data(self)\\\" \"\"\"\n\n\t\tif not self._raw_data_loaded:\n\t\t\traise ValueError('raw data needs to be loaded first! 
; execute \\\"load_data()\\\"')\n\n\t\tdf = self.raw_data_train\n\n\t\tdf = df.loc[~df[\"condition\"].str.contains(\"</span>\").astype(bool),:]\n\n\t\tlow_cond_list = df[\"condition\"].value_counts()[df[\"condition\"].value_counts() < 50].index.values.tolist()\n\t\tlow_drug_list = df[\"drugName\"].value_counts()[df[\"drugName\"].value_counts() < 50].index.values.tolist()\n\n\t\tidx_ = (~df[\"condition\"].isin(low_cond_list)) & (~df[\"drugName\"].isin(low_drug_list))\n\n\t\tdf = df.loc[idx_,:]\n\t\tdf.dropna(inplace = True)\n\n\t\tenc_cond = OneHotEncoder(handle_unknown='ignore')\n\t\tenc_cond.fit(df[\"condition\"].values.reshape(-1,1))\n\n\t\tenc_drug = OneHotEncoder(handle_unknown='ignore')\n\t\tenc_drug.fit(df[\"drugName\"].values.reshape(-1,1))\n\n\t\ttokenizer = tf.keras.preprocessing.text.Tokenizer(num_words = self.num_words, oov_token = \"<UNK>\", filters='!\"#$%&()*+.,-/:;=?@[\\]^_`{|}~')\n\t\ttokenizer.fit_on_texts(df[\"review\"].values)\n\t\ttokenizer.index_word[0] = '<PAD>'\n\n\t\tdf.loc[df[\"usefulCount\"] == 0, \"usefulCount\"] = 0.1 # box_cox transformation requires strictly positive data \n\t\ttmp = df[\"usefulCount\"].values.reshape((-1,1))\n\t\tpw_useC = PowerTransformer(method='box-cox')\n\t\tpw_useC.fit(tmp)\n\n\t\t\n\t\tself._tokenizer = tokenizer\n\t\tself._enc_cond = enc_cond\n\t\tself._enc_drug = enc_drug\n\n\t\tself._pw_useC = pw_useC\n\n\t\tself._low_cond_list, self._low_drug_list = low_cond_list, low_drug_list\n\n\t\tself._seq_fitted = True\n\n\n\tdef transform_preprocess(self, data = 'train', forModelorTest = True):\n\t\t\"\"\"\n\n\t\tParameters\n\t\t----------\n\t\tdata : \n\t\t\tDefault value = 'train') or 'train' for loaded datasets. Pass in a pandas dataframe if applying on an external dataset\n\t\t\t\t\t\t\t\t\t\tdataframe should include columns = \"drugName\", \"condition\", \"review\", \"rating\", and \"usefulCount\"\n\t\t\t\t\t\t\t\t\t\tif forModelorTest = False, usefulCount is not needed \n\t\tforModelorTest :\n\t\t\tDefault value = True) indicates whether 'data' has column usefulCount or not\n\n\t\tReturns\n\t\t-------\n\t\tA list of predictors for the model and usefulCounts as model output and index of 'data' for which preprocessing has an answer\n\n\t\t\"\"\"\n\n\t\tif isinstance(data, pd.DataFrame):\n\t\t\tdf = data\n\t\telif (data == 'train') & (self._raw_data_loaded) & (self._seq_fitted):\n\t\t\tdf = self.raw_data_train\n\t\telif (data == 'test') & (self._raw_data_loaded) & (self._seq_fitted):\n\t\t\tdf = self.raw_data_test\n\t\telse:\n\t\t\traise ValueError(\"no data are loaded!\")\n\n\t\tindex_orig = np.array(df.index)\n\t\tdf = df.loc[~df[\"condition\"].str.contains(\"</span>\").astype(bool),:]\n\n\t\tenc_drug = self._enc_drug\n\t\tenc_cond = self._enc_cond\n\t\t\n\t\tdrug_OH = enc_drug.transform(df[\"drugName\"].values.reshape(-1,1)).toarray()\n\t\tcond_OH = enc_cond.transform(df[\"condition\"].values.reshape(-1,1)).toarray()\n\n\t\tidx_1 = np.where(((drug_OH.sum(axis=1) == 0) | (cond_OH.sum(axis=1) == 0)).ravel())[0]\n\t\tidx_2 = np.where((df['rating'] < 0) | (df['rating'] > 10).values)[0]\n\t\tidx_3 = np.where((df['usefulCount'] < 0).values)[0]\n\t\tidx_4 = np.where((df['review'].str.strip() == \"\").values)[0]\n\t\t\n\t\tdf.loc[df[\"usefulCount\"] == 0, \"usefulCount\"] = 0.1\n\t\tdf.loc[:, \"rating\"] = df.loc[:,\"rating\"].values / 10\n\n\t\tidx = np.setdiff1d(np.arange(df.shape[0]) , np.unique(np.concatenate((idx_1, idx_2, idx_3, idx_4))))\n\n\t\tdf = df.iloc[idx,:] # cleaned up dataframe\n\n\t\tdf.dropna(inplace = True)\n\t\n\n\t\tif 
df.empty:\n\t\t\traise ValueError('there is no relevant data in \\\"data\\\"!')\n\n\n\t\t# preparing returns\n\n\t\tdrug_OH = enc_drug.transform(df[\"drugName\"].values.reshape(-1,1)).toarray()\n\t\tcond_OH = enc_cond.transform(df[\"condition\"].values.reshape(-1,1)).toarray()\n\n\t\ttokenizer = self._tokenizer\n\t\ttransformed_seqs = tokenizer.texts_to_sequences(df[\"review\"].values)\n\t\ttransformed_seqs = pad_sequences(transformed_seqs, maxlen = self.max_len)[:,:self.len_limit]\n\n\t\trating = df[\"rating\"].values.reshape((-1,1))\n\n\t\tif forModelorTest:\n\t\t\tpw_useC = self._pw_useC \n\t\t\tuseC = pw_useC.transform(df['usefulCount'].values.reshape(-1,1))\n\t\t\tassert transformed_seqs.shape[0] == rating.shape[0] == drug_OH.shape[0] == cond_OH.shape[0]\\\n\t\t\t\t== useC.shape[0] == df.index.shape[0]\n\t\t\treturn [transformed_seqs, drug_OH, cond_OH, rating], useC, df.index\n\n\t\telse:\n\t\t\tassert transformed_seqs.shape[0] == rating.shape[0] == drug_OH.shape[0] == cond_OH.shape[0]\\\n\t\t\t\t== df.index.shape[0]\n\t\t\treturn [transformed_seqs, drug_OH, cond_OH, rating], df.index\n\n\tdef lstm_model(self, X, y):\n\t\t\"\"\"\n\n\t\tParameters\n\t\t----------\n\t\tX : preprocessed inputs for lstm model \n\t\t\n\t\ty: preprocesses output for lstm model\n\n\n\n\t\tReturns\n\t\t-------\n\t\ttrained model and traning history\n\n\t\t\"\"\"\n\t\t\n\n\t\t# defining model\n\t\tinput_layer1 = keras.Input(shape=(None,), dtype=\"int32\")\n\t\tembed1 = Embedding(X[0].max() + 1, 64)(input_layer1)\n\t\tlstm1 = Bidirectional(LSTM(64, return_sequences=True))(embed1)\n\t\tlstm2 = Bidirectional(LSTM(64))(lstm1)\n\t\tdense_1 = Dense(1)(lstm2)\n\n\t\t# input layer for drug \n\t\tinput_layer2 = keras.Input(shape=(X[1].shape[1],), dtype=\"int32\")\n\t\tdense_2 = Dense(12, activation=\"relu\")(input_layer2)\n\t\tdense_3 = Dense(1, activation=\"relu\")(dense_2)\n\n\t\t# input layer for condition\n\t\tinput_layer3 = keras.Input(shape=(X[2].shape[1],), dtype=\"int32\")\n\t\tdense_4 = Dense(12, activation=\"relu\")(input_layer3)\n\t\tdense_5 = Dense(1, activation=\"relu\")(dense_4)\n\n\t\t# rating as an input\n\t\tinput_layer4 = keras.Input(shape=(X[3].shape[1],), dtype=\"int32\") \n\t\tdense_6 = Dense(12, activation=\"relu\")(input_layer4)\n\t\tdense_7 = Dense(1, activation=\"relu\")(dense_6)\n\n\t\tconcat_layer = Concatenate()([dense_1, dense_3, dense_5, dense_7])\n\n\t\tdense_8 = Dense(4, activation = \"relu\")(concat_layer)\n\t\toutput = Dense(1)(dense_8)\n\n\t\tmodel = keras.Model(inputs = [input_layer1, input_layer2, input_layer3, input_layer4], outputs = output)\n\n\t\tmodel.compile(optimizer = keras.optimizers.Adam(learning_rate = 0.005), loss=\"mse\")\n\n\t\thistory = model.fit(x = X , y = y, epochs = 4, validation_split = 0.2)\n\n\t\tself._km_fitted = True\n\n\t\treturn model, history\n\n\n\n\n\tdef save_model(self, *fnames, model = None):\n\t\t\"\"\"\n\n\t\tParameters\n\t\t----------\n\t\t*fnames : file names to which the trained model can be saved\n\t\t\n\t\tmodel :\n\t\t\t(Default value = None)\n\t\t\tlstm model object to be saved, otherwise only saving preprocessing model\n\n\n\t\tReturns\n\t\t-------\n\t\tNone\n\n\t\t\"\"\"\n\n\t\tif len(fnames) == 0:\n\t\t\traise TypeError('no file names were provided!')\n\n\t\tif (model is None) and len(fnames) == 1:\n\t\t\twith open(fnames[0], 'wb') as f:\n\t\t\t\tf.write(dill.dumps(self))\n\n\t\telif (model is not None) & (len(fnames) == 2):\n\t\t\tif fnames[1].split('.')[1] != 'h5':\n\t\t\t\traise ValueError('include \\\".h5\\\" for the keras model file 
name!')\n\t\t\telse:\n\t\t\t\twith open(fnames[0], 'wb') as f:\n\t\t\t\t\tf.write(dill.dumps(self))\n\t\t\t\tmodel.save(fnames[1])\n\n\t\telse:\n\t\t\traise TypeError('no model was loaded! check fir correct arguments')\n\n\t@staticmethod\n\tdef load_model(*fnames):\n\t\t\"\"\"\n\n\t\tParameters\n\t\t----------\n\t\t*fnames : names of the files from which pretrained models can be loaded\n\t\t\n\n\t\tReturns\n\t\t-------\n\t\tmodel objects\n\n\t\t\"\"\"\n\n\t\tret = [] \n\t\tif len(fnames) >= 1:\n\t\t\twith open(fnames[0], 'rb') as f:\n\t\t\t\tret.append(dill.loads(f.read()))\n\n\t\t\tif len(fnames) == 2:\n\t\t\t\tret.append(keras.models.load_model(fnames[1]))\n\n\t\tif not ret:\n\t\t\traise TypeError(\"no file names are provided\")\n\n\t\treturn tuple(ret)\n\n\t@staticmethod\n\tdef plot_performance(y_obs, y_pred):\n\t\t\"\"\"\n\t\tplots model performance \n\t\tParameters\n\t\t----------\n\t\ty_obs : observed output as (-1,1) numpy array\n\t\t param y_pred:\n\t\ty_pred : predicted output as (-1,1) numpy array\n\t\t \n\n\t\tReturns\n\t\t-------\n\t\tNone\n\t\t\"\"\"\n\n\t\ttmp_df = pd.DataFrame({'obs' : y_obs.ravel(), 'model' : y_pred.ravel()})\n\t\tax = sns.displot(data = tmp_df, x = 'obs', y = 'model', kind = 'kde' )\n\t\tax.ax.set_xlim(-4, 4)\n\t\tax.ax.set_ylim(-4, 4)\n\t\tplt.plot(np.linspace(-4,4,100),np.linspace(-4,4,100),'r.-')\n\n\t\tplt.show()\n\n\ndef rank_reviews(df):\n\t\"\"\"\n\tA standalone function that uses RankDrugRev class to rank the reviews on drugs included in 'df'\n\n\tParameters\n\t----------\n\tdf : a pandas dataframe that needs to have columns : 'drugName', 'condition', 'review' ,'rating'\n\n\n\tReturns\n\t-------\n\ta pandas dataframe that is ranked based on an extra 'usefulness' column\n\t\"\"\"\n\t\n\tpreprocc, lstm_model = RankDrugRev.load_model('preprcc.dill','lstm_model.h5')\n\n\tX, idx_known = preprocc.transform_preprocess(data = df, forModelorTest = False)\n\ty_pred = lstm_model.predict(X)\n\tdf.loc[idx_known, 'usefulness'] = y_pred\n\tdf.sort_values(by=['usefulness'], inplace = True, ascending = False)\n\n\treturn df","sub_path":"rankDrugRev.py","file_name":"rankDrugRev.py","file_ext":"py","file_size_in_byte":10488,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"320956435","text":"import json\r\nfrom watson_developer_cloud import VisualRecognitionV3\r\n\r\nvisual_recognition = VisualRecognitionV3(\r\n '2018-03-19',\r\n iam_apikey='Vi7lrOVTqCDLnxs2n0McsYbGVS9G8Gb3-xWsMF-NUMeM')\r\n\r\nwith open('./Test2.jpg', 'rb') as images_file:\r\n classes = visual_recognition.classify(\r\n images_file,\r\n threshold='0.6',\r\n\tclassifier_ids='Fire_504844078').get_result()\r\nprint(json.dumps(classes, indent=2))\r\n","sub_path":"ImageTesting.py","file_name":"ImageTesting.py","file_ext":"py","file_size_in_byte":425,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"19728817","text":"#Team Name: CGZL\r\n#Team Members:\r\n#-Brian Cabantug (bcabantug@csu.fullerton.edu) CWID: 891096281\r\n#-Hancheng Zhou (jerryzhhch@csu.fullerton.edu) CWID: 891971798\r\n#-Mason Guzman-Sanchez (macegs1995@gmail.com) CWID: 890919442\r\n#-Alexandre Lee (al2012@csu.fullerton.edu) CWID: 892172396\r\n#File description: This file is the main script that runs the game and calls the functions\r\nimport pygame\r\nfrom global_inst import *\r\nfrom player_ai import Player_vs_AI\r\nfrom player_player import Player_vs_Player\r\n\r\n\r\n# print the text on the buttons\r\ndef text_to_button(text, 
color, x, y, w, h, ):\r\n small_font = pygame.font.SysFont(\"Arial\", 20, False, False)\r\n button_text = small_font.render(text, False, color)\r\n rect = button_text.get_rect()\r\n rect.center = (x + w / 2, y + h / 2)\r\n menu.blit(button_text, rect)\r\n\r\n\r\n# create buttons\r\ndef button(text, inactive_color, active_color, x, y, w, h, action=None):\r\n cur = pygame.mouse.get_pos()\r\n click = pygame.mouse.get_pressed()\r\n if x + w > cur[0] > x and y + h > cur[1] > y:\r\n pygame.draw.rect(menu, active_color, (x, y, w, h))\r\n if click[0] == 1 and action != None:\r\n if action == \"1\":\r\n bgm.stop()\r\n click_sound.play()\r\n pygame.time.wait(300)\r\n Player_vs_AI()\r\n if action == \"2\":\r\n bgm.stop()\r\n click_sound.play()\r\n pygame.time.wait(300)\r\n Player_vs_Player()\r\n if action == \"3\":\r\n bgm.stop()\r\n click_sound.play()\r\n pygame.time.wait(300)\r\n pygame.quit()\r\n else:\r\n pygame.draw.rect(menu, inactive_color, (x, y, w, h))\r\n text_to_button(text, black, x, y, w, h)\r\n\r\n\r\ndef main():\r\n # load texts\r\n large_font = pygame.font.SysFont(\"Arial\", 60, False, False)\r\n menu_text = large_font.render(\"Welcome To Bunny Game\", True, black)\r\n menu_text_rect = menu_text.get_rect()\r\n menu_text_rect.center = (width / 2, 100)\r\n menu.blit(menu_text, menu_text_rect)\r\n\r\n intro = True\r\n while intro:\r\n # process input (events)\r\n for event in pygame.event.get():\r\n # check for closing the window\r\n if event.type == pygame.QUIT:\r\n intro = False\r\n pygame.quit()\r\n\r\n # create three buttons\r\n button(\"Single Player\", yellow, dark_yellow, 300, 200, 200, 80, action=\"1\")\r\n button(\"Multiple Players\", yellow, dark_yellow, 300, 320, 200, 80, action=\"2\")\r\n button(\"Quit\", red, dark_red, 300, 440, 200, 80, action=\"3\")\r\n\r\n pygame.display.flip()\r\n\r\n\r\nif __name__ == \"__main__\":\r\n pygame.init()\r\n pygame.mixer.init()\r\n menu = pygame.display.set_mode((width, height))\r\n pygame.display.set_caption(\"Menu\")\r\n pygame.event.set_blocked(pygame.MOUSEMOTION)\r\n # set menu background\r\n menu.fill(white)\r\n background = pygame.image.load(os.path.join(img_folder, \"background.png\")).convert()\r\n background = pygame.transform.scale(background, (width, height))\r\n bg_rect = background.get_rect()\r\n bg_rect.center = (width / 2, height / 2)\r\n menu.blit(background, bg_rect)\r\n click_sound = pygame.mixer.Sound(os.path.join(sound_folder, \"click.wav\"))\r\n click_sound.set_volume(1)\r\n bgm = pygame.mixer.Sound(os.path.join(sound_folder, \"bgm.wav\"))\r\n bgm.play(-1)\r\n main()\r\n quit()\r\n\r\n\r\n\r\n\r\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3416,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"422431157","text":"\nimport requests\nfrom lxml import html\npandas_doc_url = 'http://pandas.pydata.org/pandas-docs/stable/'\npandas_api = requests.get(pandas_doc_url + 'api.html')\nhtml_root = html.fromstring(pandas_api.content)\nlxml_links = html_root.xpath('//a[@class=\"reference internal\"]')\nlinks = {}\nfor a in lxml_links:\n a_text = str(a.text_content())\n if a_text.startswith('Series') or a_text.startswith('DataFrame'):\n links[a_text] = \"<a href='{href}' target='_blank'>{text}</a>\"\\\n .format(href=pandas_doc_url + a.get('href'), text=a_text) \n \nimport inspect\n\ndef get_methods_and_attributes(instance, include_protected=False):\n attrs = []\n funcs = []\n for attr in [attr for attr in dir(instance) if include_protected or not 
attr.startswith('_')]:\n f_or_a = funcs if inspect.ismethod(getattr(instance, attr)) else attrs\n f_or_a.append(attr)\n return attrs, funcs\ndef print_title(title):\n print('\\n{}\\n'.format(f' {title} '.center(120, '=')))\n \n# needed to display the HTML properly\nfrom IPython.display import display\nfrom IPython.display import HTML\n\n# needed to create rows out of a list of items (this is used by print_in_columns)\nfrom test_envi.utils.lists import list_to_matrix_index_generator\n\ndef print_html_table(title, attrs, structure='Series', columns=5):\n\n table = \"<table style='width=100%'>\"\n \n # add title\n table += f\"<tr><th colspan='{columns}' style='text-align: center'>{title}</th></tr>\"\n \n # add rows\n for row in list_to_matrix_index_generator(attrs, columns=columns, row_order=False):\n tds = \"\"\n for data in row: \n structure_data = structure + '.' + data\n tds += f'<td>{links.get(structure_data, structure_data)}</td>' \n table += f'<tr>{tds}</tr>'\n table += \"</table>\"\n \n # finally display\n display(HTML(table))\n ","sub_path":"course/Week-06-Pandas-Statistics/pandas_api_links.py","file_name":"pandas_api_links.py","file_ext":"py","file_size_in_byte":1882,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"89623546","text":"from math import gcd\n\nN = int(input())\nnum_lis = list(map(int, input().split()))\n\ndef osa_k(max_num):\n lis = [i for i in range(max_num+1)]\n p = 2\n while p**2 <= max_num:\n if lis[p] == p:\n for q in range(2*p, max_num+1, p):\n if lis[q] == q:\n lis[q] = p\n p += 1\n return lis\n\nhoge = 0\nfor i in num_lis:\n hoge = gcd(hoge, i)\nif hoge > 1:\n print(\"not coprime\")\n exit()\n\nd_lis = osa_k(max(num_lis))\ntmp = set()\nfor i in num_lis:\n num = i\n new_tmp = set()\n while num > 1:\n d = d_lis[num]\n new_tmp.add(d)\n num //= d\n for j in new_tmp:\n if j in tmp:\n print(\"setwise coprime\")\n exit()\n else:\n tmp.add(j)\nprint(\"pairwise coprime\")","sub_path":"Python_codes/p02574/s021486096.py","file_name":"s021486096.py","file_ext":"py","file_size_in_byte":687,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"613782448","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Oct 15 11:57:25 2018\n\n@author: caoa\n\"\"\"\nfrom random import randint\nfrom itertools import cycle\nfrom collections import defaultdict\nfrom bokeh.models import ColumnDataSource\nfrom bokeh.models.tools import HoverTool, TapTool\nfrom bokeh.models.callbacks import OpenURL\nfrom bokeh.plotting import figure\nfrom bokeh.io import show\nimport bokeh.palettes as palettes\n\n#%% data\ndef get_data(n):\n colors = cycle(palettes.d3['Category20'][min(20,n)])\n data = defaultdict(list)\n for day in range(n):\n data['days'].append(str(day+1))\n data['ct'].append(randint(1,100))\n data['colors'].append(next(colors)) \n return data\n \n#%% vertical bar chart\ndef create_bar_chart(days, w=900, h=300):\n data = get_data(days)\n xfactor = [str(x+1) for x in range(days)]\n source = ColumnDataSource(data)\n \n p = figure(title=\"Count per day\", plot_width=w, plot_height=h, \n x_range=xfactor, y_range=(0,100),\n toolbar_location=\"above\", outline_line_color='black',\n )\n p.vbar(x='days', bottom=0, top='ct', source=source, width=0.8, \n color=None, fill_color='colors')\n \n p.xgrid.grid_line_color = None\n p.ygrid.grid_line_color = \"grey\"\n p.ygrid.grid_line_alpha = 0.25\n p.yaxis.axis_label = \"Count\"\n p.xaxis.axis_label = \"Days\"\n p.xaxis.major_tick_line_color = 
None\n\n TOOLTIPS = [\n (\"Day\", \"@days\"),\n (\"No.\", \"@ct\"),\n (\"color\", \"$color[swatch]:colors\"),\n ]\n hovertool = HoverTool(tooltips=TOOLTIPS)\n p.add_tools(hovertool)\n taptool = TapTool(callback=OpenURL(url='../../scatter'))\n p.add_tools(taptool)\n \n return p\n\n#%%\nif __name__ == \"__main__\":\n plot = create_bar_chart(days=9)\n show(plot)","sub_path":"bokeh_vbar.py","file_name":"bokeh_vbar.py","file_ext":"py","file_size_in_byte":1769,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"629182092","text":"# -*- coding: utf-8 -*-\n'''\nCreated on 2017年11月29日\n@author: Administrator\n'''\nimport tensorflow as tf\nimport numpy as np\n# 创建一个常量op\n# random1=np.random.random((2,3))\n# print(random1.shape)\n# m1=tf.constant([[1,3,5]])\n# print(m1,type(m1))\n# 创建一个常量op\n# m2=tf.constant([[2],[3],[3]])\n# print(m2)\n# 创建一个矩阵相乘,m1,m2\n# product=tf.matmul(m1,m2)\n# print(product)\n# 定义一个绘画,启动默认图\n# sess=tf.Session()\n# 调用sess的run方法执行矩阵乘法,run触发了上面3个op,到这里才开始执行运算\n# result=sess.run(product)\n# print(result)\n# sess.close()\n# with tf.Session() as sess:\n# result=sess.run(product)\n# print(result)\nx=tf.Variable([1,2 ])\na=tf.constant([3,3])\n# 增加一个减法op\n# sub=tf.subtract(x,a)\n# 增加一个加法op\n# add=tf.add(x,sub)\ny=tf.add(x,a)\n\n# y=x赋值\n# updata=tf.assign(x,y)\n# \n# init=tf.global_variables_initializer()\n# with tf.Session() as sess:\n# sess.run(init)\n# for _ in range(5):\n# sess.run(updata)\n# print(sess.run(y))\n# \n# \n# with tf.Session() as sess:\n# sess.run(init)\n# for _ in range(5):\n# x=tf.add(x,a)\n# z=sess.run(x)\n# print(z)\n# fetch\n# a=tf.constant(2)\n# b=tf.constant(1)\n# c=tf.constant(3)\n# sub=tf.subtract(a,b)\n# mat=tf.multiply(a,c)\n# with tf.Session() as sess:\n# result=sess.run([sub,mat])\n# print(result)\n \n# feed\n# 创建占位符\n# input1=tf.placeholder(tf.float32)\n# input2=tf.placeholder(tf.float32)\n# mul=tf.multiply(input1,input2)\n# with tf.Session() as sess:\n# print(sess.run(mul,feed_dict={input1:[1],input2:[2]}))\n# 字典\n\nx_data=np.random.random(100)\ny_data=x_data*345+123\nb=tf.Variable(float(np.random.random(1)))\nk=tf.Variable(float(np.random.random(1)))\ny=k*x_data+b\n\n# 二次代价函数\nloss=tf.reduce_mean(tf.square(y_data-y))\n# 定义一个梯度下降法来进行训练的优化器\noptimizer=tf.train.GradientDescentOptimizer(0.1)\n# 最小化代价函数\ntrain=optimizer.minimize(loss)\n\ninit=tf.global_variables_initializer()\nwith tf.Session() as sess:\n sess.run(init)\n for step in range(3000):\n sess.run(train)\n if step % 100 == 0:\n print(step,sess.run([k,b]),sess.run(loss))\n\n\n\n\n\n\n\n\n\n\n\n\n \n ","sub_path":"mypython/TF201711290848.py","file_name":"TF201711290848.py","file_ext":"py","file_size_in_byte":2268,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"160051830","text":"\n\ndef main():\n import pyglet\n from .rapy import Rapy\n rapy = Rapy()\n #rapy.load_core('/home/higor/code/rapy/gambatte_libretro.so')\n #rapy.load('/home/higor/code/maymayarch/oracle_of_seasons.gbc')\n rapy.load_core('/usr/lib/libretro/snes9x_next_libretro.so')\n rapy.load('/home/higor/code/maymayarch/megamanx.sfc')\n pyglet.app.run()\n\nmain()\n","sub_path":"rapy/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":362,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"636190687","text":"import os, logging\n\nclass DropHandler(object):\n\n def __init__(self, data):\n self._data = data\n self._logger = 
logging.getLogger('root.drop_handler')\n\n # handle dropevents for the treewidget\n def folderDropped(self, pathes, index=None):\n\n drops = {}\n count = 20\n for path in pathes:\n if count <= 0:\n break\n self._logger.debug('dropped file %s' % path)\n\n if os.path.isdir(path):\n\n # name of the folder\n path = os.path.abspath(path)\n\n title = unicode(os.path.split(path)[1])\n # get all music files from the folder\n tracks = self.getTracks(path)\n # amount of music files in the folder\n amount = unicode(len(tracks))\n\n # add new _data to the observable _data structure\n drop = {'title':title, 'path':path, 'tracklist':tracks, 'track_amount': amount}\n drops[title] = drop\n\n else:\n self._logger.error('file %s does not exist' % path)\n count -= 1\n # add the drops to the already dropped _data\n self._data.releases = dict(self._data.releases.items() + drops.items())\n \n def addDrop(self, path, is_expanded):\n self._logger.debug('add drop %s' % path)\n\n if os.path.isdir(path):\n # name of the folder\n path = os.path.abspath(path)\n title = unicode(os.path.split(path)[1])\n # get all music files from the folder\n tracks = self.getTracks(path)\n # amount of music files in the folder\n amount = unicode(len(tracks))\n # add new _data to the observable _data structure\n drop = {'title':title, 'path':path, 'tracklist':tracks, 'track_amount': amount}\n \n self._data.update_drop_data(drop, title, is_expanded)\n\n # get tracks from selected folder\n def getTracks(self, url):\n self._logger.debug('generate tracks for \"%s\"' % os.path.basename(url))\n extensions = ['.mp3', '.mp4', '.wav', '.flac', '.aif', '.mid', '.m4a']\n\n if os.path.isdir(url):\n tracks = [unicode(track) for track in os.listdir(url) if os.path.splitext(track)[1] in extensions]\n return tracks\n","sub_path":"controller/drop_handler.py","file_name":"drop_handler.py","file_ext":"py","file_size_in_byte":2329,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"265475379","text":"\"\"\" This simple code is desinged to teach a basic user to read in the files in python, simply find what proportion of males and females survived and make a predictive model based on this\nAuthor : AstroDave\nDate : 18 September 2012\nRevised: 28 March 2014\n\n\"\"\"\n\n\nimport csv as csv\nimport numpy as np\nimport graphviz as gv\nfrom sklearn import tree\nfrom sklearn.cross_validation import cross_val_score, train_test_split\nfrom sklearn.metrics import confusion_matrix\nfrom sklearn import ensemble \nfrom sklearn import linear_model\nfrom my_functions import *\nfrom sklearn import svm\nimport xgboost as xgb\n\ndef main ():\n\n\t## LOADING DATA\n\n\tmy_project_dir = \"kaggle_data\\\\\"\n\tcsv_file_object = csv.reader(open(my_project_dir + 'processed.csv', 'r')) \t# Load in the csv file\n\theader = next(csv_file_object) \t\t\t\t\t\t# Skip the fist line as it is a header\n\tdata=[] \t\t\t\t\t\t\t\t\t\t\t\t# Create a variable to hold the data\n\n\tfor row in csv_file_object: \t\t\t\t\t\t\t# Skip through each row in the csv file,\n\t data.append(row[0:]) \t\t\t\t\t\t\t\t# adding each row to the data variable\n\n\n\tdata = np.array(data) \t\t\t\t\t\t\t\t\t# Then convert from a list to an array.\n\n\n\t### Select columns for model:\n\t\n\tcols_for_model = [\"PassengerId\", 
\"Survived\",\"Pclass\",\"Mrs\",\"Mr\",\"Miss\",\"Master\",\n\t\t\t\t\t\t\"Captain\",\"Other\",\"is_female\",\"Age_predict\",\"Age_unknown\",\n\t\t\t\t\t\t\"age_less_10\",\"age_10_20\",\"age_20_30\",\"age_30_50\",\"age_more_50\",\n\t\t\t\t\t\t\"SibSp\",\"Parch\",\"Fare\",\"No_cabin\",\"Embarked_C\",\"Embarked_Q\",\n\t\t\t\t\t\t\"Embarked_S\",\"Same_Ticket\"\n\t\t\t\t\t\t,\"Same_Room_Surv\",\"Same_Room_surv_perc\"\n\t\t\t\t\t\t]\n\n\t### Removing columns that we don't need\n\tcol_remove = []\n\tfor i in range(len(header)):\n\t\tif (header[i] not in cols_for_model): col_remove.append(i)\n\n\tprint (\"Zero value test: \", len(cols_for_model)-(len(header)-len(col_remove)))\n\tdata = np.delete(data, col_remove,1)\n\theader = np.delete(header,col_remove,0)\n\n\n\t### Collecting data for model\n\thas_surv = (data[:,1] !='')\n\ty = data[has_surv,1].astype(float)\n\tX = data[has_surv,2::]\n\tX_nolabel = data[~has_surv,2::]\n\tx_id = data[~has_surv,0]\n\tfeature_names = header[2:]\n\n\n\tX_train, X_test, y_train, y_test = train_test_split(\n\t\t\t\t X, y, test_size=0.5, random_state=222)\n\n\t# specify validations set to watch performance\n\tprint (header)\n\tT_train_xgb = xgb.DMatrix(X_train.astype(float), label=y_train.astype(float))\n\tT_test_xgb = xgb.DMatrix(X_test.astype(float))\n\n\n\n\t## Error function\n\tdef evalerror(preds, dtrain):\n\t labels = dtrain.get_label()\n\t y_predict = list(map (lambda x: int(x>0.5), preds))\n\t return 'myerror', sum(labels != y_predict) / len(labels)\n\n\n\tfile_path = \"output/xgboost_params.csv\"\n\txgb_params_file = open(file_path, \"w\", newline='')\n\txgb_params_file_object = csv.writer(xgb_params_file)\n\txgb_params_file.close()\n\n\n\t## n_round = 4\n\t## max_depth = 6\n\t## i_ets = 0.7\n\t## i_subsample = 0.6\n\t## i_lambda = 0.9\n\n\tfor i_num_round in range(6,7,1):\n\t\tfor i_max_depth in range(8,9,1):\n\t\t\tfor i_eta in range(7,8,1):\n\t\t\t\tfor i_subsample in range(6,7,1):\n\t\t\t\t\tfor i_lambda in range(9,10,1):\n\n\t\t\t\t\t\tnum_round = i_num_round\n\t\t\t\t\t\tparam = {'max_depth':i_max_depth, 'eta':i_eta/10.0, \n\t\t\t\t\t\t\t\t 'silent':1, 'subsample':i_subsample/10.0,\n\t\t\t\t\t\t\t\t 'lambda':i_lambda/10.0, 'num_round': 4,\n\t\t\t\t\t\t\t\t 'objective':'binary:logistic'}\n\t\t\t\t\t\n\t\t\t\t\t\t#param['nthread'] = 4\n\t\t\t\t\t\tparam['eval_metric'] = 'logloss'\n\n\t\t\t\t\t\td_evals_result = {}\n\t\t\t\t\t\teval_hist = xgb.cv(param, T_train_xgb, num_round, nfold=8,\n\t\t\t\t\t \t\t\t\t\t metrics={'logloss'}, seed = 25, \n\t\t\t\t\t \t\t\t\t\t show_progress =False, show_stdv =True,\n\t\t\t\t\t \t\t\t\t\t feval=evalerror)\n\t\t\t\t\t\t#print (eval_hist)\n\t\t\t\t\t\tscores_cv = 1- eval_hist['test-myerror-mean'][num_round-1]\n\t\t\t\t\t\t# print (\"Cross validation score: \", scores_cv)\n\n\t\t\t\t\t\tbst = xgb.train(param, T_train_xgb, num_round)\n\t\t\t\t\t\ty_predict = bst.predict(T_test_xgb)\n\t\t\t\t\t\ty_predict = list(map (lambda x: int(x>0.6), y_predict))\n\t\t\t\t\t\ty_predict = np.array(y_predict).astype(float)\n\t\t\t\t\t\tscore_test = sum(y_test==y_predict)/len(y_test)\n\n\t\t\t\t\t\t#print (i_num_round,i_max_depth)\n\t\t\t\t\t\tprint (i_num_round,i_max_depth,\" -- \",scores_cv,score_test)\n\n\t\t\t\t\t\twith open(file_path, 'a', newline='') as f:\n\t\t\t\t\t\t\tf_object = csv.writer(f)\n\t\t\t\t\t\t\tf_object.writerow([i_num_round, \n\t\t\t\t\t\t\t\t\t\t\t i_max_depth, round(i_eta/10.0,2),\n\t\t\t\t\t\t\t\t\t\t\t round(i_subsample/10.0,2), round(i_lambda/10.0,2),\n\t\t\t\t\t\t\t\t\t\t\t scores_cv, 
score_test\n\t\t\t\t\t\t\t\t\t\t\t\t])\n\n\n\n\n\tT_train_xgb = xgb.DMatrix(X.astype(float), label=y.astype(float))\n\tX = data[:,2::]\n\tx_id = data[:,0]\n\tT_test_xgb = xgb.DMatrix(X.astype(float))\n\n\tbst = xgb.train(param, T_train_xgb, num_round)\t\n\n\ty_predict = bst.predict(T_test_xgb)\n\ty_probab = np.copy(y_predict)\n\t#print (y_predict)\n\ty_predict = list(map (lambda x: int(x>0.6), y_predict))\n\ty_predict = np.array(y_predict).astype(int)\n\n\tprint (sum(y_predict==1)/len(y_predict))\n\n\tpredictions_file = open(\"output/prob_xgboost.csv\", \"w\", newline='')\n\tpredictions_file_object = csv.writer(predictions_file)\n\tpredictions_file_object.writerow([\"PassengerId\", \"Survived\"])\t\n\t#predictions_file_object.writerow([pass_id, y_predict])\n\n\tfor i in range(len(y_predict)):\t\n\t\t#predictions_file_object.writerow([x_id[i], y_predict[i]])\t\t\t\t\t\t\t\t\t\t\t\t\t\t\n\t predictions_file_object.writerow([x_id[i], y_predict[i],1-y_probab[i],y_probab[i]])\t\t\n\nif __name__ == \"__main__\":\n\tmain()","sub_path":"4_xgboost_grid_search.py","file_name":"4_xgboost_grid_search.py","file_ext":"py","file_size_in_byte":5246,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"478723755","text":"#Stuart- As part of your documentation you should include the purpose (in the help format inside the defined function) and inputs to\n#each of the major functions that you write (~one per problem). You have the information, just not as part of\n#the functions themselves.\n#Stuart- Any points lost due to lack of documentation or using script instead of functions\n# can be regained if the issues are corrected. Please email me once you have corrected the issue\n# and I will check your code.\n# hw 1 problem 1\n\n'''\nWrite a version of a palindrome recogniser that accepts a file\nname from the user, reads each line, and prints the line to the\nscreen if it is a palindrome. \nDefine a function is_palindrome()that recognizes palindromes\n(i.e. words that look the same written backwards).\n'''\n\ndef find_palindromes(file_name):\n f = open(file_name,'r')\n for line in f:\n # stript() is to remove white spaces(including new line) within a line.\n # lower() is to ignore cases (convert uppercase to lowercase).\n if is_palindrome(line.strip().lower()):\n print(line)\n \ndef is_palindrome(text):\n return text == reverse(text)\n\ndef reverse(text):\n return text[::-1]\n \nfind_palindromes('sample_input.txt')\n# hw1 problem 10\n\n'''\nIn a game of Lingo, there is a hidden word. \nThe object of the game is to find this word by guessing, and in\nreturn receive two kinds of clues: 1) the characters that are fully\ncorrect, with respect to identity as well as to position, and 2) the\ncharacters that are indeed present in the word, but which are\nplaced in the wrong position. Write a program with which one can\nplay Lingo. Use square brackets to mark characters correct in the\nsense of 1), and ordinary parentheses to mark characters correct in\nthe sense of 2). The word is user defined. For intance, tiger\n(not necessay five characters long though).\n'''\n\ndef guess_word(word_string):\n word = list(word_string)\n \n while True:\n guess = list(input('Please enter your guess of a {} characters long word: '\\\n .format(len(word_string))))\n if guess == word:\n print('You guessed the right word!')\n break\n else:\n clue_lst = guess\n \n # The characters that are fully correct, \n # with respect to identity as well as to position. 
Use [] to show the character.\n for i in range(0, len(guess)):\n if i < len(word) and guess[i] == word[i]:\n clue_lst[i] = \"[\" + guess[i] + \"]\" \n continue\n \n # The characters that are indeed present in the word, \n # but which are placed in the wrong position. Use () to show the character.\n for j in range(0, len(word)):\n if guess[i] == word[j]:\n clue_lst[i] = \"(\" + guess[i] + \")\"\n \n clue = \"\".join(clue_lst)\n print('Clue: ' + clue)\n\nguess_word('tiger')\n# hw1 problem 11\n\n'''\nA sentence splitter is a program capable of splitting a text into\nsentences. The standard set of heuristics for sentence splitting\nincludes (but isn't limited to) the following rules:\nSentence boundaries occur at one of \".\" (periods), \"?\" or \"!\", except\nthat\na. Periods followed by whitespace followed by a lower case letter\nare not sentence boundaries.\nb. Periods followed by a digit with no intervening whitespace are\nnot sentence boundaries. \nc. Periods followed by whitespace and then an upper case letter,\nbut preceded by any of a short list of titles are not sentence\nboundaries. Sample titles include Mr., Mrs., Dr., and so on.\nd. Periods internal to a sequence of letters with no adjacent\nwhitespace are not sentence boundaries (for example,\nwww.aptex.com, or e.g).\ne. Periods followed by certain kinds of punctuation (notably comma\nand more periods) are probably not sentence boundaries.\nYour task here is to write a program that given the name of a text\nfile is able to write its content with each sentence on a separate\nline. Test your program with given_text.txt.\n'''\n\nimport re\n\ndef sentence_splitter(file_name):\n file = open(file_name, 'r')\n text = file.read()\n\n # We first remove the newlines that were already there \n # by subtituting \\n with an empty string.\n sentences = re.sub(r'\\n', '', text)\n\n # Now we add a newline after each period only if that period is not\n # preceded by 'Mr', 'Mrs' or 'Dr' and is followed by a space and an\n # uppercase letter\n sentences = re.sub(r'(?<!Mr)(?<!Mrs)(?<!Dr)\\.\\s([A-Z])', r'.\\n\\1', sentences)\n\n # Then we do the same after every '!'\n sentences = re.sub(r'!\\s', '!\\n', sentences)\n\n # Finally, we do the same after every '?'\n sentences = re.sub(r'\\?\\s', '?\\n', sentences)\n\n print(sentences)\n\nsentence_splitter('given_text.txt')\n# hw 1 problem 12\n\n'''\nAn anagram is a type of word play, the result of rearranging the\nletters of a word or phrase to produce a new word or phrase, using\nall the original letters exactly once; e.g., orchestra = carthorse.\nUsing the word unixdict.txt, write a program\nthat finds the sets of words that share the same characters that\ncontain the most words in them. \n'''\n\ndef anagram(filepath):\n filepath = open ('unixdict.txt').read()\n words = filepath.split()\n for word_similar in words:\n for word in words:\n if set(word) == set(word_similar):\n if len(word) == len(word_similar):\n print (word, word_similar)\n \nprint(anagram('unixdict.txt'))\n# Stuart- The problem asks for you to find the set that contains the most words\n# you have a good bit of code for finding anagrams in the list, but you need\n# to find the set that contains the most anagrams.\n\n# hw1 problem 13\n\n'''\nGenerate a string with\nN opening brackets (\"[\") and N closing brackets (\"]\"), in some\narbitrary order. 
Then Determine whether the generated string is\nbalanced; that is, whether it consists entirely of pairs of\nopening/closing brackets (in that order), none of which mis-nest.\n'''\n\nimport re\n\ndef brackets(bracket_string):\n # Update only the temp string.\n temp = bracket_string\n \n # Remove all white spaces.\n temp = re.sub(r'\\s', '', temp)\n\n # Remove all found pairs of brackets using regex\n while len(re.findall(r'\\[\\]', temp)) > 0:\n temp = re.sub(r'\\[\\]', '', temp)\n\n # If after removing all pairs of brackets the string is still not empty\n if len(temp) > 0:\n print(bracket_string, 'NOT OK')\n else:\n print(bracket_string, 'OK')\n\nbrackets('[][][][[]]')\nbrackets('[ ]')\nbrackets('][ [][]')\nbrackets('][][[[][]]')\nbrackets('[]][[]')\n# hw1 problem 14\n\n'''\nAn alternade is a word in which its letters, taken alternatively in a\nstrict sequence, and used in the same order as the original word,\nmake up at least two other words. All letters must be used, but the\nsmaller words are not necessarily of the same length. For example, a\nword with seven letters where every second letter is used will produce\na four-letter word and a three-letter word. Write a program that\ngoes through each word in the unixdict.txt and tries to make two smaller words\nusing every second letter. The smaller words must also be members\nof the list. Print the words to the screen in the above fashion.\n'''\n\nf = open(\"unixdict.txt\",'r')\n\nfor line in f:\n line = line.strip()\n length = len(line)\n word1 = \"\"\n word2 = \"\"\n for i in range(0, length,2):\n word1 += line[i]\n try:\n word2 += line[i+1]\n except:\n pass\n word1 = word1.strip()\n word2 = word2.strip()\n print(line + ' contains '+ word1 + ' and ' + word2)# hw 1 problem 2\n\n'''\nAccording to Wikipedia, a semordnilap is a word or phrase that\nspells a different word or phrase backwards. Write a semordnilap\nrecogniser that accepts a file name (pointing to a list of words)\nfrom the user and finds and prints all pairs of words that are\nsemordnilaps to the screen. For example, if \"stressed\" and\n\"desserts\" is part of the word list, the the output should include\nthe pair \"stressed desserts\". 
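# A minimal set-based sketch of the semordnilap task just described (my
# addition, assuming `words` is the deduplicated word list): one pass with a
# set lookup finds each reversed partner in O(n) overall, instead of the
# O(n^2) pairwise scan used in the reference solution below.
def find_semordnilaps_set(words):
    seen = set()
    pairs = []
    for word in words:
        reversed_word = word[::-1]
        # exclude palindromes: a semordnilap must spell a *different* word
        if reversed_word in seen and reversed_word != word:
            pairs.append((reversed_word, word))
        seen.add(word)
    return pairs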
\n'''\n\n# The re lib provides regular expression matching operations.\nimport re\n\ndef find_semordnilaps(file_name):\n file = open(file_name,'r')\n # Read and convert uppercase to lowercase.\n text = file.read().lower() \n \n # Use strip() to revome White space at the beginnign and the end of a text string\n # Then split words in the text by any (at least one) whitespace character\n # which is equivalent to the set [\\t\\n\\r\\f\\v].\n words = re.split('\\s+', text.strip())\n \n # Remove dupicated words.\n words = list(set(words))\n \n for idx1 in range(0, len(words)):\n for idx2 in range(idx1+1, len(words)):\n word1 = words[idx1]\n word2 = words[idx2]\n if word1 == reverse(word2):\n print(word1,word2)\n \ndef reverse(text):\n return text[::-1]\n \nfind_semordnilaps('sample_input.txt')# hw 1 problem 3\n\n'''\nWrite a procedure char_freq_table()that, when run in a terminal,\naccepts a file name from the user, builds a frequency listing of\nthe characters contained in the file, and prints a sorted and\nnicely formatted character frequency table to the screen.\n'''\n#Stuart- Nice simple function with an excelent table presentation.\nimport operator\n\ndef display_char_freqs(file_name):\n file = open(file_name,'r')\n text = file.read()\n \n print('char\\tfreq')\n for char, freq in char_freq_dict(text):\n # Ignore white spaces.\n if char != ' ' and char != '\\n':\n print(char, '\\t', freq) \n \ndef char_freq_dict(text):\n # Count the frequency of characters in the text.\n char_freq = dict((char, text.count(char)) for char in set(text))\n # Sorted by reverse order of frequencies.\n return sorted(char_freq.items(), key = operator.itemgetter(1), reverse = True)\n\ndisplay_char_freqs('sample_input.txt')# hw1 problem 4\n\n'''\nProcedure speak_ICAO()able to translate any text (i.e. any string)\ninto spoken ICAO words. Apart from the text to be spoken, your\nprocedure also needs to accept two additional parameters: a float\nindicating the length of the pause between each spoken ICAO\nword, and a float indicating the length of the pause between each\nword spoken.\n'''\n\n# os for mac to speak\nimport os\nimport time\n\n# Here is a dictionary covering one version of the ICAO alphabet.\nd = {\n'a':'alfa', 'b':'bravo', 'c':'charlie', 'd':'delta',\n'e':'echo', 'f':'foxtrot', 'g':'golf', 'h':'hotel',\n'i':'india', 'j':'juliett', 'k':'kilo', 'l':'lima',\n'm':'mike', 'n':'november', 'o':'oscar', 'p':'papa',\n'q':'quebec', 'r':'romeo', 's':'sierra', 't':'tango',\n'u':'uniform', 'v':'victor', 'w':'whiskey', 'x':'x-ray',\n'y':'yankee', 'z':'zulu'\n}\n\n# Function speak_ICAO includes parameters as follow\n# text: the text to read\n# ICAO_pause: the time pause between between each spoken ICAO word\n# word_pause: the time pause between each word spoken\n\ndef speak_ICAO(text, ICAO_pause, word_pause):\n for idx in range(0, len(text)):\n c = text[idx]\n # Convert the character to lower case before looking up from the dict.\n c = c.lower()\n # Pause after each word.\n if c == ' ':\n time.sleep(word_pause)\n # Use elif to negelect character other then alphabets.\n elif 'a' <= c and c <= 'z':\n os.system('say '+ d[c])\n # Use index idx+1 to check whether the next character is blank space.\n # Pause only if the next character is not the blank space.\n # Check index idx+1 is within the text length before usage.\n if idx+1 < len(text) and text[idx+1] != ' ':\n time.sleep(ICAO_pause)\n\n#The text to read is 'Vivi is great' with 0.5s pause time bewteen ICAO word\n#and 2s betweeb each word. 
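# A portability aside (a sketch of my own, not part of the original script):
# os.system('say ' + word) shells out to the macOS-only `say` command, so the
# call below produces no audio on Linux or Windows. A guarded speaker one
# could swap into speak_ICAO, assuming `espeak` is installed on Linux (an
# assumption, not checked here):
import platform
import subprocess

def speak_word(word):
    system = platform.system()
    if system == 'Darwin':              # macOS ships the `say` command
        subprocess.call(['say', word])
    elif system == 'Linux':
        subprocess.call(['espeak', word])
    else:
        print(word)                     # fall back to printing the ICAO word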
\nspeak_ICAO('Vivi is great.', 0.5, 2)# hw1 problem 5\n\n'''\nA hapax legomenon (often abbreviated to hapax) is a word which\noccurs only once in either the written record of a language, the\nworks of an author, or in a single text. Define a function that given\nthe file name of a text will return all its hapaxes. Make sure your\nprogram ignores capitalization\n'''\n\n# The re lib provides regular expression matching operations.\nimport re\n\n# Construct a function that returns hapax, a word which occurs only once, of a text.\ndef hapax(file_name):\n \n # Construct a dictionary that saves each word of text as its keys, \n # and the counts of the words as its values.\n # Construct a list that returns a word which occurs only once, named hapax list\n count_dict = {}\n hapax_list = []\n \n f = open(file_name,'r')\n text = f.read() \n \n # Use strip() to revome White space at the beginnign and the end of a text string\n # Then split words in the text by any (at least one) whitespace character\n # which is equivalent to the set [\\t\\n\\r\\f\\v].\n for word in re.split('\\s+', text.strip()):\n # Convert upper to lower cases\n word = word.lower()\n # Check the occurance of each word\n if word in count_dict:\n count_dict[word] = count_dict[word] + 1\n else:\n count_dict[word] = 1\n \n # 'word' as dictionary keys and 'count' as dictionary values\n for word,count in count_dict.items():\n if count == 1:\n hapax_list.append(word)\n \n return hapax_list\n \nprint(hapax('sample_input.txt'))# hw1 problem 6\n\n'''\nWrite a program that given a text file will create a new text file in\nwhich all the lines from the original file are numbered from 1 to n \n'''\n\ndef number_lines(in_file_name, out_file_name):\n # Read input file\n in_file = open(in_file_name,'r')\n # Write out output file\n out_file = open(out_file_name,'w')\n line_number = 1\n for line in in_file:\n out_file.write('{}: {}'.format(line_number, line))\n line_number = line_number + 1\n\nnumber_lines('sample_input.txt','sample_output.txt')# hw1 problem 7\n\n'''\nWrite a program that will calculate the average word length of a text\nstored in a file \n'''\n\n# The re lib provides regular expression matching operations.\nimport re\n\ndef cal_aver(file_name): \n file = open(file_name,'r')\n # Read and convert uppercase to lowercase.\n text = file.read().lower()\n \n # Ignore punctuation in the passage.\n for pun in ('!','?',',','.',\"'\",'\"',':'):\n text = text.replace(pun,'')\n \n # Use strip() to revome White space at the beginnign and the end of a text string\n # Then split words in the text by any (at least one) whitespace character\n # which is equivalent to the set [\\t\\n\\r\\f\\v].\n words = re.split('\\s+', text.strip())\n \n # creat a var. to store the length of total words\n sum_total = 0\n # Use for loop to add length of each word to the sum_total.\n for i in range(len(words)):\n sum_total += len(words[i])\n # show the average length of word\n print('The average length of word =', sum_total/len(words))\n \ncal_aver('sample_input.txt')# hw1 problem 8\n\n\"\"\"\nWrite a program that is able to play the \"Guess the number\"-game, where\nthe number to be guessed is randomly chosen between 1 and 20.\n\"\"\"\n\nimport random\n\n# input user's name\nname = input('Hello! What is your name? ')\n# tell user what to do\nprint('Well, {}, I am thinking of a number between 1 and 20. 
Take a guess: '.format(name))\n# real number (correct answer)\nreal = random.randint(1, 20)\n# give the time user guessed an initial value\ntime = 1\n# use while loop to let user have a guess\nwhile True:\n # input user's guess\n guess = int(input('Take a guess: '))\n # if the guess number is high, give a clue\n if guess > real:\n print('Your guess is too high.')\n # guess time increase\n time += 1\n # if the guess number is low, give a clue\n elif guess < real:\n print('Your guess is too low.')\n # guess time increase\n time += 1\n else:\n # if the guess is correct, print the guess times\n print('Good job, {}! You guessed my number in {} guess(es)!'.format(name,time))\n break# hw1 problem 9\n\n'''\nAn anagram is a type of word play, the result of rearranging the\nletters of a word or phrase to produce a new word or phrase, using \nall the original letters exactly once. Write a Python program that,\nwhen started 1) randomly picks a word w from given list of words,\n2) randomly permutes w (thus creating an anagram of w), 3)\npresents the anagram to the user, and 4) enters an interactive loop\nin which the user is invited to guess the original word. It may be a\ngood idea to work with (say) colour words only. \n'''\n\nimport random\nimport itertools\n\ndef guess_color_by_anagram():\n # Get a k, here k = 1, length list of unique elements \n # chosen from the population sequence or set. \n # [0] is to get the first and only element from the list.\n words = {\"red\",\"black\",\"brown\",\"white\",\"yellow\",\"blue\",\"green\"}\n word = random.sample(words,1)[0]\n \n # Get all possible permutations of characters by permutating characters in the word.\n all_permutations = list(itertools.permutations(word))\n # Random pick a permutation.\n # And join the characters from that permutation to consturct a string.\n random_idx = random.randint(1,len(all_permutations) - 1)\n string = ''.join(all_permutations[random_idx])\n \n print('Color word anagram:',string)\n \n word_input = input(\"Enter a color word you guess: \")\n while word_input != word:\n print(\"your guess is incorrect\")\n word_input = input(\"Enter a color word you guess: \")\n print(\"your guess is correct\")\n\nguess_color_by_anagram()","sub_path":"He_Vivi_HW1_graded.py","file_name":"He_Vivi_HW1_graded.py","file_ext":"py","file_size_in_byte":17648,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"341185518","text":"#\n# linter.py\n# Linter for SublimeLinter3, a code checking framework for Sublime Text 3\n#\n# Written by Clifton Kaznocha\n# Copyright (c) 2014 Clifton Kaznocha\n#\n# License: MIT\n#\n\n\"\"\"This module exports the Flow plugin class.\"\"\"\n\nimport os\nimport re\nfrom SublimeLinter.lint import Linter\n\n\nclass Flow(Linter):\n\n \"\"\"Provides an interface to flow.\"\"\"\n\n syntax = ('javascript', 'html', 'javascriptnext', 'javascript (babel)', 'javascript (jsx)', 'jsx-real')\n executable = 'flow'\n version_args = '--version'\n version_re = r'(?P<version>\\d+\\.\\d+\\.\\d+)'\n version_requirement = '>= 0.17.0'\n tempfile_suffix = '-' # Flow only works on files on disk\n\n regex = r'''(?xi)\n # Warning location and optional title for the message\n ^.+\\/(?P<file_name_1>.+):(?P<col_1>(\\d+:\\d+,(\\d+:)?\\d+)):\\s(?P<message_title>.+)?\\r?\\n\n # (Optional) main message\n (^(?P<message>.+))?\n # (Optional) message footer\n (\\r?\\n\\s\\s.+\\/(?P<file_name_2>.+):(?P<col_2>(\\d+:\\d+,(\\d+:)?\\d+)):\\s(?P<message_footer>.+))?\n '''\n\n multiline = True\n defaults = {\n # 
Allows the user to lint *all* files, regardless of whether they have the `/* @flow */` declaration at the top.\n 'all': False,\n\n # Allow to bypass the 50 errors cap\n 'show-all-errors': True\n }\n word_re = r'^((\\'|\")?[^\"\\']+(\\'|\")?)(?=[\\s\\,\\)\\]])'\n selectors = {\n 'html': 'source.js.embedded.html'\n }\n\n def cmd(self):\n \"\"\"\n Return the command to execute.\n\n By default, with no command selected, the 'status' command executes.\n This starts the server if it is already not started. Once the server\n has started, checks are very fast.\n \"\"\"\n command = [self.executable_path]\n merged_settings = self.get_merged_settings()\n\n if merged_settings['show-all-errors']:\n command.append('--show-all-errors')\n\n if merged_settings['all']:\n command.append('--all')\n\n # Until we update the regex, will re-use the old output format\n command.append('--old-output-format')\n\n return command\n\n def split_match(self, match):\n \"\"\"\n Return the components of the match.\n\n We override this to catch linter error messages and return better\n error messages.\n \"\"\"\n\n if match:\n open_file_name = os.path.basename(self.view.file_name())\n # Since the filename on the top row might be different than the open file if, for example,\n # something is imported from another file. Use the filename from the footer is it's available.\n linted_file_name = match.group('file_name_2') or match.group('file_name_1')\n\n if linted_file_name == open_file_name:\n\n # In the flow message format, the message ends up getting split into a few\n # pieces for better readability - we try to reconstruct these.\n message_title = match.group('message_title')\n message = match.group('message')\n message_footer = match.group('message_footer')\n col = match.group('col_2') or match.group('col_1')\n\n if message_title and message_title.strip():\n message = '\"{0}\"\" {1} {2}'.format(\n message_title,\n message,\n message_footer\n )\n\n # Get the start and ending indexes of the line and column\n line_cols = col.replace(':', ',').split(',')\n line_start = max(int(line_cols[0])-1, 0)\n col_start = int(line_cols[1])\n col_start -= 1\n\n # Multi line error\n if len(line_cols) == 4:\n line_end = max(int(line_cols[2])-1, 0)\n col_end = int(line_cols[3])\n near = \" \" * (self.view.text_point(line_end, col_end) - self.view.text_point(line_start, col_start))\n\n # Single line error\n else:\n col_end = int(line_cols[2])\n # Get the length of the column section for length of error\n near = \" \" * (col_end - col_start)\n\n # match, line, col, error, warning, message, near\n return match, line_start, col_start, True, False, message, near\n\n return match, None, None, None, None, '', None\n","sub_path":"linter.py","file_name":"linter.py","file_ext":"py","file_size_in_byte":4438,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"359007960","text":"# -*- coding: utf-8 -*-\n\n# 这里因为要将数据保存在本地,所以 master 指定为 local, 同时指定 jars. 
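# (Gloss of the comment above: because the output is written to the local
# filesystem, `master` is set to local and the spark-tensorflow-connector jar
# is passed explicitly.) A small verification sketch, my addition and not part
# of the original script: after the job runs, the TFRecord part files can be
# read back with plain TensorFlow 2.x; eager mode and the part-file naming
# pattern below are assumptions:
import tensorflow as tf

def peek_tfrecords(pattern="/home/axing/din/dataset/part-r-*"):
    files = tf.io.gfile.glob(pattern)          # part files written by Spark
    for raw in tf.data.TFRecordDataset(files).take(1):
        example = tf.train.Example()
        example.ParseFromString(raw.numpy())   # decode one serialized Example
        print(example)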
\n# Launch command: spark-submit --master local --jars spark-tensorflow-connector_2.11-1.15.0 data_generator.py \nfrom pyspark.sql.types import *\nfrom pyspark.sql import SparkSession\n\nspark = SparkSession.builder.appName('din_train_data').getOrCreate()\n\n# Saved locally here; this can be swapped for a distributed storage path such as HDFS or S3\npath = \"file:///home/axing/din/dataset\"\n\n# Specify the type of each feature\nfeature_names = [\n    StructField(\"label\", LongType()),\n    StructField(\"user_id\", StringType()),\n    StructField(\"age\", IntegerType()),\n    StructField(\"gender\", StringType()),\n    StructField(\"item_id\", StringType()),\n    StructField(\"clicked_items_15d\", ArrayType(StringType(), True))]\n\nschema = StructType(feature_names)\ntest_rows = [\n    [1, \"user_id1\", 22, \"0\", \"item_id1\", [\"item_id2\", \"item_id3\", \"item_id4\"]],\n    [0, \"user_id2\", 33, \"1\", \"item_id5\", [\"item_id6\", \"item_id7\"]],\n    [1, \"user_id1\", 22, \"0\", \"item_id1\", [\"item_id2\", \"item_id3\", \"item_id4\"]],\n    [0, \"user_id2\", 33, \"1\", \"item_id5\", [\"item_id6\", \"item_id7\"]],\n    [1, \"user_id1\", 22, \"0\", \"item_id1\", [\"item_id2\", \"item_id3\", \"item_id4\"]],\n    [0, \"user_id2\", 33, \"1\", \"item_id5\", [\"item_id6\", \"item_id7\"]],\n    [1, \"user_id1\", 22, \"0\", \"item_id1\", [\"item_id2\", \"item_id3\", \"item_id4\"]],\n    [0, \"user_id2\", 33, \"1\", \"item_id5\", [\"item_id6\", \"item_id7\"]],\n    [1, \"user_id1\", 22, \"0\", \"item_id1\", [\"item_id2\", \"item_id3\", \"item_id4\"]],\n    [0, \"user_id2\", 33, \"1\", \"item_id5\", [\"item_id6\", \"item_id7\"]],\n    [1, \"user_id1\", 22, \"0\", \"item_id1\", [\"item_id2\", \"item_id3\", \"item_id4\"]],\n    [0, \"user_id2\", 33, \"1\", \"item_id5\", [\"item_id6\", \"item_id7\"]],\n    [1, \"user_id1\", 22, \"0\", \"item_id1\", [\"item_id2\", \"item_id3\", \"item_id4\"]],\n    [0, \"user_id2\", 33, \"1\", \"item_id5\", [\"item_id6\", \"item_id7\"]],\n    [1, \"user_id1\", 22, \"0\", \"item_id1\", [\"item_id2\", \"item_id3\", \"item_id4\"]],\n    [0, \"user_id2\", 33, \"1\", \"item_id5\", [\"item_id6\", \"item_id7\"]],\n    [1, \"user_id1\", 22, \"0\", \"item_id1\", [\"item_id2\", \"item_id3\", \"item_id4\"]],\n    [0, \"user_id2\", 33, \"1\", \"item_id5\", [\"item_id6\", \"item_id7\"]],\n    [1, \"user_id1\", 22, \"0\", \"item_id1\", [\"item_id2\", \"item_id3\", \"item_id4\"]],\n    [0, \"user_id2\", 33, \"1\", \"item_id5\", [\"item_id6\", \"item_id7\"]]\n]\nrdd = spark.sparkContext.parallelize(test_rows)\ndf = spark.createDataFrame(rdd, schema)\n\n# Store as tfrecord files; the record format inside the files is Example\ndf.repartition(10).write.format(\"tfrecords\").option(\"recordType\", \"Example\").save(path, mode=\"overwrite\")\n\ndf = spark.read.format(\"tfrecords\").option(\"recordType\", \"Example\").load(path)\ndf.show()\n\n# Print the dataframe schema\ndf.printSchema()\n# root\n# |-- item_id: string (nullable = true)\n# |-- age: long (nullable = true)\n# |-- gender: string (nullable = true)\n# |-- clicked_items_15d: array (nullable = true)\n# |    |-- element: string (containsNull = true)\n# |-- label: long (nullable = true)\n# |-- user_id: string (nullable = true)","sub_path":"chapter 31/din/spark/data_generator.py","file_name":"data_generator.py","file_ext":"py","file_size_in_byte":3104,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"603001904","text":"__author__ = 'Kingsley Chimezie - C14468272'\n\n\nimport csv\n\n\n###################################### display() ######################################\n''' This function is used to find out and display the best and worst months\n'''\ndef display(cal_data, max_input):\n    
print(\"\\n\")\n\n\n A = cal_data[\"AvgPrices\"] # Store the average price\n M = cal_data[\"Months\"] # Store the Months\n count = 0 # Loop counter\n line_num = 0 # Used to display line num\n\n\n #ZIP the Average prices and Month dates\n price_month_zipped = dict(zip(A, M))\n\n\n #Sort zip by Average prices (lowest to highest)\n price_month = sorted(price_month_zipped.items())\n\n\n #amount of prices+months combination (100)\n data_size = len(price_month)-1 # I took away 1 because indexing starts at 0\n\n\n\n print(\"\\n_______________________________________\\n\")\n\n #DISPLAYING WORST 6 MONTHS\n count, line_num = 0, 1\n\n if max_input is 1:\n print(\"WORST MONTH:\")\n else:\n print(\"WORST %s MONTHS:\" % max_input)\n\n while count < max_input:\n print(\"%i\\t\" % line_num, end=\"\")\n print(price_month[count])\n count += 1\n line_num += 1\n\n print(\"\\n_______________________________________\\n\")\n\n #DISPLAYING BEST 6 MONTHS\n count, line_num = data_size, 0\n\n if max_input is 1:\n print(\"BEST MONTH:\")\n else:\n print(\"BEST %s MONTHS:\" % max_input)\n\n while count > data_size-max_input:\n count -= 1\n line_num += 1\n print(\"%i\\t\" % line_num, end=\"\")\n print(price_month[count+1])\n\n print(\"\\n_______________________________________\\n\")\n\n\n\n\n#################################### calculate() ####################################\n'''This function performs the main calculations\n needed to meet the assignment requirements. i.e calculate formulae given.\n'''\ndef calculate(csv_doc): # pass the read contents of the CSV file\n\n\n # VARIABLES #------------------------------------------------------------------------\n line_num = 0\t\t\t\t\t# Used to count lines\n end_month_line = 0 # Stores the line number of the end of the month average\n\n csv_Date = 0\t\t\t\t\t# Stores the column number for Date\n csv_Volume = 5\t\t\t\t\t# Stores the column number for Volume\n csv_Adj_Close = 6\t\t\t\t# Stores the column number for Adjusted Close\n\n Vol_Total = 0\t\t\t\t\t# Used to sum up the volume total\n Adj_Total = 0.0\t\t\t\t\t# Used to sum up the adjusted close total\n avg_price_total = 0.0 # Used to sum up the average price total\n\n daily_avg_list = [] # Stores the daily average price per month\n monthly_dates = [] # Stores the date of a month (format: mm/yyyy)\n monthly_avg_prices = [] # Stores the average price for a month (format: 0.00)\n # END VARIABLES #------------------------------------------------------------------------\n\n\n\n pre_date = (csv_doc[0][csv_Date])[:] # Set to latest date in the CSV in 'mm/yyyy' format\n\n for row in csv_doc: # Read the list of the CSVs contents line by line (aka day by day)\n\n\n current_date = row[csv_Date][3:] # Store to the current date on the line (mm/yyyy format)\n\n\n\n '''[FORMULAE] Summing up the average price total using formulae:\n ((v1*c1)+(v2*c2)+(v3*c3)+(v4*c4)...+(vn*cn)) / (v1+v2+v3+v4...+vn)\n where vi is the volume for day i and ci is the adjusted close price for day i.\n\n The formulae below increments and appends all necessary variables day by day\n The data of the variables used, will reset to the data of a new month.\n '''\n Vol_Total += int(row[csv_Volume]) # Summing up the volume total\n Adj_Total += float(row[csv_Adj_Close]) # Summing up the adjusted close total\n\n avg_price_total += float((int(row[csv_Volume]) * float(row[csv_Adj_Close]))) # ((v1*c1)+(v2*c2)+(v3*c3)+(v4*c4)...+(vn*cn))\n avg_price = (avg_price_total / Vol_Total) # / (v1+v2+v3+v4...+vn)\n\n daily_avg_list.append(avg_price) # Store the average price at 
this day of the month\n # END [FORMULAE] ****************************************************************\n\n\n\n '''[NEW MONTH] If previous & current date don't match, it IS a new month:\n (1) Stores the average price for previous month\n (2) reset variables used for calculations to the start of the new month's data\n '''\n if not(pre_date in current_date):\n\n\n ''' [DON'T STORE FIRST ITERATION] previous date (11/2012) & current date (NULL) don't match.\n However, it is NOT a new month yet, it is the first month.\n From the next entry (10/2012) onwards, it will be a new month\n '''\n if line_num > 0:\n\n\n # (1) Stores the average price for previous month\n end_month_line = line_num-1 # Store the line number for the end of the previous month\n monthly_avg_prices.append(round(daily_avg_list[end_month_line], 2)) # Stores the average price for the end of the previous month\n monthly_dates.append(pre_date)\n # END [DON'T STORE FIRST ITERATION] *****************************************\n\n\n # (2) reset variables used for calculations to the start of the new month's data\n Vol_Total = int(row[csv_Volume])\n Adj_Total = float(row[csv_Adj_Close])\n avg_price_total = float((int(row[csv_Volume]) * float(row[csv_Adj_Close])))\n pre_date = current_date\n # END [NEW MONTH] ***************************************************************\n\n\n line_num += 1 # line number increments\n # END FOR ****************************************************************************\n\n\n ''' [FINAL STORE] Because there are no more new months:\n Stores the last average month date and the last average price to their lists\n Store both lists in a monthly data dictionary\n '''\n monthly_dates.append(pre_date) # List of all the months dates (08/2004 - 11/2012)\n monthly_avg_prices.append(round(avg_price, 2)) # List of all months average prices (08/2004 - 11/2012)\n monthly_data = {\"Months\": monthly_dates, \"AvgPrices\": monthly_avg_prices} # Dictionary of all months dates and average prices\n return monthly_data\n\n\n\n#################################### readfile() ####################################\n''' This function is used to open and read the CSV file'''\ndef readfile():\n\n fp = open('googlePrices.csv','r')\t\t# opening the file\n csv_doc = list(csv.reader(fp))[1:]\t\t# read the CSVs Content and store as a list (Excluding column headings)\n return csv_doc\t\t\t\t\t\t # Return the list\n\n\n\n####################################### MAIN #######################################\nif __name__ == '__main__':\n\n\n ''' Try get the user to enter a number between 1 and 100\n because there are 100 months. 
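    A quick worked instance of the monthly average formula implemented in
    calculate() above (illustrative numbers, not taken from the CSV): with
    daily volumes v = (100, 200) and adjusted closes c = (10.0, 13.0), the
    month's average price is (100*10.0 + 200*13.0) / (100 + 200)
    = 3600.0 / 300 = 12.0.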
Error if the entry is not between 1 and 100\n '''\n try:\n user_input = int(input(\"\\nHow Many Best And Worst Month(s) Would You Like To View?\\n\"\n \"Enter A Number Between 1 And 100: \"))\n\n if not user_input in range(1,101):\n exit(1)\n\n except:\n print(\"\\nPlease Enter A Number Between 1 & 100\")\n exit(1)\n\n\n\n csv_data = readfile()\t\t\t\t\t # Get data from the CSV file (googlePrices.csv)\n calculation_data = calculate(csv_data)\t\t# Use the CSVs data to make the required calculations.\n display(calculation_data, user_input) # Display the calculations","sub_path":"Year2/OOP_semester1_Python/OOP_Python_assignment1_Data_Mining/OOP_Python_assignment1.py","file_name":"OOP_Python_assignment1.py","file_ext":"py","file_size_in_byte":7977,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"42524313","text":"import cv2\nimport numpy as np\n\nimage = cv2.imread('images/bunchofshapes.jpg')\nimage_gray = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\ncv2.imshow('0 - Original Image', image)\ncv2.waitKey(0)\n\nblank_image = np.zeros((image.shape[0], image.shape[1], 3))\norginal_image = image_gray\n\n\nedged_image = cv2.Canny(orginal_image, 50, 200)\ncv2.imshow('1 - edged_image', edged_image)\ncv2.waitKey(0)\n\n_, contours,hierarchy = cv2.findContours(edged_image.copy(),cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_NONE)\nprint(\"Number of contours found = \", len(contours))\n\ncv2.drawContours(blank_image,contours, -1, (0,255,0), 3)\ncv2.imshow('2 - All Contours over blank image', blank_image)\ncv2.waitKey(0)\n\ncv2.drawContours(orginal_image,contours, -1, (0,255,0), 3)\ncv2.imshow('3 - All Contours', orginal_image)\ncv2.waitKey(0)\n\ncv2.destroyAllWindows()\n\n\n\n","sub_path":"cv3test/sortingContours.py","file_name":"sortingContours.py","file_ext":"py","file_size_in_byte":818,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"586630311","text":"# Copyright (c) Facebook, Inc. 
and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nload(\"//antlir/bzl:shape.bzl\", \"shape\")\n\nmode_t = shape.union_t(int, str)\n\ndef add_stat_options(d, mode, user, group):\n if mode != None:\n d[\"mode\"] = mode\n if user != None or group != None:\n if user == None:\n user = \"root\"\n if group == None:\n group = \"root\"\n d[\"user_group\"] = \"{}:{}\".format(user, group)\n","sub_path":"antlir/bzl/add_stat_options.bzl","file_name":"add_stat_options.bzl","file_ext":"bzl","file_size_in_byte":539,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"217164927","text":"# this is a learning note, try to breakdown the 2-seq2seq tutorial in details, source from ematvey github, modified some\nimport math\nimport numpy as np\nimport tensorflow as tf\nimport tensorflow.contrib.seq2seq as seq2seq\nfrom tensorflow.contrib.layers import safe_embedding_lookup_sparse as embedding_lookup_unique\nfrom tensorflow.contrib.rnn import LSTMCell, LSTMStateTuple, GRUCell\nimport helpers\n# using tf.__version__ v1.0.1\n\n\nclass Seq2SeqModel(): # class representation of Seq2Seq model\n\n PAD = 0\n EOS = 1\n\n # class initialization definitions\n def __init__(self, encoder_cell, decoder_cell, vocab_size, embedding_size, # __init__ initialization\n bidirectional=True, attention=False, debug=False):\n self.encoder_cell = encoder_cell\n self.decoder_cell = decoder_cell\n self.vocab_size = vocab_size\n self.embedding_size = embedding_size\n self.bidirectional = bidirectional\n self.attention = attention\n self.debug = debug\n\n self._make_graph() # FUNC alpha, private function, make a graph when initialization this class\n\n # define a property of this class: decoder's output size, or called as decoder hidden units\n @property\n def decoder_hidden_units(self):\n return self.decoder_cell.output_size # property of this class, not a function any more\n # decoder_cell has a property of output_size, apparently\n\n def _make_graph(self): # FUNC alpha body\n # initialize the input as Step a\n if self.debug:\n self._init_debug_inputs() # FUNC 1\n else:\n self._init_placeholder() # FUNC 2\n\n # initialize decoder inputs/targets as Step b\n self._init_decoder_train_connectors() # FUNC 3\n\n # initialize embeddings as Step c\n self._init_embeddings() # FUNC 4\n\n # initialize encoder as Step d\n if self.bidirectional:\n self._init_bidirectional_encoder() # FUNC 5\n else:\n self._init_simple_encoder() # FUNC 6\n\n # initialize decoder as Step e\n self._init_decoder() # FUNC 7\n\n # initialize optimizer as Step f\n self._init_optimizer() # FUNC 8\n\n # Step a starts\n # FUNC 1 body\n def _init_debug_inputs(self):\n\n # aim: to make encoder_inputs and encoder_inputs_length\n # to make decoder_targets and decoder_targets_length\n\n x = [[5, 6, 7], # time major data, every row is a time, every column is a sample / batch\n [7, 6, 0], # it has a shape of (time_size, batch_size)\n [0, 7, 0]]\n xl = [2, 3, 1] # the length for each sample / batch. 
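# A quick illustration of the layout (my aside; the numbers are taken from x
# above): "time major" means axis 0 indexes time steps and axis 1 indexes
# samples, so the more familiar batch-major view is simply the transpose:
#     batch_major = [[5, 7, 0],   # sample 0: tokens 5, 7, then PAD
#                    [6, 6, 7],   # sample 1: tokens 6, 6, 7
#                    [7, 0, 0]]   # sample 2: token 7, then PAD twice
# np.transpose(batch_major) recovers x.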
it has a shape of (batch_size,)\n\n self.encoder_inputs = tf.constant(x, dtype=tf.int32, name='encoder_inputs')\n\n self.encoder_inputs_length = tf.constant(xl, dtype=tf.int32, name='encoder_inputs_length')\n\n # targets is inputs in this case\n self.decoder_targets = tf.constant(x, dtype=tf.int32, name='decoder_targets')\n\n self.decoder_targets_length = tf.constant(xl, dtype=tf.int32, name='decoder_targets_length')\n\n # FUNC 2 body\n def _init_placeholder(self):\n self.encoder_inputs = tf.placeholder(\n dtype=tf.int32,\n shape=(None, None), # should have a shape of (time_size, batch_size), similar as x in debug inputs\n name='encoder_inputs'\n )\n self.encoder_inputs_length = tf.placeholder(\n dtype=tf.int32,\n shape=(None,), # should have a shape of (batch_size,), similar as xl in debug inputs\n name='encoder_inputs_length'\n )\n self.decoder_targets = tf.placeholder(\n dtype=tf.int32,\n shape=(None, None), # should have a shape of (time_size, batch_size), similar as x in debug inputs\n name='decoder_targets'\n )\n self.decoder_targets_length = tf.placeholder(\n dtype=tf.int32,\n shape=(None,), # should have a shape of (batch_size,), similar as xl in debug inputs\n name='decoder_targets_length'\n )\n\n # Step b starts\n # FUNC 3 body\n def _init_decoder_train_connectors(self):\n with tf.name_scope('DecoderTrainFeeds'):\n # get size out of the decoder_targets, should be the (time_size, batch_size)\n # these two are just two numbers or what???\n sequence_size, batch_size = tf.unstack(tf.shape(self.decoder_targets))\n\n # EOS SLICE should be something like: [[1, 1, 1]]\n EOS_SLICE = tf.ones(shape=[1, batch_size], dtype=tf.int32, name='EOS_SLICE') * self.EOS # EOS = 1\n # PAD SLICE should be something like: [[0, 0, 0]]\n PAD_SLICE = tf.ones(shape=[1, batch_size], dtype=tf.int32, name='EOS_SLICE') * self.PAD # PAD = 0\n\n self.decoder_train_inputs = tf.concat([EOS_SLICE, self.decoder_targets], axis=0)\n # x = [[1, 1, 1], <-- added EOS slice, start_of_sequence_id in simple_decoder_fn_inference\n # [5, 6, 7],\n # [7, 6, 0],\n # [0, 7, 0]]\n self.decoder_train_length = self.decoder_targets_length + 1\n # xl = [3, 4, 2]\n\n decoder_train_targets = tf.concat([self.decoder_targets, PAD_SLICE], axis=0)\n # x = [[5, 6, 7],\n # [7, 6, 0],\n # [0, 7, 0],\n # [0, 0, 0]] <-- added PAD slice\n\n # get size out of the decoder_targets, should be the (time_size + 1, batch_size)\n decoder_train_targets_seq_len, _ = tf.unstack(tf.shape(decoder_train_targets))\n\n decoder_train_targets_eos_mask = tf.one_hot(self.decoder_train_length - 1, # = [2, 3, 1]\n decoder_train_targets_seq_len, # = 4\n on_value=self.EOS, off_value=self.PAD,\n dtype=tf.int32)\n # [\n # [0, 0, 1, 0]\n # [0, 0, 0, 1]\n # [0, 1, 0, 0]\n # ]\n decoder_train_targets_eos_mask = tf.transpose(decoder_train_targets_eos_mask, [1, 0])\n # [\n # [0, 0, 0]\n # [0, 0, 1]\n # [1, 0, 0]\n # [0, 1, 0]\n # ]\n decoder_train_targets = tf.add(decoder_train_targets,\n decoder_train_targets_eos_mask)\n # x = [[5, 6, 7],\n # [7, 6, 0],\n # [0, 7, 0],\n # [0, 0, 0]]\n # add:\n # [[0, 0, 0]\n # [0, 0, 1]\n # [1, 0, 0]\n # [0, 1, 0]]\n # become:\n # x = [[5, 6, 7],\n # [7, 6, 1],\n # [1, 7, 0],\n # [0, 1, 0]] added EOS end parts, end_of_sequence_id in simple_decoder_fn_inference\n self.decoder_train_targets = decoder_train_targets\n\n self.loss_weights = tf.ones(\n shape=[batch_size, tf.reduce_max(self.decoder_train_length)],\n dtype=tf.float32,\n name=\"loss_weights\"\n )\n # loss_weights shape should be (batch_size, time_size + 1), all initialized as 1\n\n # 
FUNC 4\n def _init_embeddings(self):\n with tf.variable_scope('embedding'):\n\n # making embedding matrix, shape of (vocab_size, embedding_size)\n sqrt3 = math.sqrt(3)\n initializer = tf.random_uniform_initializer(minval=-sqrt3, maxval=sqrt3) # just a random function\n self.embedding_matrix = tf.get_variable(\n name='embedding_matrix',\n shape=[self.vocab_size, self.embedding_size],\n dtype=tf.float32,\n initializer=initializer\n )\n\n # shape should be (time_size, batch_size, embedding_size)\n self.encoder_inputs_embedded = tf.nn.embedding_lookup(\n params=self.embedding_matrix,\n ids=self.encoder_inputs,\n name='encoder_inputs_embedded'\n )\n\n # shape should be (time_size + 1, batch_size, embedding_size)\n self.decoder_train_inputs_embedded = tf.nn.embedding_lookup(\n params=self.embedding_matrix,\n ids=self.decoder_train_inputs,\n name='decoder_train_inputs_embedded'\n )\n\n # FUNC 5\n def _init_bidirectional_encoder(self):\n with tf.variable_scope('BidirectionalEncoder'):\n (\n (encoder_fw_outputs, # (encoder_times, batch_size, encoder_hidden_units)\n encoder_bw_outputs), # (encoder_times, batch_size, encoder_hidden_units)\n (encoder_fw_state, # c, h, shape: (batch_size, encoder_hidden_units)\n encoder_bw_state) # c, h, shape: (batch_size, encoder_hidden_units)\n ) = (\n tf.nn.bidirectional_dynamic_rnn(\n cell_fw=self.encoder_cell,\n cell_bw=self.encoder_cell,\n inputs=self.encoder_inputs_embedded,\n sequence_length=self.encoder_inputs_length,\n time_major=True,\n dtype=tf.float32)\n )\n\n # (encoder_times, batch_size, encoder_hidden_units*2)\n self.encoder_outputs = tf.concat((encoder_fw_outputs, encoder_bw_outputs), 2)\n\n # if isinstance(encoder_fw_state, LSTMStateTuple):\n # encoder_state_c = tf.concat(\n # (encoder_fw_state.c, encoder_bw_state.c), 1, name='bidirectional_concat_c')\n # encoder_state_h = tf.concat(\n # (encoder_fw_state.h, encoder_bw_state.h), 1, name='bidirectional_concat_h')\n # self.encoder_state = LSTMStateTuple(c=encoder_state_c, h=encoder_state_h)\n # elif isinstance(encoder_fw_state, tf.Tensor):\n # self.encoder_state = tf.concat((encoder_fw_state, encoder_bw_state), 1, name='bidirectional_concat')\n\n encoder_state_c = tf.concat((encoder_fw_state.c, encoder_bw_state.c), 1, name='bidirectional_concat_c')\n encoder_state_h = tf.concat((encoder_fw_state.h, encoder_bw_state.h), 1, name='bidirectional_concat_h')\n self.encoder_state = LSTMStateTuple(c=encoder_state_c, h=encoder_state_h)\n # merge two (batch_size, encoder_hidden_units) to (batch_size, encoder_hidden_units*2)\n\n # FUNC 6\n def _init_simple_encoder(self):\n with tf.variable_scope('SimpleEncoder'):\n (\n self.encoder_outputs,\n self.encoder_state\n ) = (\n tf.nn.dynamic_rnn(\n cell=self.encoder_cell,\n inputs=self.encoder_inputs_embedded,\n sequence_length=self.encoder_inputs_length,\n time_major=True,\n dtype=tf.float32)\n )\n\n # FUNC 7\n def _init_decoder(self):\n with tf.variable_scope('Decoder') as scope:\n # use linear function to make output neurons from number X to vocab_size\n # shape change should be: from (encoder_times, batch_size, decoder_hidden_units)\n # to (encoder_times, batch_size, vocab_size)\n def output_fn(outputs): # FUNC 9\n return tf.contrib.layers.linear(outputs, self.vocab_size, scope=scope)\n\n # plain decoder mode\n if not self.attention:\n # Simple decoder function for a sequence-to-sequence model used in the dynamic_rnn_decoder.\n # The simple_decoder_fn_train is a simple training function for a sequence-to-sequence model.\n # It should be used when dynamic_rnn_decoder 
is in the training mode.\n # The simple_decoder_fn_train is called with a set of the user arguments and returns the decoder_fn,\n # which can be passed to the dynamic_rnn_decoder, such that\n # dynamic_fn_train = simple_decoder_fn_train(encoder_state)\n # outputs_train, state_train = dynamic_rnn_decoder(decoder_fn=dynamic_fn_train, ...)\n decoder_fn_train = seq2seq.simple_decoder_fn_train(\n encoder_state=self.encoder_state,\n name='decoder_fn_train'\n )\n\n # Simple decoder function for a sequence-to-sequence model used in the dynamic_rnn_decoder.\n # The simple_decoder_fn_inference is a simple inference function for a sequence-to-sequence model.\n # It should be used when dynamic_rnn_decoder is in the inference mode.\n # The simple_decoder_fn_inference is called with a set of the user arguments and returns the decoder_fn,\n # which can be passed to the dynamic_rnn_decoder, such that\n # dynamic_fn_inference = simple_decoder_fn_inference(...)\n # outputs_inference, state_inference = dynamic_rnn_decoder(decoder_fn=dynamic_fn_inference, ...)\n decoder_fn_inference = seq2seq.simple_decoder_fn_inference(\n output_fn=output_fn, # FUNC 9, An output function to project your cell_output onto class logits.\n encoder_state=self.encoder_state, # The encoded state to initialize the dynamic_rnn_decoder.\n embeddings=self.embedding_matrix, # The embeddings matrix used for the decoder sized [num_decoder_symbols, embedding_size]\n start_of_sequence_id=self.EOS,\n end_of_sequence_id=self.EOS,\n maximum_length=tf.reduce_max(self.encoder_inputs_length) + 3, # The maximum allowed of time steps to decode.\n num_decoder_symbols=self.vocab_size, # The number of classes to decode at each time step.\n )\n\n # attention decoder mode\n else:\n # encoder_outputs: (encoder_times, batch_size, encoder_hidden_units*2)\n # attention_states: (batch_size, encoder_times, encoder_hidden_units*2)\n attention_states = tf.transpose(self.encoder_outputs, [1, 0, 2])\n\n # Prepare keys/values/functions for attention.\n (\n attention_keys, # to be compared with target states. # out1\n attention_values, # to be used to construct context vectors. # out2\n attention_score_fn, # to compute similarity between key and target states. # out3\n attention_construct_fn # to build attention states. # out4\n ) = seq2seq.prepare_attention(\n attention_states=attention_states, # hidden states to attend over.\n attention_option=\"bahdanau\", # how to compute attention, either \"luong\" or \"bahdanau\".\n num_units=self.decoder_hidden_units, # hidden state dimension.\n )\n\n # compared with simple_decoder_fn_train\n # Attentional decoder function for dynamic_rnn_decoder during training.\n # The attention_decoder_fn_train is a training function for an attention-based sequence-to-sequence model.\n # It should be used when dynamic_rnn_decoder is in the training mode.\n # The attention_decoder_fn_train is called with a set of the user arguments and returns the decoder_fn,\n # which can be passed to the dynamic_rnn_decoder, such that\n # dynamic_fn_train = attention_decoder_fn_train(encoder_state)\n # outputs_train, state_train = dynamic_rnn_decoder(decoder_fn=dynamic_fn_train, ...)\n decoder_fn_train = seq2seq.attention_decoder_fn_train(\n encoder_state=self.encoder_state, # The encoded state to initialize the dynamic_rnn_decoder.\n attention_keys=attention_keys, # to be compared with target states. # out1\n attention_values=attention_values, # to be used to construct context vectors. 
# out2\n attention_score_fn=attention_score_fn, # to compute similarity between key and target states. # out3\n attention_construct_fn=attention_construct_fn, # to build attention states. # out4\n name='attention_decoder'\n )\n\n # compared with simple_decoder_fn_inference\n decoder_fn_inference = seq2seq.attention_decoder_fn_inference(\n output_fn=output_fn,\n encoder_state=self.encoder_state,\n attention_keys=attention_keys,\n attention_values=attention_values,\n attention_score_fn=attention_score_fn,\n attention_construct_fn=attention_construct_fn,\n embeddings=self.embedding_matrix,\n start_of_sequence_id=self.EOS,\n end_of_sequence_id=self.EOS,\n maximum_length=tf.reduce_max(self.encoder_inputs_length) + 3,\n num_decoder_symbols=self.vocab_size,\n )\n\n # dynamic_rnn_decoder module for train process\n (\n self.decoder_outputs_train, # [max_time, batch_size, cell.output_size]\n self.decoder_state_train, # [batch_size, cell.state_size]\n self.decoder_context_state_train\n # https://www.tensorflow.org/versions/r1.0/api_docs/python/tf/contrib/seq2seq/dynamic_rnn_decoder\n ) = (\n seq2seq.dynamic_rnn_decoder(\n cell=self.decoder_cell, # An instance of RNNCell.\n decoder_fn=decoder_fn_train,\n inputs=self.decoder_train_inputs_embedded,\n sequence_length=self.decoder_train_length,\n time_major=True,\n scope=scope,\n )\n )\n\n # from [max_time, batch_size, cell.output_size] to [max_time, batch_size, vocab_size]\n self.decoder_logits_train = output_fn(self.decoder_outputs_train)\n\n # from [max_time, batch_size, vocab_size] to [max_time, batch_size], now decoder_prediction_train saves symbols\n self.decoder_prediction_train = tf.argmax(self.decoder_logits_train, axis=-1, name='decoder_prediction_train')\n\n # reuse variables from train to inference\n scope.reuse_variables()\n\n # dynamic_rnn_decoder module for inference process\n (\n self.decoder_logits_inference,\n self.decoder_state_inference,\n self.decoder_context_state_inference\n ) = (\n seq2seq.dynamic_rnn_decoder( # there are no inputs and sequence_length handles in this inference module\n cell=self.decoder_cell,\n decoder_fn=decoder_fn_inference,\n time_major=True,\n scope=scope,\n )\n )\n\n self.decoder_prediction_inference = tf.argmax(self.decoder_logits_inference, axis=-1, name='decoder_prediction_inference')\n\n # FUNC 8\n def _init_optimizer(self):\n # decoder_logits_train: [max_time, batch_size, vocab_size]\n # logits: [batch_size, max_time, vocab_size]\n logits = tf.transpose(self.decoder_logits_train, [1, 0, 2])\n\n # decoder_train_targets: [max_time, batch_size]\n # targets: [batch_size, max_time]\n targets = tf.transpose(self.decoder_train_targets, [1, 0])\n\n self.loss = seq2seq.sequence_loss(\n logits=logits, # A 3D Tensor of shape [batch_size x sequence_length x num_decoder_symbols] and dtype float.\n # The logits correspond to the prediction across all classes at each timestep.\n targets=targets, # A 2D Tensor of shape [batch_size x sequence_length] and dtype int.\n # The target represents the true class at each timestep.\n weights=self.loss_weights # A 2D Tensor of shape [batch_size x sequence_length] and dtype float. Weights constitutes the weighting of each\n # prediction in the sequence. 
When using weights as masking set all valid timesteps to 1 and all padded timesteps to 0.\n # from previous: loss_weights shape should be (batch_size, time_size + 1), all initialized as 1\n )\n self.train_op = tf.train.AdamOptimizer().minimize(self.loss)\n\n # FUNC 10, make train data from helpers function\n def make_train_inputs(self, input_seq, target_seq):\n inputs_, inputs_length_ = helpers.batch(input_seq)\n targets_, targets_length_ = helpers.batch(target_seq)\n return {\n self.encoder_inputs: inputs_,\n self.encoder_inputs_length: inputs_length_,\n self.decoder_targets: targets_,\n self.decoder_targets_length: targets_length_,\n }\n\n # FUNC 11, make inference data from helpers function, this is outside of the class Seq2SeqModel\n def make_inference_inputs(self, input_seq):\n inputs_, inputs_length_ = helpers.batch(input_seq)\n return {\n self.encoder_inputs: inputs_,\n self.encoder_inputs_length: inputs_length_,\n }\n\n # class Seq2SeqModel ends here\n\n\n# FUNC 12, this is outside of the class Seq2SeqModel\ndef make_seq2seq_model(**kwargs):\n args = dict(encoder_cell=LSTMCell(10),\n decoder_cell=LSTMCell(20),\n vocab_size=10,\n embedding_size=10,\n attention=True,\n bidirectional=True,\n debug=False)\n args.update(kwargs)\n return Seq2SeqModel(**args)\n\n\ndef train_on_copy_task(session,\n model,\n length_from=3,\n length_to=8,\n vocab_lower=2,\n vocab_upper=10,\n batch_size=100,\n max_batches=5000,\n batches_in_epoch=1000,\n verbose=True):\n batches = helpers.random_sequences(length_from=length_from, length_to=length_to,\n vocab_lower=vocab_lower, vocab_upper=vocab_upper,\n batch_size=batch_size)\n loss_track = []\n for batch in range(max_batches + 1):\n batch_data = next(batches)\n fd = model.make_train_inputs(batch_data, batch_data)\n _, l = session.run([model.train_op, model.loss], fd)\n loss_track.append(l)\n\n if verbose:\n if batch == 0 or batch % batches_in_epoch == 0:\n print('batch {}'.format(batch))\n print(' minibatch loss: {}'.format(session.run(model.loss, fd)))\n for i, (e_in, dt_pred) in enumerate(zip(\n fd[model.encoder_inputs].T,\n session.run(model.decoder_prediction_train, fd).T\n )):\n print(' sample {}:'.format(i + 1))\n print(' enc input > {}'.format(e_in))\n print(' dec train predicted > {}'.format(dt_pred))\n if i >= 2:\n break\n print()\n\n return loss_track\n\nif __name__ == '__main__':\n import sys\n\n if 'fw-debug' in sys.argv:\n tf.reset_default_graph()\n with tf.Session() as session:\n model = make_seq2seq_model(debug=True)\n session.run(tf.global_variables_initializer())\n session.run(model.decoder_prediction_train)\n session.run(model.decoder_prediction_train)\n\n elif 'fw-inf' in sys.argv:\n tf.reset_default_graph()\n with tf.Session() as session:\n model = make_seq2seq_model()\n session.run(tf.global_variables_initializer())\n fd = model.make_inference_inputs([[5, 4, 6, 7, 8], [6, 6], [1, 2, 3]])\n inf_out = session.run(model.decoder_prediction_inference, fd)\n print(inf_out)\n\n elif 'train' in sys.argv:\n tracks = {}\n\n tf.reset_default_graph()\n\n with tf.Session() as session:\n model = make_seq2seq_model(attention=True)\n session.run(tf.global_variables_initializer())\n loss_track_attention = train_on_copy_task(session, model)\n\n tf.reset_default_graph()\n\n with tf.Session() as session:\n model = make_seq2seq_model(attention=False)\n session.run(tf.global_variables_initializer())\n loss_track_no_attention = train_on_copy_task(session, model)\n\n import matplotlib.pyplot as plt\n plt.plot(loss_track_attention)\n plt.plot(loss_track_no_attention)\n # summarize the attention run; batch_size mirrors the train_on_copy_task default\n loss_track = loss_track_attention\n batch_size = 100\n print('loss {:.4f} after {} examples 
(batch_size={})'.format(loss_track[-1], len(loss_track)*batch_size, batch_size))\n\n else:\n tf.reset_default_graph()\n session = tf.InteractiveSession()\n model = make_seq2seq_model(debug=False)\n session.run(tf.global_variables_initializer())\n\n fd = model.make_inference_inputs([[5, 4, 6, 7], [6, 6]])\n\n inf_out = session.run(model.decoder_prediction_inference, fd)\n","sub_path":"4-seq2seq/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":25931,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"248373422","text":"'''\nA Convolutional Network implementation example using TensorFlow library.\nThis example is using the MNIST database of handwritten digits\n(http://yann.lecun.com/exdb/mnist/)\nAuthor: Aymeric Damien\nProject: https://github.com/aymericdamien/TensorFlow-Examples/\n'''\n\nfrom __future__ import print_function\n\nimport tensorflow as tf\n\n# Import MNIST data\nimport read_data as rd\n\n# Parameters\nlearning_rate = 0.0001\ntraining_iters = 2000000\nbatch_size = 128\ndisplay_step = 200\n# Network Parameters\nn_input = 36\nn_classes = 4 \ndropout = 0.95\n\n# tf Graph input\nx = tf.placeholder(tf.float32, [None, n_input, 1])\ny = tf.placeholder(tf.float32, [None, n_classes])\nkeep_prob = tf.placeholder(tf.float32) #dropout (keep probability)\n\n# Create model\ndef conv_net(x, weights, biases, dropout):\n # Reshape input picture\n x = tf.reshape(x, shape=[-1, 1, n_input, 1])\n fc1 = tf.reshape(x, [-1, n_input])\n fc1 = tf.add(tf.matmul(fc1, weights['wd1']), biases['bd1'])\n fc1 = tf.nn.relu(fc1)\n fc1 = tf.nn.dropout(fc1, dropout)\n fc2 = tf.add(tf.matmul(fc1, weights['wd2']), biases['bd2'])\n fc2 = tf.nn.relu(fc2)\n fc2 = tf.nn.dropout(fc2, dropout)\n fc3 = tf.add(tf.matmul(fc2, weights['wd3']), biases['bd3'])\n fc3 = tf.nn.relu(fc3)\n fc3 = tf.nn.dropout(fc3, dropout)\n\n # Output, class prediction\n out = tf.add(tf.matmul(fc3, weights['out']), biases['out'])\n return out\n\n# Store layers weight & bias\nweights = {\n\n 'wd1': tf.Variable(tf.random_normal([n_input, 256])),#37\n 'wd2': tf.Variable(tf.random_normal([256, 128])),\n 'wd3': tf.Variable(tf.random_normal([128, 64])),\n # 512 inputs, 10 outputs (class prediction)\n 'out': tf.Variable(tf.random_normal([64, n_classes]))\n}\n\nbiases = {\n 'bd1': tf.Variable(tf.random_normal([256])),\n 'bd2': tf.Variable(tf.random_normal([128])),\n 'bd3': tf.Variable(tf.random_normal([64])),\n 'out': tf.Variable(tf.random_normal([n_classes]))\n}\n\n# Construct model\npred = conv_net(x, weights, biases, keep_prob)\n\n# Define loss and optimizer\ncost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(pred, y))\noptimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)\n\n# Evaluate model\ncorrect_pred = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))\naccuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))\n\n# Initializing the variables\ninit = tf.initialize_all_variables()\nrd.read_data_()\nval_data, val_label = rd.get_val()\n# Launch the graph\nmx = 0\nwith tf.Session() as sess:\n sess.run(init)\n step = 1\n # Keep training until reach max iterations\n while step * batch_size < training_iters:\n batch_x, batch_y = rd.next_train_batch(batch_size, step - 1)\n # Run optimization op (backprop)\n sess.run(optimizer, feed_dict={x: batch_x, y: batch_y,\n keep_prob: dropout})\n if step % display_step == 0:\n # Calculate batch loss and accuracy\n loss, acc = sess.run([cost, accuracy], feed_dict={x: batch_x,\n y: batch_y,\n keep_prob: 1.})\n 
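# periodic progress report: metrics are recomputed on the current training batch with dropout disabled\n                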
print(\"Iter \" + str(step*batch_size) + \", Minibatch Loss= \" + \\\n \"{:.6f}\".format(loss) + \", Training Accuracy= \" + \\\n \"{:.5f}\".format(acc))\n step += 1\n if(step % 20 == 0):\n test_acc = sess.run(accuracy, feed_dict={x: val_data,\n y: val_label,\n keep_prob: 1.})\n print(\"Testing Accuracy:\", test_acc)\n if(test_acc > mx):\n mx = test_acc\n \n print(\"Optimization Finished!\")\n print(\"max acc \" + str(mx))\n \n \n \n\n # Calculate accuracy for 512 mnist test images\n print(\"Testing Accuracy:\", \\\n sess.run(accuracy, feed_dict={x: val_data,\n y: val_label,\n keep_prob: 1.}))\n","sub_path":"TensorFlow/test_fuck.py","file_name":"test_fuck.py","file_ext":"py","file_size_in_byte":3990,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"500517816","text":"from typing import Any, Type, Generic, TypeVar\n\nfrom pydantic import BaseModel\n\nR = TypeVar(\"R\", bound=BaseModel)\n\n__all__ = (\"Backend\", \"check_response\")\n\n\nclass Backend(Generic[R]):\n def send_request(\n self, method: str, url: str, response_schema: Type[R], **kwargs: Any\n ) -> R:\n raise NotImplementedError\n\n\ndef check_response(data):\n known_codes = {\"1001\", \"1002\", \"1003\", \"2001\", \"2002\", \"3001\", \"3002\", \"3003\"}\n if \"code\" in data and data[\"code\"] in known_codes:\n raise ValueError(data)\n","sub_path":"venv/Lib/site-packages/pornhub_api/backends/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":526,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"630826782","text":"# coding=UTF-8\nimport imp\nimport lib.indicator as ind; imp.reload(ind); \n############################################################################### \ndef filter1(self,I): #2代濾網 IF(OR(AND((AM2>10.5),(AM2<16.5)),AND((AU2>6),(AU2<8))),0,AV2)\n import lib.filter as fl\n #if self.RunToday==1:self.RunToday=-1\n if self.RunToday==-1:\n self.RunToday=1\n ind.GetIndicatorByType(I,\"大台未純化大單企圖\")\n ind.GetIndicatorByType(I,\"大台未純化大單作為\")\n II=fl.getOpenVolABS(I.get(\"大台未純化大單企圖\"),15)\n M=fl.getOpenVolABS(I.get(\"大台未純化大單作為\"),15)\n\n if (M>8.8 and M<11) or (II<10) :\n self.RunToday=0\n return self.RunToday\n############################################################################### \ndef s1(self,PRICE,i,I):\n baseT= 45\n if filter1(self,I)==0: return\n if i< (baseT) : return\n if i==baseT:ind.GetIndicatorByType(I,\"小台未純化大單作為\")\n aa=I.get(\"小台未純化大單作為\")[i-1]\n amax=I.get(\"小台未純化大單作為高通道\")[i-1]\n amin=I.get(\"小台未純化大單作為低通道\")[i-1]\n \n if aa<amin : self.EnterShort(PRICE)\n if aa>amax : self.EnterLong(PRICE)\n self.CheckDailyExitAll(I.get(\"TIME\")[i],PRICE)\n \n############################################################################### \nimport os\nSTittle=u\"[p02]小台未純化大單作為通道45策略\"\nFName=os.path.split(__file__)[1].split('.')[0]\nif __name__ == '__main__':\n exec(open(os.path.split(os.path.realpath(__file__))[0]+'\\\\init.py').read())\n\n","sub_path":"strategy/p02.py","file_name":"p02.py","file_ext":"py","file_size_in_byte":1604,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"481768549","text":"import torch.utils.data\nimport torchvision.transforms as transforms\nimport torchvision.datasets as datasets\n\nfrom data_providers.base_provider import *\n\n\nclass Cifar10DataProvider(DataProvider):\n\n def __init__(self, save_path=None, train_batch_size=64, test_batch_size=64, valid_size=0.5, n_worker=6):\n\n self._save_path = save_path\n 
train_dataset = datasets.CIFAR10(self.save_path, transform=transforms.Compose([\n transforms.RandomCrop(self.image_size, padding=4),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n self.normalize,\n ]))\n\n if valid_size is not None:\n if isinstance(valid_size, float):\n valid_size = int(valid_size * len(train_dataset))\n else:\n assert isinstance(valid_size, int), 'invalid valid_size: %s' % valid_size\n\n num_train = len(train_dataset)\n indices = list(range(num_train))\n # valid_size is an absolute sample count at this point; hold out the last valid_size samples\n split = num_train - valid_size\n\n train_sampler = torch.utils.data.sampler.SubsetRandomSampler(indices[:split])\n valid_sampler = torch.utils.data.sampler.SubsetRandomSampler(indices[split:num_train])\n\n self.train = torch.utils.data.DataLoader(\n train_dataset, batch_size=train_batch_size, sampler=train_sampler,\n num_workers=n_worker, pin_memory=True,\n )\n self.valid = torch.utils.data.DataLoader(\n train_dataset, batch_size=test_batch_size, sampler=valid_sampler,\n num_workers=n_worker, pin_memory=True,\n )\n # the valid loader is overridden here to point at the train loader\n self.valid = self.train\n else:\n self.train = torch.utils.data.DataLoader(\n train_dataset, batch_size=train_batch_size, shuffle=True,\n num_workers=n_worker, pin_memory=True,\n )\n self.valid = None\n\n @staticmethod\n def name():\n return 'cifar10'\n\n @property\n def data_shape(self):\n return 3, self.image_size, self.image_size # C, H, W\n\n @property\n def n_classes(self):\n return 10\n\n @property\n def save_path(self):\n if self._save_path is None:\n self._save_path = '/home/gaoyibo/Datasets/cifar-10/'\n return self._save_path\n\n @property\n def data_url(self):\n raise ValueError('unable to download cifar10')\n\n @property\n def normalize(self):\n return transforms.Normalize(mean=[0.49139968, 0.48215827, 0.44653124], std=[0.24703233, 0.24348505, 0.26158768])\n\n @property\n def resize_value(self):\n return 32\n\n @property\n def image_size(self):\n return 32\n\n","sub_path":"search/data_providers/cifar10.py","file_name":"cifar10.py","file_ext":"py","file_size_in_byte":2666,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"392499617","text":"import json\nimport datetime\nfrom django.utils import six\n\nclass CalEvent:\n\n date = ''\n title = ''\n location = ''\n description = ''\n\n def __init__(self, *args, **kwargs):\n for key, value in six.iteritems(kwargs):\n setattr(self, key, value)\n\n def set_date(self, date):\n '''\n This function takes in a string in the YYYY-MM-DD format\n or a datetime.datetime object\n '''\n if isinstance(date, datetime.datetime):\n self.date = date.strftime('%Y-%m-%d')\n elif isinstance(date, six.string_types):\n # strptime takes (date_string, format) and raises ValueError on mismatch\n try:\n datetime.datetime.strptime(date, '%Y-%m-%d')\n except ValueError:\n raise ValueError(\"%r is not in a YYYY-MM-DD format!\" \\\n % date)\n self.date = date\n else:\n raise TypeError(\"%r is not a string or a datetime.datetime\\\n object!\" % date)\n\nclass Calendar:\n\n events = []\n template = \"$(#\\\"clndr-template\\\").html()\"\n startWithMonth = \"moment()\" # moment js object or YYYY-MM-DD string\n weekOffset = 0\n daysOfWeek = []\n classes = {\n 'past': \"past\",\n 'today': \"today\",\n 'event': \"event\",\n 'selected': \"selected\",\n 'inactive': \"inactive\",\n 'lastMonth': \"last-month\",\n 'nextMonth': \"next-month\",\n 'adjacentMonth': \"adjacent-month\",\n }\n targets = {\n 'day': 'day',\n 'empty': 'empty',\n 'nextButton': 'clndr-next-button',\n 'todayButton': 'clndr-today-button',\n 'previousButton': 'clndr-previous-button',\n 'nextYearButton': 'clndr-next-year-button',\n 
'previousYearButton': 'clndr-previous-year-button',\n }\n clickEvents = {\n 'click': None,\n 'today': None,\n 'nextMonth': None,\n 'previousMonth': None,\n 'onMonthChange': None,\n 'nextYear': None,\n 'previousYear': None,\n 'onYearChange': None,\n 'nextInterval': None,\n 'previousInterval': None,\n 'onIntervalChange': None,\n }\n multiDayEvents = {}\n ready = None # JS function string\n doneRendering = None # JS function string\n useTouchEvent = False\n dateParameter = 'date'\n showAdjacentMonths = True\n adjacentDaysChangeMonth = False\n forceSixRows = False\n trackSelectedDate = False\n selectedDate = None # sets a date to be selected\n ignoreInactiveDaysInSelection = False\n lengthOfTime = {\n 'months': None,\n 'days': None,\n 'interval': 1,\n }\n extras = {}\n render = None # JS function in string\n constraints = {\n 'startDate': None,\n 'endDate': None,\n }\n moment = None # Pass a custom Moment js instance\n\n def __init__(self, *args, **kwargs):\n for key, value in six.iteritems(kwargs):\n setattr(self, key, value)\n\n def toJSON(self):\n '''\n This function returns the object in a JSON format so that is can be\n used by CLNDR.js\n '''\n return json.dumps(self.__dict__)\n\n def add_event(self, event=None, **kwargs):\n if not event:\n event = CalEvent(**kwargs)\n if isinstance(event, CalEvent):\n self.events.append(event)\n else: \n raise TypeError(\"%r is not a CalEvent object\" % event)\n return event\n","sub_path":"clndr/data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":3436,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"565031478","text":"import cv2\nimport numpy as np\n\n # Currently Used one\ndef segment_color(img, lower, upper):\n \"\"\"\n :param img: Image to isolate teh color of\n :param lower: [lowerHue, lowerSat, lowerVal]\n :param upper: [upperHue, upperSat, upperVal]\n :return: Isolated image\n \"\"\"\n hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)\n\n if lower[0] > upper[0]:\n # If the HSV values wrap around, then intelligently mask it\n\n upper1 = [180, upper[1], upper[2]]\n mask1 = cv2.inRange(hsv, np.array(lower), np.array(upper1))\n\n lower2 = [0, lower[1], lower[2]]\n mask2 = cv2.inRange(hsv, np.array(lower2), np.array(upper))\n\n mask = mask1 + mask2\n\n else:\n mask = cv2.inRange(hsv, np.array(lower), np.array(upper))\n\n final = cv2.bitwise_and(img, img, mask=mask)\n rGray = cv2.cvtColor(final, cv2.COLOR_BGR2GRAY)\n ret, rThresh = cv2.threshold(rGray, 20, 255, cv2.THRESH_BINARY)\n cv2.imshow(\"Mask\", rThresh)\n # return final\n\ndef nothing(x):\n pass\n\n\nHSV_image = \"HSV image\"\ncv2.namedWindow(\"HSV image\", cv2.WINDOW_NORMAL)\ncv2.createTrackbar('Low_H', HSV_image, 0, 255, nothing)\ncv2.createTrackbar('Low_S', HSV_image, 0, 255, nothing)\ncv2.createTrackbar('Low_V', HSV_image, 0, 255, nothing)\ncv2.createTrackbar('High_H', HSV_image, 0, 255, nothing)\ncv2.createTrackbar('High_S', HSV_image, 0, 255, nothing)\ncv2.createTrackbar('High_V', HSV_image, 0, 255, nothing)\n# cv2.namedWindow(\"thresh image\", cv2.WINDOW_AUTOSIZE)\n\nvs = cv2.VideoCapture(0)\n# vs.set(cv2.CAP_PROP_BRIGHTNESS, 255)\n# vs.set(cv2.CAP_PROP_AUTOFOCUS, 1)\nvs.set(cv2.CAP_PROP_FRAME_HEIGHT, 480)\nvs.set(cv2.CAP_PROP_FRAME_WIDTH, 640)\n_, original_image = vs.read()\n\nwhile True:\n\n low_H = cv2.getTrackbarPos(\"Low_H\", HSV_image)\n low_S = cv2.getTrackbarPos(\"Low_S\", HSV_image)\n low_V = cv2.getTrackbarPos(\"Low_V\", HSV_image)\n High_H = cv2.getTrackbarPos(\"High_H\", HSV_image)\n High_S = 
cv2.getTrackbarPos(\"High_S\", HSV_image)\n High_V = cv2.getTrackbarPos(\"High_V\", HSV_image)\n lower_color = (low_H, low_S, low_V)\n upper_color = (High_H, High_S, High_V)\n try:\n\n _, original_image = vs.read()\n segment_color(original_image, lower_color, upper_color)\n\n cv2.imshow(\"Original Image\", original_image)\n\n # grey_image = cv2.cvtColor(vars.original_image, cv2.COLOR_RGB2GRAY)\n\n key = cv2.waitKey(1)\n if key == 27: # exit on ESC\n break\n except KeyboardInterrupt:\n break\n\nvs.stop()\ncv2.destroyAllWindows()\n","sub_path":"src/jetson_tx1/auto_rescue/scripts/visual_color_seg.py","file_name":"visual_color_seg.py","file_ext":"py","file_size_in_byte":2495,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"309454491","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.contrib.auth.decorators import login_required\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom django.http import HttpResponseRedirect, JsonResponse\nfrom django.http import HttpResponseBadRequest, HttpResponseNotAllowed\nfrom django.shortcuts import render\nfrom django.template.loader import get_template\nfrom django.urls import reverse\nfrom easel.models import Profile, Site, Page\nfrom easel.forms import AddSiteForm, AddPageForm, AddMediaForm, EditSiteForm\nfrom easel.error import JsonErrorResponse, Json400, Json405\nfrom bs4 import BeautifulSoup\n\n\n@login_required\ndef home(request):\n profile = Profile.objects.get(user=request.user)\n sites = Site.objects.filter(owner=profile)\n siteCount = sites.count()\n context = {}\n context[\"add_site_form\"] = AddSiteForm()\n context[\"edit_site_form\"] = EditSiteForm()\n context[\"profile\"] = profile\n\n if siteCount == 0:\n return render(request, 'site-editor/no-site.html', context)\n else:\n context[\"sites\"] = sites\n return render(request, 'site-editor/site-menu.html', context)\n\n\n@login_required\ndef siteEditor(request, siteName):\n context = {}\n profile = Profile.objects.get(user=request.user)\n site = Site.objects.get(owner=profile, name=siteName)\n pages = profile.getAllPages(siteName)\n context['add_page_form'] = AddPageForm()\n context['add_media_form'] = AddMediaForm()\n context['add_site_form'] = AddSiteForm()\n\n context['username'] = request.user.username\n context['profile'] = profile\n context['site'] = site\n context['pages'] = pages\n context['sites'] = Site.objects.filter(owner=profile)\n return render(request, 'site-editor/site-editor.html', context)\n\n\n# requires GET request to \"/sites/(?P<siteName>\\w+)/editor/getPageNames/\"\n@login_required\ndef getPageNames(request, siteName):\n try:\n site = Site.getSite(request.user.username, siteName)\n except ObjectDoesNotExist:\n return HttpResponseBadRequest()\n pages = Page.objects.filter(site=site)\n context = {'site': site, 'pages': pages}\n return render(request, 'json/pages.json', context,\n content_type='application/json')\n\n\n# requires GET request\n@login_required\ndef getPageHTML(request, siteName, pageName):\n if request.method != 'GET':\n return Json405('GET')\n\n site = Site.getSite(request.user.username, siteName)\n page = site.getPage(pageName)\n return JsonResponse({'nav_html': page.getNavHTML(),\n 'content_html': page.content_html})\n\n\n# requires POST request with the following argument:\n# { 'isOpen': <whether page is opened>,\n# 'isActive': <whether page is active (focused on editor)> }\n@login_required\ndef changePageStatus(request, siteName, pageName):\n if 
request.method != 'POST':\n return Json405('POST')\n\n try:\n site = Site.getSite(request.user.username, siteName)\n page = site.getPage(pageName)\n except ObjectDoesNotExist:\n return Json400()\n\n # change open and active field\n isOpened = False\n isActive = False\n if 'isOpened' in request.POST:\n isOpened = (request.POST['isOpened'] == 'true')\n if 'isActive' in request.POST:\n isActive = (request.POST['isActive'] == 'true')\n\n allPages = Page.objects.filter(site=site)\n page.opened = isOpened\n # if page turned active, change other page to false\n if isActive:\n for otherPage in allPages:\n if otherPage.active:\n otherPage.active = False\n otherPage.save()\n page.active = True\n # if page turned not active\n else:\n page.active = False\n page.save()\n\n # check that there is only one/zero active tab, depending on whether\n # there is opened tab(s)\n if (allPages.filter(opened=True).count() == 0):\n assert(allPages.filter(active=True).count() == 0)\n return JsonResponse({'success': True})\n\n\n@login_required\ndef addSite(request):\n if request.method != 'POST':\n return HttpResponseNotAllowed('POST')\n\n form = AddSiteForm(request.POST)\n profile = Profile.objects.get(user=request.user)\n\n if not form.is_valid(request.user):\n return JsonErrorResponse(400, form.errors)\n\n siteName = request.POST['siteName']\n description = request.POST['description']\n profile = Profile.objects.get(user=request.user)\n new_site = profile.createSite(siteName, description)\n new_site.save()\n return HttpResponseRedirect(reverse('siteEditor',\n kwargs={'siteName': siteName}))\n\n\n@login_required\ndef deleteSite(request):\n if request.method != 'POST':\n return Json405('POST')\n if ('siteName' not in request.POST) or (request.POST['siteName'] == \"\"):\n print(\"siteName not in request.POST\")\n return Json400()\n profile = Profile.objects.get(user=request.user)\n siteName = request.POST['siteName']\n try:\n Site.objects.get(owner=profile, name=siteName).delete()\n except ObjectDoesNotExist:\n print(\"Site name %s does not exist\" % siteName)\n return Json400()\n count = Site.objects.filter(owner=profile).count()\n return JsonResponse({'success': True, 'count': count})\n\n\n@login_required\ndef editSite(request, siteName):\n profile = Profile.objects.get(user=request.user)\n site = Site.objects.get(name=siteName, owner=profile)\n\n if request.method != 'POST':\n return JsonResponse({'siteName': site.name,\n 'description': site.description})\n form = EditSiteForm(request.POST)\n if not form.is_valid(request.user):\n return JsonErrorResponse(400, form.errors)\n\n newName = form.cleaned_data['siteName']\n description = form.cleaned_data['description']\n\n site.name = newName\n site.description = description\n site.save()\n\n return JsonResponse({'success': True})\n\n\n# requires POST request with the following argument:\n# { 'pageName': <name of the page created>\n# 'html': <html of new page; if empty, uses default template>}\n# returns json response of newly added page\n@login_required\ndef addPage(request, siteName):\n if request.method != 'POST':\n return Json405('POST')\n\n form = AddPageForm(request.POST)\n # Validates the form.\n if not form.is_valid(request.user, siteName):\n print(\"form is not valid: %s\", form.errors)\n return JsonErrorResponse(400, form.errors)\n\n site = Site.getSite(request.user.username, siteName)\n pageName = form.cleaned_data['pageName']\n\n new_page = site.createPage(pageName)\n if ('copyPageName' in request.POST and request.POST['copyPageName'] != \"\"):\n copyPageName 
= request.POST['copyPageName']\n try:\n copyPage = site.getPage(copyPageName)\n except ObjectDoesNotExist:\n # the error response must be returned, otherwise copyPage is undefined below\n return HttpResponseBadRequest()\n\n new_page.content_html = copyPage.content_html\n\n new_page.save()\n\n context = {'site': site, 'pages': [new_page]}\n return render(request, 'json/pages.json', context,\n content_type='application/json')\n\n\n@login_required\ndef deletePage(request, siteName):\n if request.method != 'POST':\n return Json405('POST')\n\n if ('pageName' not in request.POST) or (request.POST['pageName'] == \"\"):\n return Json400()\n\n pageName = request.POST['pageName']\n\n try:\n site = Site.getSite(request.user.username, siteName)\n page = Page.objects.get(name=pageName, site=site)\n except ObjectDoesNotExist:\n return Json400()\n\n page.delete()\n return JsonResponse({'success': True})\n\n\n# requires POST request with the following argument:\n# { 'pageNames': <names of the pages being saved>,\n# 'htmls': <htmls of the new pages, in the same order as above> }\n@login_required\ndef savePages(request, siteName):\n def processSavePage(html):\n soup = BeautifulSoup(html, 'html.parser')\n for e in soup.find_all():\n del e['data-medium-editor-element']\n\n return str(soup)\n\n if request.method != 'POST':\n return Json405(\"POST\")\n\n if ('pageNames[]' not in request.POST) or (request.POST['pageNames[]'] == \"\"):\n print('No pageNames[] argument in POST request')\n return Json400()\n if ('htmls[]' not in request.POST) or (request.POST['htmls[]'] == \"\"):\n print('No htmls[] argument in POST request')\n return Json400()\n\n pageNames = request.POST.getlist('pageNames[]')\n htmls = request.POST.getlist('htmls[]')\n\n if (len(pageNames) != len(htmls)):\n print('pageNames and htmls do not have the same length')\n return Json400()\n try:\n site = Site.getSite(request.user.username, siteName)\n except ObjectDoesNotExist:\n print(\"Site %s does not exist\" % siteName)\n return Json400()\n\n # retrieve all pages first. This is for ensuring it wouldn't raise an error\n # in the middle of saving some pages\n pages = []\n for pageName in pageNames:\n try:\n pages.append(Page.objects.get(name=pageName, site=site))\n except ObjectDoesNotExist:\n print(\"Page %s does not exist in %s\" % (pageName, siteName))\n return Json400()\n\n for i in range(len(pageNames)):\n pages[i].content_html = processSavePage(htmls[i])\n pages[i].save()\n\n return JsonResponse({'success': True})\n\n\n@login_required\ndef updateNav(request, siteName):\n if request.method != 'POST':\n return Json405(\"POST\")\n\n if ('nav_html' not in request.POST) or (request.POST['nav_html'] == \"\"):\n print('No nav_html argument in POST request')\n return Json400()\n\n nav_html = request.POST['nav_html']\n\n try:\n site = Site.getSite(request.user.username, siteName)\n except ObjectDoesNotExist:\n print(\"Site %s does not exist\" % siteName)\n return Json400()\n\n site.nav_html = nav_html\n site.save()\n\n return JsonResponse({'success': True})\n\n\n# requires POST request with the following argument:\n# { 'pageNames': <list of names of the pages to be published> }\n# the 'pageNames' argument is required; an empty list is rejected with a 400 response\n@login_required\ndef sitePublish(request, siteName):\n profile = Profile.objects.get(user=request.user)\n if ('pageNames[]' not in request.POST) or (request.POST['pageNames[]'] == \"\"):\n print(\"Pages are not specified; 
Publishing all pages\")\n return Json400\n\n pageNames = request.POST.getlist('pageNames[]')\n pages = []\n for pageName in pageNames:\n pages.append(profile.getPage(siteName, pageName))\n\n for page in pages:\n page.published_html = processPage(request.user, siteName, page)\n page.save()\n\n return JsonResponse({'success': True})\n\n\n# process page for publishing & previewing\ndef processPage(user, siteName, page):\n profile = Profile.objects.get(user=user)\n allPageNames = profile.getAllPages(siteName).values_list('name', flat=True)\n\n def filterEditable(elem):\n try:\n return elem['contenteditable'] == 'true'\n except KeyError:\n return False\n\n # routed the relative link in nav to other pages\n soup = BeautifulSoup(page.site.nav_html, 'html.parser')\n for a in soup.find_all('a'):\n try:\n if a[\"href\"] in allPageNames:\n a[\"href\"] = \"../\" + a[\"href\"] + \"/\"\n # if a tag doesn't contain href\n except KeyError:\n pass\n\n processed_nav_html = str(soup)\n\n # process content_html to have no edtiable material\n soup = BeautifulSoup(page.content_html, 'html.parser')\n for div in soup.find_all('div', class_='empty-workspace-msg'):\n div.decompose()\n for div in soup.find_all('div', class_='delete-ud-wrapper'):\n div.decompose()\n for div in soup.find_all(filterEditable):\n div['contenteditable'] = 'false'\n\n remove_classnames = ['ud']\n for name in remove_classnames:\n for ud in soup.find_all('', class_=name):\n if ud.get('id') == 'ud-focus':\n del ud['id']\n ud['class'].remove(name)\n\n processed_content_html = str(soup)\n\n t = get_template('test_pages/wrapper.html')\n context = {'siteName': page.site.name,\n 'pageName': page.name,\n 'processed_nav_html': processed_nav_html,\n 'processed_content_html': processed_content_html}\n wrapper_html = t.render(context=context)\n\n return wrapper_html\n\n\ndef getAllSites(request):\n if request.method == \"GET\":\n profile = Profile.objects.get(user=request.user)\n sites = Site.objects.filter(owner=profile)\n context = {\"username\": profile.user.username, \"sites\": sites}\n return render(request, 'json/sites.json', context,\n content_type='application/json')\n\n return HttpResponseNotAllowed('GET')\n","sub_path":"src/easel/views/views_sites.py","file_name":"views_sites.py","file_ext":"py","file_size_in_byte":12800,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"378891181","text":"# -*- coding: utf-8 -*-\r\nfrom linepy import *\r\n\r\nclient = LineClient()\r\n#client = LineClient(authToken='AUTHTOKEN')\r\n\r\nclient.log(\"Auth Token : \" + str(client.authToken))\r\n\r\npoll = LinePoll(client)\r\n\r\n# Receive messages from LinePoll\r\ndef RECEIVE_MESSAGE(op):\r\n msg = op.message\r\n\r\n text = msg.text\r\n msg_id = msg.id\r\n receiver = msg.to\r\n sender = msg._from\r\n \r\n # Check content only text message\r\n if msg.contentType == 0:\r\n # Check only group chat\r\n if msg.toType == 2:\r\n # Get sender contact\r\n contact = client.getContact(sender)\r\n txt = '[%s] %s' % (contact.displayName, text)\r\n # Send a message\r\n client.sendMessage(receiver, txt)\r\n # Print log\r\n client.log(txt)\r\n\r\n# Add function to LinePoll\r\npoll.addOpInterruptWithDict({\r\n OpType.RECEIVE_MESSAGE: RECEIVE_MESSAGE\r\n})\r\n\r\nwhile True:\r\n poll.trace()\r\n","sub_path":"examples/echobot.py","file_name":"echobot.py","file_ext":"py","file_size_in_byte":927,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"150881141","text":"import 
FWCore.ParameterSet.Config as cms\n\negammaElectronTkRelIsolation = cms.EDProducer(\"EgammaElectronTkIsolationProducer\",\n absolut = cms.bool(False),\n trackProducer = cms.InputTag(\"generalTracks\"),\n intRadius = cms.double(0.02),\n electronProducer = cms.InputTag(\"pixelMatchGsfElectrons\"),\n extRadius = cms.double(0.2),\n ptMin = cms.double(1.5),\n maxVtxDist = cms.double(0.1)\n)\n\n\n","sub_path":"EgammaAnalysis/EgammaIsolationProducers/python/egammaElectronTkRelIsolation_cfi.py","file_name":"egammaElectronTkRelIsolation_cfi.py","file_ext":"py","file_size_in_byte":403,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"650477993","text":"import pandas as pd\r\nimport numpy as np  # np.arange is used in change_ids below\r\nfrom scipy.sparse import csr_matrix\r\n\r\ndef prefilter_items(data_train, item_feat, take_n_popular=5000):\r\n \r\n weeks_in_year = 52\r\n \r\n # Drop items that have not sold in the last 12 months\r\n data_train_less_n_month = data_train.loc[data_train['week_no'] >= data_train['week_no'].max() - weeks_in_year]\r\n \r\n popularity = data_train_less_n_month.groupby('item_id')[['quantity', 'sales_value']].sum().reset_index()\r\n popularity.rename(columns={'quantity': 'n_sold', 'sales_value': 'price'}, inplace=True)\r\n popularity = popularity.loc[(popularity.n_sold > 0) & (popularity.price > 0)]\r\n popularity['new_sales_value'] = popularity.price / popularity.n_sold\r\n \r\n popularity = popularity.sort_values('new_sales_value', ascending=False).reset_index()\r\n \r\n # Drop overly expensive items (top 3 by unit price)\r\n \r\n popularity = popularity.loc[3::]\r\n\r\n # Drop items that are too cheap (no profit on them). One purchase driven by a mailing costs 60 RUB.\r\n \r\n popularity = popularity.loc[popularity['new_sales_value'] > 1] \r\n \r\n # Drop the least popular items\r\n popularity = popularity.loc[popularity['n_sold'] > 50]\r\n \r\n # Drop the most popular items (top 3)\r\n \r\n popularity = popularity.sort_values('n_sold', ascending=False).reset_index()\r\n popularity = popularity.loc[3::]\r\n \r\n # Drop departments that are uninteresting for recommendations\r\n small_count_departments = ['VIDEO RENTAL', 'AUTOMOTIVE', 'HOUSEWARES', \r\n 'PORK', 'POSTAL CENTER', 'GM MERCH EXP', 'CNTRL/STORE SUP', \r\n 'PROD-WHS SALES', 'DAIRY DELI', 'HBC', 'CHARITABLE', 'RX', 'TOYS',\r\n 'PHOTO', 'DELI/SNACK BAR', 'GRO BAKERY', 'PHARMACY SUPPLY', \r\n 'ELECT &PLUMBING', 'MEAT-WHSE', 'VIDEO']\r\n \r\n \r\n item_feat_ids = item_feat.loc[~item_feat['department'].isin(small_count_departments), 'item_id']\r\n \r\n popularity = popularity.loc[popularity.item_id.isin(item_feat_ids), 'item_id'].tolist()\r\n \r\n top_items = popularity[:take_n_popular]\r\n \r\n # Map everything outside the top items to a placeholder id so we don't lose users\r\n data_train.loc[~data_train['item_id'].isin(top_items), 'item_id'] = 999999 \r\n\r\n return data_train\r\n\r\n\r\ndef postfilter_items():\r\n pass\r\n\r\n\r\ndef train_test_split_by_week(df, name_of_week_column, test_size_weeks):\r\n\r\n week_ratio = df[name_of_week_column].max() - test_size_weeks\r\n\r\n df_train = df[df[name_of_week_column] < week_ratio]\r\n df_test = df[df[name_of_week_column] >= week_ratio]\r\n\r\n return df_train, df_test\r\n\r\n\r\ndef group_df_by(df, grouped_feat, second_feat, new_name_of_second_feat, pick_method):\r\n\r\n if pick_method == 'unique':\r\n grouped_df = df.groupby(grouped_feat)[second_feat].unique().reset_index()\r\n grouped_df.columns = [grouped_feat, new_name_of_second_feat]\r\n\r\n elif pick_method == 'sum':\r\n grouped_df = 
df.groupby(grouped_feat)[second_feat].sum().reset_index()\r\n grouped_df.columns = [grouped_feat, new_name_of_second_feat]\r\n\r\n return grouped_df\r\n\r\n\r\ndef df_to_user_item_matrix(df):\r\n\r\n user_item_matrix = pd.pivot_table(df,\r\n index='user_id',\r\n columns='item_id',\r\n values='quantity',\r\n aggfunc='count',\r\n fill_value=0)\r\n\r\n user_item_matrix = user_item_matrix.astype(float)\r\n\r\n sparce_user_item = csr_matrix(user_item_matrix).tocsr()\r\n\r\n sparce_t_user_item = csr_matrix(user_item_matrix).T.tocsr()\r\n\r\n return user_item_matrix, sparce_user_item, sparce_t_user_item\r\n\r\ndef group_df_train_and_test(train, test, groupby_feat, second_feat, rename_column, aggfunc):\r\n \r\n assert aggfunc in ['unique', 'sum'], 'aggfunc accepts the values unique or sum'\r\n \r\n train_df = group_df_by(train, groupby_feat, second_feat, rename_column, aggfunc)\r\n test_df = group_df_by(test, groupby_feat, second_feat, rename_column, aggfunc)\r\n \r\n return train_df, test_df\r\n\r\ndef change_ids(matrix):\r\n user_id = matrix.index.values\r\n item_id = matrix.columns.values\r\n\r\n matrix_user_id = np.arange(len(user_id))\r\n matrix_item_id = np.arange(len(item_id))\r\n\r\n id_to_user_id = dict(zip(matrix_user_id, user_id))\r\n id_to_item_id = dict(zip(matrix_item_id, item_id))\r\n\r\n user_id_to_id = dict(zip(user_id, matrix_user_id))\r\n item_id_to_id = dict(zip(item_id, matrix_item_id))\r\n\r\n return id_to_user_id, id_to_item_id, item_id_to_id, user_id_to_id\r\n\r\n\r\ndef get_recommendation(user, model, sparce_user_item, id_to_item_id, user_id_to_id, N=5):\r\n\r\n res = [id_to_item_id[rec[0]] for rec in model.recommend(userid=user_id_to_id[user],\r\n user_items=sparce_user_item,\r\n N=N,\r\n filter_already_liked_items=False,\r\n filter_items=None,\r\n recalculate_user=True)]\r\n\r\n return res\r\n\r\n\r\n\r\n","sub_path":"src/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":5429,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"299401693","text":"#_*_ coding:utf-8 _*_\r\n#@Time :2020-11-1517:58\r\n#@Author :lemon_suhang\r\n#@Email :1147967632@qq.com\r\n#@File :1到100累加和.py\r\n#@Software:PyCharm\r\n\r\n\r\n\r\n\r\nn= 1\r\nsum = 0\r\n\r\nwhile n<=100:\r\n sum = sum + n\r\n\r\n n +=1\r\nprint(\"sum\",sum)","sub_path":"第一天/1到100累加和.py","file_name":"1到100累加和.py","file_ext":"py","file_size_in_byte":231,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"367339572","text":"# Definition for a binary tree node.\nimport pdb\n\nclass TreeNode:\n def __init__(self, x):\n self.val = x\n self.left = None\n self.right = None\n\nclass Solution:\n def isValidBST(self, root: TreeNode) -> bool:\n return self.valid(root, float('-inf'), float('inf'))\n \n def valid(self, root, min, max):\n #print('begin root,min,max: ', root.val, min, max)\n if root is None:\n print('empty')\n else:\n print('begin root,min,max: ', root.val, min, max)\n pdb.set_trace()\n if root is None:\n return True\n if root.val <= min or root.val >= max:\n return False \n print('root,min,max: ', root.val, min, max)\n return self.valid(root.left, min, root.val) and self.valid(root.right, root.val, max)\n\n\nnode5 = TreeNode(5)\nnode1 = TreeNode(1)\nnode4 = TreeNode(6)\nnode3 = TreeNode(3)\nnode6 = TreeNode(7)\n\nnode5.left = node1\nnode5.right = node4\nnode4.left = node3\nnode4.right = node6\n\nprint(Solution().isValidBST(node5))\n\n\n# Definition for a binary tree node.\n# class TreeNode:\n# def __init__(self, val=0, left=None, 
right=None):\n# self.val = val\n# self.left = left\n# self.right = right\nclass Solution:\n def isValidBST(self, root: TreeNode) -> bool:\n # Recursive approach: the key is choosing the bounds correctly\n # For the left subtree, the left child only needs an upper bound; the right child needs both an upper and a lower bound\n # For the right subtree, the left child needs both an upper and a lower bound; the right child only needs a lower bound\n '''\n def compute_BST(root, max_=float('inf'),min_=float('-inf')):\n if not root:\n return True\n cur = root.val\n if cur >= max_ or cur <= min_:\n return False\n if not compute_BST(root.left,cur,min_):\n return False\n if not compute_BST(root.right,max_,cur):\n return False\n return True\n return compute_BST(root)\n '''\n\n # Iterative approach\n # DFS or BFS both work; DFS is used here (BFS would pop(0) instead)\n # Trades extra space for time\n # The idea is to keep an iterable container (a list used as a stack) preloaded with the root; each iteration checks whether the container is empty, and if not, validates the node's bounds and pushes its children, until the container drains. Like the recursion, later values depend on earlier ones through the dynamically updated max_ and min_\n if not root:\n return True\n result = [(root, float('inf'), float('-inf'))]\n while result:\n # process one node per iteration\n root, max_, min_ = result.pop()\n if not root:\n continue\n val = root.val\n if val >= max_ or val <= min_:\n return False\n result.append((root.left, val, min_))\n result.append((root.right, max_, val))\n # if every node satisfies its bounds, result drains and we return True\n return True\n\n # In-order traversal visits left-root-right, i.e. a valid BST in ascending order, so store the values during an in-order walk and check that the list is sorted ascending\n if not root:\n return True\n result = []\n\n def mid_traversal(root):\n if root.left:\n mid_traversal(root.left)\n result.append(root.val)\n if root.right:\n mid_traversal(root.right)\n\n mid_traversal(root)\n\n if result == sorted(result):\n return True\n else:\n return False","sub_path":"98_Validate_Binary_Search_Tree.py","file_name":"98_Validate_Binary_Search_Tree.py","file_ext":"py","file_size_in_byte":3511,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"502105162","text":"import logging\nlogger = logging.getLogger(__name__)\nimport sys\nfrom io import StringIO\nfrom typing import List, Optional\n\nfrom production.model import Model\nfrom production.commands import *\nfrom production.basics import Pos, Diff\nfrom production.solver_interface import ProblemType, Solver, SolverResult, Fail\n\nfrom production.data_files import *\nfrom production.pyjs_emulator.run import run\n\nfrom production.deconstruct.lib import clear_all\nfrom production.group_programs import move_x, move_y, move_z, single\nfrom production.solver_utils import bounding_box\n\ndef cubical(model, high=False):\n (pos1, pos2) = bounding_box(model)\n\n width = pos2.x - pos1.x + 1\n height = pos2.y - pos1.y + 1\n depth = pos2.z - pos1.z + 1\n\n (x_cur, x_next) = (0, pos1.x)\n (y_cur, y_next) = (0, pos2.y + 1)\n (z_cur, z_next) = (0, pos1.z)\n\n prog = move_x(pos1.x)\n prog += move_z(pos1.z - 1)\n\n if high:\n prog += single(Flip())\n\n prog += clear_all(model, pos1.x, 0, pos1.z - 1, width, height, depth)\n\n if high:\n prog += single(Flip())\n\n prog += move_x(-pos1.x)\n prog += move_z(-pos1.z + 1)\n\n return prog + single(Halt())\n\n\nclass CubicalDeconstructor(Solver):\n def __init__(self, args):\n self.high = len(args) > 0 and args[0] == 'high'\n\n def scent(self) -> str:\n return 'Cubical 0.1.2' + (' (high)' if self.high else '')\n\n def supports(self, problem_type: ProblemType) -> bool:\n return problem_type == ProblemType.Disassemble\n\n def solve(\n self, name: str,\n src_model: Optional[bytes],\n tgt_model: Optional[bytes]) -> SolverResult:\n assert tgt_model is None\n m = Model.parse(src_model)\n trace = cubical(m, high=self.high)\n trace_data = compose_commands(trace)\n return SolverResult(trace_data, extra={})\n\n\ndef write_solution(bytetrace, number): # -> IO ()\n with open('./FD{0:03d}.nbt'.format(number), 'wb') as f:\n 
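# persist the composed command trace as a binary .nbt solution file in the working directory\n        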
f.write(bytetrace)\n\n\ndef deconstruction_by_id(id):\n (src, tgt) = full_problem('FD{0:03d}'.format(id))\n return src\n\ndef main():\n task_number = int(sys.argv[1]) if len(sys.argv) > 1 else 1\n mbytes = deconstruction_by_id(task_number)\n\n solver = CubicalDeconstructor([])\n res = solver.solve('main', mbytes, None)\n\n write_solution(res.trace_data, task_number)\n\nif __name__ == '__main__':\n logging.basicConfig(level=logging.DEBUG, format='%(levelname).1s %(asctime)s %(module)10.10s:%(lineno)-4d %(message)s')\n main()\n\n","sub_path":"production/deconstruct/cubical.py","file_name":"cubical.py","file_ext":"py","file_size_in_byte":2491,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"64237276","text":"import os\nimport atexit\nfrom typing import Optional, Dict, List\n\nfrom aim.engine.repo import AimRepo\nfrom aim.artifacts.artifact_writer import ArtifactWriter\nfrom aim.sdk.session.utils import exception_resistant\nfrom aim.artifacts import *\nfrom aim.engine.configs import (\n AIM_BRANCH_ENV_VAR,\n AIM_COMMIT_ENV_VAR,\n AIM_AUTOMATED_EXEC_ENV_VAR,\n AIM_DEFAULT_BRANCH_NAME,\n AIM_MAP_METRICS_KEYWORD,\n)\n\n\nclass Session:\n sessions = {} # type: Dict[str, List['Session']] = {}\n\n @exception_resistant\n def __init__(self, repo: Optional[str] = None,\n experiment: Optional[str] = None):\n self.active = False\n\n self.repo = self.get_repo(repo, experiment)\n\n Session.sessions.setdefault(self.repo.path, [])\n Session.sessions[self.repo.path].append(self)\n\n # Start a new run\n self.repo.commit_init()\n self.metrics = {}\n\n self.active = True\n self._run_hash = self.repo.active_commit\n self._repo_path = self.repo.path\n\n # Finalize run\n atexit.register(self.close)\n\n @exception_resistant\n def __del__(self):\n self.close()\n\n @property\n def run_hash(self):\n return self._run_hash\n\n @property\n def repo_path(self):\n return self._repo_path\n\n @exception_resistant\n def close(self):\n if self.active:\n self.active = False\n # Set metrics\n self.set_params(self.metrics, name=AIM_MAP_METRICS_KEYWORD)\n\n self.repo.close_records_storage()\n self.repo.commit_finish()\n\n if self.repo.path in Session.sessions \\\n and self in Session.sessions[self.repo.path]:\n Session.sessions[self.repo.path].remove(self)\n if len(Session.sessions[self.repo.path]) == 0:\n del Session.sessions[self.repo.path]\n\n @exception_resistant\n def track(self, *args, **kwargs):\n if self.repo is None:\n raise FileNotFoundError('Aim repository was not found')\n\n artifact_name = None\n\n if not len(args):\n raise TypeError('artifact name is not specified')\n\n if isinstance(args[0], str):\n artifact_name = args[0]\n elif isinstance(args[0], int) or isinstance(args[0], float):\n # Autodetect Metric artifact\n artifact_name = metric\n kwargs['value'] = args[0]\n args = []\n elif isinstance(args[0], dict):\n # Autodetect Dictionary(Map) artifact\n artifact_name = dictionary\n kwargs['value'] = args[0]\n args = []\n\n if artifact_name is None:\n raise TypeError('artifact name is not specified')\n\n if artifact_name not in globals():\n raise TypeError('Aim cannot track: \\'{}\\''.format(artifact_name))\n\n # Get corresponding class\n obj = globals()[artifact_name]\n\n # Create an instance\n inst = obj(*args, **kwargs, aim_session_id=id(self))\n\n # Collect metrics values\n if isinstance(inst, Metric):\n self.metrics.setdefault(inst.name, [])\n for metric_item in self.metrics[inst.name]:\n if metric_item['context'] == inst.hashable_context:\n if inst.value < 
metric_item['values']['min']:\n metric_item['values']['min'] = inst.value\n if inst.value > metric_item['values']['max']:\n metric_item['values']['max'] = inst.value\n metric_item['values']['last'] = inst.value\n break\n else:\n self.metrics[inst.name].append({\n 'context': inst.hashable_context,\n 'values': {\n 'min': inst.value,\n 'max': inst.value,\n 'last': inst.value,\n },\n })\n\n writer = ArtifactWriter()\n writer.save(self.repo, inst)\n\n return inst\n\n @exception_resistant\n def set_params(self, params: dict, name: Optional[str] = None):\n if name is None:\n name = AIM_NESTED_MAP_DEFAULT\n return self.track(params, namespace=name)\n\n @staticmethod\n @exception_resistant\n def get_repo(path: Optional[str] = None,\n experiment_name: Optional[str] = None) -> AimRepo:\n # Auto commit\n if os.getenv(AIM_AUTOMATED_EXEC_ENV_VAR):\n # Get Aim environment variables\n branch_name = os.getenv(AIM_BRANCH_ENV_VAR)\n commit_hash = os.getenv(AIM_COMMIT_ENV_VAR)\n else:\n commit_hash = AimRepo.generate_commit_hash()\n if experiment_name is not None:\n branch_name = experiment_name\n else:\n # FIXME: Get active experiment name from given repo\n # if path is specified. Currently active experiment name of\n # the highest repo in the hierarchy will be returned.\n branch_name = AimRepo.get_active_branch_if_exists() \\\n or AIM_DEFAULT_BRANCH_NAME\n\n if path is not None:\n repo = AimRepo(path)\n if not repo.exists():\n if not repo.init():\n raise ValueError('can not create repo `{}`'.format(path))\n repo = AimRepo(path, branch_name, commit_hash)\n else:\n if AimRepo.get_working_repo() is None:\n path = os.getcwd()\n repo = AimRepo(path)\n if not repo.init():\n raise ValueError('can not create repo `{}`'.format(path))\n repo = AimRepo(path, branch_name, commit_hash)\n else:\n repo = AimRepo.get_working_repo(branch_name, commit_hash)\n\n return repo\n\n\nDefaultSession = Session\n","sub_path":"aim/sdk/session/session.py","file_name":"session.py","file_ext":"py","file_size_in_byte":5862,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"345734149","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.2 (3180)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.linux-x86_64/egg/ally/core/http/impl/processor/fetcher.py\n# Compiled at: 2013-10-02 09:54:40\n\"\"\"\nCreated on Jul 31, 2012\n\n@package: ally core http\n@copyright: 2012 Sourcefabric o.p.s.\n@license: http://www.gnu.org/licenses/gpl-3.0.txt\n@author: Gabriel Nistor\n\nProvides the standard headers handling.\n\"\"\"\nfrom ally.api.operator.type import TypeModelProperty, TypeModel\nfrom ally.api.type import Input, typeFor, TypeClass, Type\nfrom ally.container.ioc import injected\nfrom ally.core.http.spec.transform.support_model import DataModel, IFetcher\nfrom ally.core.spec.resources import Path, Node, Invoker, INodeInvokerListener\nfrom ally.design.processor.attribute import requires\nfrom ally.design.processor.context import Context\nfrom ally.design.processor.handler import HandlerProcessorProceed\nfrom weakref import WeakKeyDictionary\nimport logging\nlog = logging.getLogger(__name__)\n\nclass Request(Context):\n \"\"\"\n The request context.\n \"\"\"\n path = requires(Path)\n invoker = requires(Invoker)\n arguments = requires(dict)\n\n\nclass Response(Context):\n \"\"\"\n The response context.\n \"\"\"\n encoderData = requires(dict)\n encoderDataModel = requires(DataModel)\n isSuccess = 
requires(bool)\n\n\n@injected\nclass FetcherHandler(HandlerProcessorProceed, INodeInvokerListener):\n \"\"\"\n Implementation for a handler that provides the fetcher used in getting the filtered models.\n \"\"\"\n typeResponse = TypeClass(Response)\n\n def __init__(self):\n \"\"\"\n Construct the encoder.\n \"\"\"\n assert isinstance(self.typeResponse, Type), 'Invalid type response %s' % self.typeResponse\n super().__init__()\n self._cache = WeakKeyDictionary()\n\n def process(self, request: Request, response: Response, **keyargs):\n \"\"\"\n @see: HandlerProcessorProceed.process\n \n Provide the fetcher.\n \"\"\"\n assert isinstance(request, Request), 'Invalid request %s' % request\n assert isinstance(response, Response), 'Invalid response %s' % response\n if response.isSuccess is False:\n return\n else:\n if response.encoderDataModel is None:\n return\n invokerMain = request.invoker\n assert isinstance(invokerMain, Invoker), 'Invalid invoker %s' % invokerMain\n assert isinstance(response.encoderData, dict), 'Invalid encoder data %s' % response.encoderData\n fetch = self.extractFetch(response.encoderDataModel)\n if fetch:\n references, pack = set(fetch), self._cache.get(invokerMain)\n if pack:\n fetcher, fetcherReferences = pack\n if not (references == fetcherReferences or references.issubset(fetcherReferences)):\n fetcher = None\n references.update(fetcherReferences)\n else:\n fetcher = None\n if fetcher is None:\n assert isinstance(request.path, Path), 'Invalid request path %s' % request.path\n node = request.path.node\n assert isinstance(node, Node), 'Invalid path node %s' % node\n node = node.root\n node.addStructureListener(self)\n fetcher = FetcherInvoker(invokerMain)\n self._cache[invokerMain] = (fetcher, references)\n for reference, invoker in fetch.items():\n assert isinstance(invoker, Invoker)\n modelType, indexes = typeFor(reference), []\n if isinstance(modelType, TypeModelProperty):\n assert isinstance(modelType, TypeModelProperty)\n modelType = modelType.type\n assert isinstance(modelType, TypeModel), 'Invalid mode type %s' % modelType\n for inp in invoker.inputs:\n assert isinstance(inp, Input)\n if inp.hasDefault:\n indexes.append(fetcher.addInput(inp))\n else:\n if isinstance(inp.type, TypeModelProperty):\n assert isinstance(inp.type, TypeModelProperty)\n if inp.type.parent == modelType:\n indexes.append(None)\n continue\n for k, inpm in enumerate(invokerMain.inputs):\n assert isinstance(inpm, Input)\n if inp.type == inpm.type:\n indexes.append(k)\n break\n else:\n log.warning('Cannot locate any input main invoker %s input for invoker %s and input %s', invokerMain, invoker, inp)\n break\n\n else:\n fetcher.addFetch(reference, invoker, indexes)\n\n fetcher.inputs.append(Input('$response', self.typeResponse, True, None))\n request.invoker = fetcher\n if request.arguments is None:\n request.arguments = {}\n request.arguments['$response'] = response\n return\n\n def extractFetch(self, data, fetch=None):\n \"\"\"\n Extracts from the data model all the required fetch model values.\n \n @return: dictionary{Reference:Invoker}\n A dictionary containing the reference of the model and as a value the invoker that delivers the model for\n the reference.\n \"\"\"\n assert isinstance(data, DataModel), 'Invalid data model %s' % data\n if fetch is None:\n fetch = {}\n assert isinstance(fetch, dict), 'Invalid fetch %s' % fetch\n if data.fetchReference and data.path:\n assert isinstance(data.path, Path), 'Invalid data path %s' % data.path\n assert isinstance(data.path.node, Node), 'Invalid 
data path node %s' % data.path.node\n invoker = data.path.node.get\n if invoker:\n fetch[data.fetchReference] = invoker\n if data.fetchData:\n self.extractFetch(data.fetchData, fetch)\n elif DataModel.datas in data:\n for cdata in data.datas.values():\n self.extractFetch(cdata, fetch)\n\n return fetch\n\n def onInvokerChange(self, node, old, new):\n \"\"\"\n @see: INodeInvokerListener.onInvokerChange\n \"\"\"\n self._cache.clear()\n\n\nclass FetcherInvoker(Invoker):\n \"\"\"\n Invoker that provides the model fetching.\n \"\"\"\n __slots__ = ('invoker', 'references', 'invokers')\n\n def __init__(self, invoker):\n \"\"\"\n Construct the fetcher.\n \"\"\"\n assert isinstance(invoker, Invoker), 'Invalid invoker %s' % invoker\n Invoker.__init__(self, invoker.name, invoker.method, invoker.output, list(invoker.inputs), invoker.hints, invoker.infoIMPL, invoker.infoAPI)\n self.invoker = invoker\n self.references = {}\n self.invokers = []\n\n def addInput(self, inp):\n \"\"\"\n Add a new optional input to the invoker inputs.\n \n @param inp: Input\n The input to be added.\n @return: integer\n The index of the input.\n \"\"\"\n assert isinstance(inp, Input), 'Invalid input %s' % inp\n assert inp.hasDefault, 'Input is not optional %s' % inp\n self.inputs.append(Input('%s.%s' % (inp.name, len(self.invokers)), inp.type, True, inp.default))\n return len(self.inputs) - 1\n\n def addFetch(self, reference, invoker, indexes):\n \"\"\"\n Add a new reference entry in the fetcher.\n \n @param reference: Reference\n The reference for fetching.\n @param invoker: Invoker\n The invoker associated with the reference.\n @param indexes: list[integer]\n The indexes in the invoker arguments to be used for the invoker at fetching, basically all the indexes of\n the arguments (beside of the model id one which is None in the indexes) to be used for call the invoker.\n \"\"\"\n assert isinstance(invoker, Invoker), 'Invalid invoker %s' % invoker\n assert isinstance(indexes, list), 'Invalid indexes list %s' % indexes\n self.references[reference] = len(self.invokers)\n self.invokers.append((invoker, indexes))\n\n def invoke(self, *args):\n \"\"\"\n @see: Invoker.invoke\n \"\"\"\n response = args[(-1)]\n assert isinstance(response, Response), 'Invalid response %s' % response\n response.encoderData.update(fetcher=Fetcher(self, args))\n return self.invoker.invoke(*args[:len(self.invoker.inputs)])\n\n\nclass Fetcher(IFetcher):\n \"\"\"\n The fetcher implementation.\n \"\"\"\n __slots__ = ('fetcher', 'args', '_cache')\n\n def __init__(self, fetcher, args):\n \"\"\"\n Construct the fetcher.\n \"\"\"\n assert isinstance(fetcher, FetcherInvoker), 'Invalid fetcher invoker %s' % fetcher\n assert isinstance(args, (tuple, list)), 'Invalid arguments %s' % args\n self.fetcher = fetcher\n self.args = args\n self._cache = {}\n\n def fetch(self, reference, valueId):\n \"\"\"\n @see: IFetcher.fetch\n \"\"\"\n value, values = self, self._cache.get(reference)\n if values is None:\n values = self._cache[reference] = {}\n else:\n value = values.get(valueId, value)\n if value is self:\n fetcher = self.fetcher\n assert isinstance(fetcher, FetcherInvoker)\n index = fetcher.references.get(reference)\n if index is None:\n value = None\n else:\n invoker, indexes = fetcher.invokers[index]\n assert isinstance(invoker, Invoker)\n value = invoker.invoke(*((valueId if k is None else self.args[k]) for k in indexes))\n values[valueId] = value\n return 
value","sub_path":"pycfiles/ally_py-0.9.0-py3.2/fetcher.cpython-32.py","file_name":"fetcher.cpython-32.py","file_ext":"py","file_size_in_byte":10360,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"638960381","text":"\"\"\"\n\nScript validating if the experiment meets simple straightforward conditions like:\n * task started after ended\n * computation part started before task inputs were downloaded\n * etc\n\nExpected input is the same as output format of script prepare_data_for_gantt.py\n\n\"\"\"\n\nimport sys\n\n\ndef main():\n filename = sys.argv[1]\n infile = open(filename, \"r\")\n\n for line in infile:\n task = parse(line)\n validate(task)\n\n infile.close()\n\n\ndef validate(task):\n if task.task_start == -1.0:\n print(\"INVALID INPUT: task {} hasn't started at all\".format(task.id))\n return\n\n if task.task_end == -1.0:\n print(\"INVALID INPUT: task {} hasn't finished at all\".format(task.id))\n return\n\n if task.computation_start == -1.0:\n print(\"INVALID INPUT: task {} hasn't started computation at all\".format(task.id))\n return\n\n if task.computation_end == -1.0:\n print(\"INVALID INPUT: task {} hasn't finished computation at all\".format(task.id))\n return\n\n if not ( task.task_start <= task.computation_start <= task.computation_end <= task.task_end ):\n print(\"INVALID INPUT: task {} doesn't hold time order\".format(task.id))\n print(\n \" {} <= {} <= {} <= {}\".format(task.task_start, task.computation_start, task.computation_end, task.task_end))\n return\n\n\nclass InvalidInputFormatException(Exception):\n pass\n\n\nclass Task:\n def __init__(self):\n self.vm = None\n self.id = None\n self.task_start = None\n self.computation_start = None\n self.computation_end = None\n self.task_end = None\n\n\ndef parse(line):\n split_line = line.split(\" \")\n if len(split_line) != 6:\n raise InvalidInputFormatException()\n\n id, vm, task_start, computation_start, computation_end, task_end = split_line\n\n task = Task()\n task.id = id\n task.vm = vm\n task.task_start = float(task_start)\n task.computation_start = float(computation_start)\n task.computation_end = float(computation_end)\n task.task_end = float(task_end)\n\n return task\n\nif __name__ == \"__main__\":\n main()\n\n","sub_path":"scripts/mequrel/single_experiment_validator.py","file_name":"single_experiment_validator.py","file_ext":"py","file_size_in_byte":2120,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"214420846","text":"import os\r\npath = r'C:\\Users\\Sphinx\\Desktop\\Python-Toolkit\\maiz_train'\r\nfiles = os.listdir(path)\r\n\r\n\r\ntotal_folder = 0\r\nfor file_index, file in enumerate(files):\r\n file_path = os.path.join(path, file)\r\n os.rename(os.path.join(path, str(file_index+1)), os.path.join(path, str(file_index)))\r\n total_folder = total_folder + 1\r\n\r\nprint(\"After preprocessing(rename): image number: \", total_folder)\r\n","sub_path":"file system/Rename/rename_folder_givenNumber.py","file_name":"rename_folder_givenNumber.py","file_ext":"py","file_size_in_byte":403,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"312963503","text":"import time\nfrom adafruit_circuitplayground.express import cpx\n\n# Sometimes, our cpx cannot always do what we ask.\n# Here we ask our cpx board to try something and if it\n# cannot do it, it will run the code in the except block\n# of code.\n# \n# In this case, we will ask the cpx to try to log data\n# to 
a file. This will only run if the cpx board is in data logging\n# mode. In USB write mode, the cpx will run the code in the\n# except block to show that it is erroring.\ntry:\n # We ask cpx to open a stream to a file named data.txt.\n # The stream is remembered by our code as the variable data_log.\n # This file is opened in \"appending\" mode, which means we can only add\n # to the file.\n with open(\"/data.txt\", \"a\") as data_log:\n\n # Forever,\n while True:\n\n x, y, z = cpx.acceleration\n temperature = cpx.temperature\n light = cpx.light\n \n # We can visualize data live here using the pixels as we'd like.\n # This could cause the cpx to run out of batteries though.\n\n\n # This line takes the sensor data and puts it in a format where\n # the numbers will be separated by commas like so:\n # x, y, z, temperature, light\n data = '{:f}, {:f}, {:f}, {:f}, {:f}\\n'.format(x, y, z, temperature, light)\n # This writes the data to our data_log stream.\n data_log.write(data)\n # \n data_log.flush()\n\n # We can indicate status of data logging here.\n\n # Wait 1 second before collecting data again.\n time.sleep(1)\n\n\n# This code will run if the cpx is not in data logging mode or\n# if the cpx runs out of space. Why the code in the try block\n# cannot run will be remembered as the variable named error.\nexcept OSError as error:\n\n delay = 0.5\n\n # Error code of 28 means the cpx is out of space.\n # The delay variable is shorter if the cpx runs out of space.\n if error.args[0] == 28:\n delay = 0.25\n\n # Forever\n while True:\n\n # not cpx.red_led will switch between True and False,\n # causing the red led to blink\n cpx.red_led = not cpx.red_led\n # Sleep inbetween red led on and off.\n # The delay variable is 0.5 for most errors and\n # 0.25 for out of space error.\n # This means that the red led will blink at 0.25 seconds\n # when the cpx is out of space and at 0.5 seconds\n # for all other errors.\n time.sleep(delay)\n","sub_path":"cpx-data-logging/accelerometer-temperature-light/code.py","file_name":"code.py","file_ext":"py","file_size_in_byte":2502,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}{"seq_id":"22159752","text":"#!/usr/bin/env python\n#coding=utf-8\n__author__ = 'LJjia'\n# *******************************************************************\n# Filename @ List.py\n# Author @ Jia LiangJun\n# Create date @ 2018/9/29 20:10\n# Email @ LJjiahf@163.com\n# Description @ ListBox\n# ********************************************************************\n\n\n\nimport tkinter as tk\n\nwindow = tk.Tk()\nwindow.title('My window')\nwindow.geometry('400x400')\n'''Note: pack() is used here and the objects are packed in order, so if the window is set too small\nand too much content is packed, some objects will not be visible in the window'''\n\nvar1 = tk.StringVar()\n# Create a Label\nl=tk.Label(window,bg='yellow',width=10,textvariable=var1)\nl.pack()\n\n\n\ndef print_selection():\n # Get the value currently selected by the cursor\n value=lb.get(lb.curselection())\n var1.set(value)\n\n\n# The command argument is the function executed when the button is clicked\nb = tk.Button(window, text='print selection', height=2, command=print_selection)\nb.pack()\n\n\nvar2=tk.StringVar()\n#var2.set(2)\nvar2.set([11,22,33,44,55])\n# Create a Listbox; its listvariable argument can be a single value, a list or a tuple\nlb=tk.Listbox(window,listvariable=var2)\n# The Listbox lb can of course also call the insert method, just like the text widget above\nlb.insert('end',[1,2,3,4])\nfor i in ['t1','t2','t3']:\n 
lb.insert('end',i)\nlb.insert(1,'first')\nlb.insert(2,'second')\n# Delete an entry\nlb.delete(2)\nlb.pack()\n\nwindow.mainloop()\n","sub_path":"tutorial/List.py","file_name":"List.py","file_ext":"py","file_size_in_byte":1436,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}{"seq_id":"364457234","text":"from __future__ import absolute_import\n\"\"\"\nDraw a termite plot to visualize topics and words from an LDA.\n\"\"\"\n\nimport logging\n\nimport blaze as blz\nfrom odo import into\nimport pandas as pd\nimport bokeh.plotting as plt\nfrom bokeh.models.sources import ColumnDataSource\nimport math\n\n\nclass Termite(object):\n \"\"\"A Bokeh Termite Visualization for LDA results analysis.\n\n Parameters\n ----------\n input_file: string\n A csv file from an LDA output containing columns \"word\", \"topic\" and \"weight\".\n title: string\n The title for your termite plot\n\n >>> termite = Termite(\"topik/tests/data/lda.csv\", \"My lda results\")\n >>> termite.plot('my_termite.html')\n\n \"\"\"\n def __init__(self, input_file, title):\n print(input_file)\n self.input_file = input_file\n self.title = title\n\n def plot(self, output_file=\"termite.html\"):\n t = blz.Data(self.input_file)\n df = pd.read_csv(self.input_file)\n\n MAX = blz.compute(t.weight.max())\n MIN = blz.compute(t.weight.min())\n\n \n # t = blz.transform(t, size=blz.sqrt((t.weight - t.min)/(t.max - t.min))*50)\n\n WORDS = t['word'].distinct()\n WORDS = into(list, WORDS)\n topics = t['topic'].distinct()\n topics = into(list, topics)\n # Convert topics to strings\n TOPICS = [str(i) for i in topics]\n\n source = into(pd.DataFrame, t)\n # Create a size variable to define the size of the circle for the plot.\n source['size'] = source['weight'].map(lambda x: math.sqrt((x - MIN)/(MAX - MIN))*50)\n\n plt.output_file(output_file)\n\n data_source = ColumnDataSource(source)\n\n p = plt.figure(x_range=TOPICS, y_range=WORDS,\n plot_width=1000, plot_height=1700,\n title=self.title)\n\n p.circle(x=\"topic\", y=\"word\", size=\"size\", fill_alpha=0.6, source=data_source)\n #p.xaxis().major_label_orientation = np.pi/3\n logging.info(\"generating termite plot for file %s\" % self.input_file)\n plt.show(p)\n","sub_path":"static/custom/process/topik/viz.py","file_name":"viz.py","file_ext":"py","file_size_in_byte":2036,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}{"seq_id":"204025922","text":"from tkinter import *\nfrom random import randrange as rn, choice\nimport time\nimport math\n\nroot = Tk()\n\nc = Canvas(root, bg='#00ffff',width=800, height=800)\nc.pack()\n\ncolors = ['black']#,'red','grey','lightgreen','green','cyan','yellow','orange','brown','red','blue','pink']\nglobal x,y,r\nball = dict(x = int(rn(100,700)), y = int(rn(100,700)), r = int(rn(20,100)), vx = int(rn(-15,15)), vy = int(rn(-15,15)), ay = rn(-6,6))\n\n\nprint(ball['x'],' ',ball['y'])\n\ndef ball_create():\n global b\n b = c.create_oval(ball['x']-ball['r'],ball['y']-ball['r'],ball['x']+ball['r'],ball['y']+ball['r'],fill=choice(colors))\n\ndef click(event):\n # if (abs(event.x-x)<=r)and(abs(event.y-y)<=r):\n print('Hit!')\n #ball_create()\n move_ball()\n\ndef move_ball():\n c.delete(ALL)\n ball['x'] += ball['vx']*0.2\n ball['y'] += ball['vy']*0.2 + ball['ay']*0.8\n #print(ball['x'],' ',ball['y'],' ',ball['r'])\n if ((ball['x'] <= ball['r'] and ball['vx']<0) or (ball['x'] >= 800-ball['r'] and ball['vx']>0)):\n ball['vx'] = (-1)*ball['vx']\n ball['x'] += ball['vx']*0.2\n ball['y'] += ball['vy']*0.2 + 
ball['ay']*0.8\n #print(ball['x'],' . ',ball['y'])\n if ((ball['y'] <= ball['r'] and ball['vy']<0) or (ball['y'] >= 800-ball['r'] and ball['vy']>0)):\n ball['vy'] = (-1)*ball['vy']\n ball['x'] += ball['vx']*0.2\n ball['y'] += ball['vy']*0.2 + ball['ay']*0.8\n #print(ball['x'],' _ ',ball['y'])\n\n\n #b.move(int(ball['x']),int(ball['y']))\n ball_create()\n root.after(1,move_ball)\n #print(ball['x'],' ',ball['y'])\n\n\n\nball_create()\nc.bind('<Button-1>', click)\nmainloop()\n","sub_path":"catch_ball.py","file_name":"catch_ball.py","file_ext":"py","file_size_in_byte":1648,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"193528087","text":"import sys\n\nifilename = sys.argv[1]\nifile = open(ifilename, \"r\")\n\nlines = []\nfor line in ifile:\n lines.append(line)\nifile.close()\n\nhexstr = []\nfor line in lines:\n if line[0] != ':':\n print(\"expected RECORD MARK ':' not found\")\n continue\n RECLEN = int(line[1:3], 16)\n DATA = line[9:(9+2*RECLEN)]\n for i in range(0, len(DATA), 2):\n hexstr.append(int(DATA[i:(i+2)], 16))\nhexstr = hexstr[2:]\n\nstring = \"\"\nfor c in hexstr:\n if c == 0:\n break\n string += chr(c)\n\nofile = open(\"log.txt\", \"w\")\nofile.write(string)\nofile.close()\n","sub_path":"ihex2txt.py","file_name":"ihex2txt.py","file_ext":"py","file_size_in_byte":568,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"54135640","text":"import os\n\nfrom django.http import JsonResponse\n\nfrom django.shortcuts import render\nfrom django.views.generic import View\nfrom django.conf import settings\n\nfrom .src.main import VideoIntelligenceRunner\nfrom .src.config import gcs_bucket_url\n\n\nclass DashboardView(View):\n template_name = 'dashboard.html'\n\n def get(self, request, *args, **kwargs):\n #changing to check\n context = {'video_path': 'Here_how_Trump_North_Korea_summit_failed.mp4'}\n #context = {'video_path': 'data/' + video_name}\n return render(request, self.template_name, context=context)\n\n def post(self, request, *args, **kwargs):\n query = request.POST.get('query')\n video_url_obj = request.POST.get('video_url')\n video_button_obj = request.POST.get('video_button')\n video_url = str(video_url_obj)\n video_button = str(video_button_obj)\n dashbaord_video_name = os.path.split(video_url)[-1]\n # print(query)\n # print(video_url_obj,video_url,dashbaord_video_name)\n\n runner = VideoIntelligenceRunner()\n\n if query and hasattr(settings, 'ANALYTICS_DATA'):\n data = settings.ANALYTICS_DATA\n words = data['speech_analytics']['words']\n runner.search(words, query)\n data = runner.data\n else:\n data = runner.main(query,dashbaord_video_name,video_button)\n # setattr(settings, 'ANALYTICS_DATA', data)\n # data = {'speech_analytics': []}\n\n return JsonResponse(data)\n\n","sub_path":"video_analytics_webapp/dashboard/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1505,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"311789504","text":"import unittest\nfrom django.test import Client\nimport views\nimport models\n\nclass MineralTestCase(unittest.TestCase):\n c = Client()\n\n def setUp(self):\n models.Mineral.objects.all().delete()\n self.mineral1 = models.Mineral.objects.create(name='Unittestite', group='sulfides')\n self.mineral2 = models.Mineral.objects.create(name='Randomite', group='sulfides')\n self.mineral3 = models.Mineral.objects.create(name='Quartzite', group='sulfides')\n self.mineral4 = 
models.Mineral.objects.create(name='Meteorite', group='sulfides')\n self.mineral5 = models.Mineral.objects.create(name='Mondonite', group='sulfides')\n self.minerals = models.Mineral.objects.all()\n\n self.mineral_length = len(self.minerals)\n\n def test_mineral_list(self):\n response = self.c.get('/')\n\n code = response.status_code\n context = response.context['all_minerals']\n context_length = len(context)\n\n self.assertTrue(code == 200)\n self.assertEqual(self.mineral_length, context_length)\n\n def test_mineral_detail(self):\n response = self.c.get('/mineral/3')\n\n code = response.status_code\n context = response.context['mineral']\n\n self.assertTrue(code == 200)\n self.assertEqual(self.mineral3, context)\n\n def test_mineral_letter_search(self):\n response = self.c.get('/minerals/M')\n\n code = response.status_code\n context = response.context['all_minerals']\n\n self.assertTrue(code == 200)\n self.assertEqual(2, len(context))\n\n def test_mineral_group_search(self):\n response = self.c.get('/groups/sulfides')\n\n code = response.status_code\n context = response.context['all_minerals']\n\n self.assertTrue(code == 200)\n self.assertEqual(5, len(context))\n\n def test_mineral_list_post(self):\n response = self.c.post('/', {'mineral_search': 'sulfides'})\n\n code = response.status_code\n context = response.context['all_minerals']\n\n self.assertTrue(code == 200)\n self.assertEqual(5, len(context))\n","sub_path":"Minerals/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":2093,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"181615829","text":"#!/usr/bin/python\n# Twitter.py: Class that wraps the functionality of the Tweepy API.\n\nimport tweepy\nfrom AppPanic import panic\nfrom time import sleep\nimport os\nimport threading\n\n\nclass Twitter():\n\n # Init the latest @reply.\n def init_reply_context(self):\n\n try:\n self.reply_context = self.api_conn.mentions_timeline()[0].id\n except:\n panic(\"Error\", \"Can't access context\")\n\n print(\"Successfully added context with reply {0}\".format(self.reply_context))\n\n #Constructor...\n def __init__(self):\n\n # Load the access keys.\n try:\n with open('access_keys.txt') as access_file:\n\n items = access_file.readlines()\n self.consumer = items[0].rstrip()\n self.consumer_secret = items[1].rstrip()\n self.access = items[2].rstrip()\n self.access_secret = items[3].rstrip()\n except:\n panic(\"Error\", \"Can't access access_keys.txt\")\n\n # Apply the application keys to our Tweepy instance.\n authentication = tweepy.OAuthHandler(self.consumer, self.consumer_secret)\n authentication.set_access_token(self.access, self.access_secret)\n\n # Authenticate us with twitter.\n self.api_conn = tweepy.API(authentication)\n\n # Init the latest @reply.\n self.reply_context = 0\n self.init_reply_context()\n\n # Start the monitoring for @reply\n def start_monitor(self):\n\n while True:\n\n # This list will hold the new @reply's with each iteration.\n new_statuses = []\n\n # We only want to check every n seconds.\n sleep(30)\n\n # Try to get the new statuses with the current context.\n try:\n new_statuses = self.api_conn.mentions_timeline(since_id=self.reply_context)\n except:\n panic(\"Error\", \"Can't get new mentions with current context\")\n\n # Print notification message if there are no new @reply's\n if len(new_statuses) == 0:\n print(\"No new @reply's detected\")\n\n else:\n # Play sound from shell.\n os.system(\"afplay sounds/Tune.m4a &\")\n\n # Iterate through the 
statuses and print them.\n for status in new_statuses:\n print(\"@{0}: {1}\".format(status.author.screen_name, status.text))\n\n # Update our new context for subsequent requests.\n print(\"Successfully updated context with reply {0}\".format(new_statuses[-1].id))\n self.reply_context = new_statuses[-1].id\n","sub_path":"Twitter.py","file_name":"Twitter.py","file_ext":"py","file_size_in_byte":2639,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}{"seq_id":"136484327","text":"# -*- coding: utf-8 -*-\nimport sys\nsys.path.append('/tools/endpoint')\nfrom share.s_tool import *\nfrom share.s_sys import *\n\ndef servers_status_func(session,servers):\n result = {'code':0,'msg':[],'errmsg':[],'log':[]}\n if servers == 'server':\n child = mypopen('''ps -ef | grep -E -v \"grep|login_server_start.py|game_server_start.py|game_server_stop.py|login_server_stop.py|update_server_stop.py|update_server_start.py|servers_status.py\" | grep '%s' '''% ('server')).stdout.read()\n parent = mypopen('''ps -ef | grep -E -v \"grep|login_server_start.py|game_server_start.py|game_server_stop.py|login_server_stop.py|update_server_stop.py|update_server_start.p|servers_status.py\" | grep -E '%s|%s' '''% ('ctc','guard')).stdout.read()\n if (child != ''):\n result['log'].append('Services already running\\n %s' %child)\n else:\n result['log'].append('No services running')\n if (parent != ''):\n result['log'].append('Guard processes already running for this service\\n %s' %parent)\n else:\n result['log'].append('No guard process running for this service')\n\n elif servers == 'update':\n pidist = {}\n serlist1 = ''\n serlist2 = ''\n child = mypopen('''ps -ef | grep -E -v \"grep|login_server_start.py|game_server_start.py|game_server_stop.py|login_server_stop.py|update_server_stop.py|update_server_start.py|servers_status.py\" | grep '%s' '''% ('server')).stdout.read()\n parent = mypopen('''ps -ef | grep -E -v \"grep|login_server_start.py|game_server_start.py|game_server_stop.py|login_server_stop.py|update_server_stop.py|update_server_start.py|servers_status.py\" | grep '%s' '''% ('guard')).stdout.read()\n stdoutdata1, stderrdata = mypopen('find /*ss -name update_server.pid').communicate()\n stdoutdata2, stderrdata = mypopen('find /*ss -name guard.pid').communicate()\n waylist = stdoutdata1.split('\\n')[:-1]\n tmpway = stdoutdata2.split('\\n')[:-1]\n for y in range(len(tmpway)):\n waylist.append(tmpway[y])\n for i in range(len(waylist)):\n pid, pidresult = w_rpid(waylist[i],'kread')\n check_result(result, pidresult)\n pidnew = pid.strip('\\x00')\n area = waylist[i].split('/')[1].replace('ss','')\n pidist[pidnew] = area\n parlist1 = child.split('\\n')[:-1]\n parlist2 = parent.split('\\n')[:-1]\n for x in pidist.items():\n for i in range(len(parlist1)):\n if parlist1[i].split()[1] == x[0]:\n log(LOG_DEBUG,parlist1[i]+' '+x[1]+'update')\n serlist1 = serlist1+parlist1[i]+' '+x[1]+'update\\n'\n for x in pidist.items():\n for i in range(len(parlist2)):\n if parlist2[i].split()[1] == x[0]:\n serlist2 = serlist2+parlist2[i]+' '+x[1]+'update\\n'\n if (child != ''):\n result['log'].append('Services already running\\n %s' %serlist1)\n else:\n result['log'].append('No services running')\n if (parent != ''):\n result['log'].append('Guard processes already running for this service\\n %s' % serlist2)\n else:\n result['log'].append('No guard process running for this service')\n\n elif servers == 'database':\n mysql = mypopen('''service mysqld status\n test $? 
-eq 0 && echo \"true\" || echo \"false\" ''')\n stdout = mysql.stdout.readlines()\n stderr = mysql.stderr.readlines()\n if stderr != []:\n result['errmsg'].append(stderr)\n if stdout[-1] == 'true\\n':\n result['log'].append('MySQL service status: running')\n if stdout[-1] == 'false\\n':\n result['log'].append('MySQL service status: stopped')\n\n if ( len(result['errmsg']) == 0 ):\n return obj2str(result)\n else:\n result['code'] = -100\n return obj2str(result)\n\nif __name__ == '__main__':\n log(LOG_DEBUG,servers_status_func(11,'update'))\n","sub_path":"endpoint/yw/servers_status.py","file_name":"servers_status.py","file_ext":"py","file_size_in_byte":3950,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}{"seq_id":"272771368","text":"# -*- coding: utf-8 -*- \n\n##from xml_class import *\nfrom csv_class import add_values\nfrom mithra_class import *\n##import xml.dom.minidom\n##from xml.dom.minidom import Document\nfrom mithra.models import tblValues, tblHood\n#from mithradj import settings\n\ndef update_db_mithra(code_link):\n \n trama_txt= settings.PROJECT_ABSOLUTE_DIR+\"/generator/lectura210709.TXT\"\n output_file= settings.PROJECT_ABSOLUTE_DIR+\"/media/dygraphs/mithra_graphs/output.csv\"\n errores= []\n\n analisis= mithra_def(trama_txt) # opens and analyzes the mithra frame\n## print \"trama_principal\", analisis.tramaControl\n errores.append(analisis.error)\n\n # Create the output file for the graph\n file_salida = open(output_file, \"w\")\n file_salida.write('Time,Sensor1,Sensor2,Sensor3')\n file_salida.close()\n\n## try: \n## xml_doc = Document()\n##\n## xml_chart = xml_doc.createElement(\"chart\")\n## xml_doc.appendChild(xml_chart)\n##\n## xml_series = xml_doc.createElement(\"series\")\n## xml_chart.appendChild(xml_series)\n##\n## xml_graphs = xml_doc.createElement(\"graphs\")\n## xml_chart.appendChild(xml_graphs)\n##\n## for num_graphs in range (1, 4):\n## xml_graph = xml_doc.createElement(\"graph\")\n## xml_graph.setAttribute(\"gid\", str(num_graphs))\n## xml_graphs.appendChild(xml_graph)\n##\n## xml_result= xml_doc.toprettyxml()\n## file_salida = open(output_file, \"w\")\n## file_salida.write(xml_result)\n## file_salida.close()\n## errores.append(\"The .xml structure has been created\")\n \n # iterate over the list of measurements and fill the .xml file \n \n try:\n for each in analisis.arr_medidas:\n medida= analiza_med(each)\n \n## add_values(medida.fecha, medida.hora, medida.sen1, medida.sen2, medida.sen3, output_file)\n add_values(medida.dateTime, medida.sen1, medida.sen2, medida.sen3, output_file)\n \n## insert the result into the db if save_measure is true\n querySave= tblHood.objects.get(code_hood= '0001')\n if querySave.save_measure== True:\n query_hood= tblValues (hood_link= code_link, code_hood= '0001', dateTime= medida.dateTime, sensor1= medida.sen1, sensor2= medida.sen2, sensor3= medida.sen3, unique_code= medida.unique_code)\n query_hood.save()\n\n except:\n errores.append(\"Could not create the list of measurements\")\n\n## except: \n## errores.append(\"Could not create the .xml structure\")\n\n return errores\n","sub_path":"mithra/generatorBORRAR/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2549,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}{"seq_id":"297515843","text":"#!/usr/bin/env python\n\n# Software License Agreement (BSD License)\n#\n# Copyright (c) 2012, Robotiq, Inc.\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following 
conditions\n# are met:\n#\n# * Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n# * Redistributions in binary form must reproduce the above\n# copyright notice, this list of conditions and the following\n# disclaimer in the documentation and/or other materials provided\n# with the distribution.\n# * Neither the name of Robotiq, Inc. nor the names of its\n# contributors may be used to endorse or promote products derived\n# from this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n# \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS\n# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE\n# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,\n# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,\n# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT\n# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN\n# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE\n# POSSIBILITY OF SUCH DAMAGE.\n#\n# Copyright (c) 2012, Robotiq, Inc.\n# Revision $Id$\n\n\"\"\"@package docstring\nCommand-line interface for sending simple commands to a ROS node controlling a 2F gripper.\n\nThis serves as an example for publishing messages on the 'Robotiq2FGripperRobotOutput' topic using the 'Robotiq2FGripper_robot_output' msg type for sending commands to a 2F gripper.\n\"\"\"\n\nimport roslib; roslib.load_manifest('robotiq_2f_gripper_control')\nimport rospy\nfrom robotiq_2f_gripper_control.msg import _Robotiq2FGripper_robot_output as outputMsg\nfrom time import sleep\n\ndef publisher():\n \"\"\"Main loop which requests new commands and publish them on the Robotiq2FGripperRobotOutput topic.\"\"\"\n rospy.init_node('Robotiq2FGripperSimpleController') \n pub = rospy.Publisher('Robotiq2FGripperRobotOutput', outputMsg.Robotiq2FGripper_robot_output, queue_size=1)\n rate_hz = rospy.get_param('rate_hz')\n \n command = outputMsg.Robotiq2FGripper_robot_output();\n rate = rospy.Rate(rate_hz)\n \n while not rospy.is_shutdown():\n try:\n gripper_cmd = rospy.get_param('delayed_gripper_cmd')\n\n # build command msg\n if gripper_cmd == 1:\n command.rACT = 1 # activate\n command.rGTO = 1 # go to action\n command.rATR = 0 # Reset??\n command.rPR = 255 # closed\n command.rSP = 255 # speed\n command.rFR = 150 #force\n else:\n command.rACT = 1 # activate\n command.rGTO = 1 # go to action\n command.rATR = 0 # Reset??\n command.rPR = 0 # position open\n command.rSP = 255 # speed\n command.rFR = 150 #force\n\n # publish to gripper \n pub.publish(command)\n\n except:\n pass\n \n rate.sleep()\n \n\nif __name__ == '__main__':\n publisher()\n","sub_path":"src/robotiq/robotiq_2f_gripper_control/nodes/gripper_cmd_pub.py","file_name":"gripper_cmd_pub.py","file_ext":"py","file_size_in_byte":3537,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"379895647","text":"import django_filters\n\nfrom .models import Ingredient\n\n\nclass IngredientsFilter(django_filters.FilterSet):\n \"\"\"Ingredients Filter\"\"\"\n name = django_filters.CharFilter(\n field_name='name',\n lookup_expr='istartswith'\n )\n\n class Meta:\n model = Ingredient\n fields 
= [\n 'name',\n ]\n","sub_path":"backend/foodgram/ingredients/filters.py","file_name":"filters.py","file_ext":"py","file_size_in_byte":334,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"140935135","text":"# import matplotlib.pyplot as plt, mpld3\n# import numpy as np\n# from matplotlib.patches import Rectangle\n# from matplotlib.collections import PatchCollection\nimport pandas as pd\nimport json\nimport sys\nimport ast\n\n\ndef initializeAnalysis(shapes):\n newshapes = []\n for x in shapes.split(\" \"):\n newshapes.append(ast.literal_eval(x))\n\n props = section_properties(newshapes)\n\n result = {\n \"props\": props,\n \"geom\": newshapes\n }\n\n print(json.dumps(result, indent=2))\n return result\n\n\ndef section_properties(test, mirror=False, xbar_init=0, ybar_init=0):\n columns = [\"el\", \"base\", \"height\", \"Area\", \"xbar\", \"Ax\", \"Ax^2\", \"Ioy\"]\n data_y = pd.DataFrame(columns=columns)\n\n # Y moment\n for i in range(len(test)):\n basey = test[i][2]\n heighty = test[i][3]\n areay = basey * heighty\n xbar = test[i][0] + basey / 2\n Ax = areay * xbar\n Ax_sq = Ax * xbar\n Ioy = (heighty * basey ** 3) / 12\n\n data_y.at[i, \"el\"] = i + 1\n data_y.at[i, \"base\"] = basey\n data_y.at[i, \"height\"] = heighty\n data_y.at[i, \"Area\"] = areay\n data_y.at[i, \"xbar\"] = xbar\n data_y.at[i, \"Ax\"] = Ax\n data_y.at[i, \"Ax^2\"] = Ax_sq\n data_y.at[i, \"Ioy\"] = Ioy\n\n # Sum all columns\n sums_y = data_y.sum(axis=0)\n xbar_cg = sums_y[\"Ax\"] / sums_y[\"Area\"]\n Ioy_all = sums_y[\"Ioy\"] + sums_y[\"Ax^2\"] - xbar_cg * sums_y[\"Ax\"]\n\n columns = [\"el\", \"base\", \"height\", \"Area\", \"ybar\", \"Ay\", \"Ay^2\", \"Iox\"]\n data_x = pd.DataFrame(columns=columns)\n\n # X moment\n for i in range(len(test)):\n basex = test[i][2]\n heightx = test[i][3]\n areax = basex * heightx\n ybarx = test[i][1] + heightx / 2\n Ay_x = areax * ybarx\n Ay_sqx = Ay_x * ybarx\n Iox = (basex * heightx ** 3) / 12\n\n data_x.at[i, \"el\"] = i + 1\n data_x.at[i, \"base\"] = basex\n data_x.at[i, \"height\"] = heightx\n data_x.at[i, \"Area\"] = areax\n data_x.at[i, \"ybar\"] = ybarx\n data_x.at[i, \"Ay\"] = Ay_x\n data_x.at[i, \"Ay^2\"] = Ay_sqx\n data_x.at[i, \"Iox\"] = Iox\n\n # Sum all columns\n sums_x = data_x.sum(axis=0)\n ybar_cg = sums_x[\"Ay\"] / sums_x[\"Area\"]\n Iox_all = sums_x[\"Iox\"] + sums_x[\"Ay^2\"] - ybar_cg * sums_x[\"Ay\"]\n\n sec_props = {\n \"Iox\": Iox_all,\n \"Ioy\": Ioy_all,\n \"xbar\": xbar_cg,\n \"ybar\": ybar_cg\n }\n\n return sec_props\n\n\n# def main(args):\n# print(\"test\")\n# print(args)\n# print(len(args))\n# print(args[1].split(\" \"))\n# # return initializeAnalysis([ast.literal_eval(args[1]), ast.literal_eval(args[1])])\n\n\nif __name__ == \"__main__\":\n# main(sys.argv)\n# print(\"exit main\")\n# print(sys.argv[1])\n initializeAnalysis(sys.argv[1])\n\n# python sectionProps.py '[ 0.50, 0, 1.00, 0.30]' '[ 0.85, 0.3, 0.30, 2.40]' '[ 0, 2.7, 2.00, 0.30]'\n\n# [(0.50, 0), 1.00, 0.30],\n# [(0.85, 0.3), 0.30, 2.40],\n# [(0 , 2.7), 2.00, 0.30]","sub_path":"controllers/sectionProps.py","file_name":"sectionProps.py","file_ext":"py","file_size_in_byte":3008,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"66332689","text":"import json\nimport requests\nimport time\nimport datetime\n\nheaders = {\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3282.140 Safari/537.36',\n 'Cookie': 
'_zap=e06beca8-088b-44db-b72e-80599bdaa80d; d_c0=\"AIAC9w0iuwyPTmO6yHiurlfWnI1UXdxtQSE=|1511496714\"; __utma=51854390.2024113287.1519484755.1519484755.1519484755.1; __utmz=51854390.1519484755.1.1.utmcsr=(direct)|utmccn=(direct)|utmcmd=(none); __utmv=51854390.110-1|2=registration_date=20150622=1^3=entry_date=20150622=1; capsion_ticket=\"2|1:0|10:1519487310|14:capsion_ticket|44:OGJkMzU1ZDFmNTAyNDZiNTlkZGQ0YTQ2YjUzZDIxOGE=|03fdd00dcefbbffda22e6a4f5aee11f0ee6e82f9aef287c3e543120caac67c7a\"; z_c0=\"2|1:0|10:1519487367|4:z_c0|92:Mi4xcVFETUFRQUFBQUFBZ0FMM0RTSzdEQ1lBQUFCZ0FsVk5oOWQtV3dEdnJIZndIZjkzMjNRODRCRlpsMVluSC0yQ2NB|b8c4703d8fe6ea5165676bb28cbd13645c71a6481cedc7919c5b82990879886d\"; q_c1=a88b6273b9ce48a783fbfe88c9a9f4f9|1519612729000|1511347384000; aliyungf_tc=AQAAABbMVhqvXAUAHOU3O+o8G1C9RnuT; _xsrf=ede75c90-aa47-4448-864b-e3ab40ed66f0'\n}\n\nform_data = {\n 'include': 'data[*].created,answer_count,follower_count,author',\n 'offset': 0,\n 'limit': 20\n}\n\n\ndef get_info(form_data):\n data = []\n url = 'https://www.zhihu.com/api/v4/members/yan-xi-5-31/following-questions'\n response = requests.get(url, headers=headers, data=form_data)\n json_dir = json.loads(response.text)\n for info in json_dir['data']:\n find = {\n 'title': info['title'],\n 'created': str(datetime.datetime.fromtimestamp(info['created'])),\n 'updated_time': str(datetime.datetime.fromtimestamp(info['updated_time'])),\n 'follower_count': str(info['follower_count']),\n 'answer_count': str(info['answer_count']),\n 'site': info['url']\n }\n data.append(find)\n time.sleep(2)\n return data\n\n\nif __name__ == '__main__':\n all_data = []\n for offset in range(0, 420, 20):\n form_data['offset'] = offset\n all_data.extend(get_info(form_data))\n\n with open('C:\\\\Users\\\\D\\\\Desktop\\\\zhihu.csv', 'w') as csv_file:\n csv_file.write('title,created,updated_time,follower_count,answer_count,site\\n')\n for zhihu in all_data:\n csv_file.write(','.join([\n zhihu['title'],\n zhihu['created'],\n zhihu['updated_time'],\n zhihu['follower_count'],\n zhihu['answer_count'],\n zhihu['site']]) + '\\n'\n )\n\n\n","sub_path":"code/zhihu_v2.py","file_name":"zhihu_v2.py","file_ext":"py","file_size_in_byte":2675,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}{"seq_id":"284295952","text":"# %%\r\nimport numpy as np\r\nimport pandas as pd\r\nfrom sklearn.datasets import load_boston\r\nfrom sklearn.linear_model import Ridge\r\nfrom sklearn.model_selection import cross_val_score\r\n\r\n\r\n# %%\r\nboston = load_boston()\r\n\r\nbostonDF = pd.DataFrame(boston.data, columns = boston.feature_names)\r\n\r\nbostonDF[\"PRICE\"] = boston.target\r\nprint(\"boston dataset size : \", bostonDF.shape)\r\n\r\ny_target = bostonDF[\"PRICE\"]\r\nX_data = bostonDF.drop(\"PRICE\", axis = 1, inplace = False)\r\n\r\n# %%\r\nridge = Ridge(alpha = 10)\r\nneg_mse_scores = cross_val_score(ridge, X_data, y_target, scoring=\"neg_mean_squared_error\", cv = 5)\r\nrmse_scores = np.sqrt(-1 * neg_mse_scores)\r\navg_rmse = np.mean(rmse_scores)\r\nprint(\"Individual Negative MSE scores for 5 folds : \", np.round(neg_mse_scores, 3))\r\nprint(\"Individual RMSE scores for 5 folds : \", np.round(rmse_scores, 3))\r\nprint(\"Average RMSE for 5 folds : {0:.3f}\".format(avg_rmse))\r\n\r\n# %%\r\nalphas = [0, 0.1, 1, 10, 100]\r\n\r\nfor alpha in alphas:\r\n ridge = Ridge(alpha=alpha)\r\n\r\n neg_mse_scores = cross_val_score(ridge, X_data, y_target, scoring=\"neg_mean_squared_error\", cv = 5)\r\n avg_rmse = np.mean(np.sqrt(-1 * neg_mse_scores))\r\n print(\"Average RMSE for 5 folds when alpha is {0} : 
{1:.3f}\".format(alpha, avg_rmse))\r\n\r\n# %%\r\nimport matplotlib.pyplot as plt\r\nimport seaborn as sns\r\n\r\nfig, axs = plt.subplots(figsize = (18, 6), nrows = 1, ncols = 5)\r\ncoeff_df = pd.DataFrame()\r\n\r\nfor pos, alpha in enumerate(alphas):\r\n ridge = Ridge(alpha = alpha)\r\n ridge.fit(X_data, y_target)\r\n\r\n coeff = pd.Series(data = ridge.coef_, index = X_data.columns)\r\n colname = \"alpha\" + str(alpha)\r\n coeff_df[colname] = coeff\r\n\r\n coeff = coeff.sort_values(ascending = False)\r\n axs[pos].set_title(colname)\r\n axs[pos].set_xlim(-3, 6)\r\n sns.barplot(x = coeff.values, y = coeff.index, ax = axs[pos])\r\n\r\nplt.show()\r\n\r\n# %%\r\nridge_alphas = [0, 0.1, 1, 10, 100]\r\n\r\nsort_column = \"alpha\" + str(ridge_alphas[0])\r\ncoeff_df.sort_values(by=sort_column, ascending = False)\r\n\r\n# %%\r\n","sub_path":"Python Machine Learning Perfect Guide/regression/ridge_regression.py","file_name":"ridge_regression.py","file_ext":"py","file_size_in_byte":1991,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}{"seq_id":"514214843","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('photo', '0018_blog_postcount'),\n ]\n\n operations = [\n migrations.RenameField(\n model_name='blog',\n old_name='postCount',\n new_name='post_count',\n ),\n ]\n","sub_path":"photo/migrations/0019_auto_20151104_1549.py","file_name":"0019_auto_20151104_1549.py","file_ext":"py","file_size_in_byte":386,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}{"seq_id":"166109657","text":"def problem18():\n with open(\"/tmp/data\", \"r\") as data:\n data = data.read()\n data_arr = list(i.split(' ') for i in data.split('\\n')[:-1])\n\n def max_length(triangle):\n if len(triangle) == 1:\n return int(triangle[0][0])\n left_max = max_length(list(i[:-1] for i in triangle[1:]))\n right_max = max_length(list(i[1:] for i in triangle[1:]))\n return int(triangle[0][0]) + (left_max if left_max > right_max else right_max)\n\n print(max_length(data_arr))\n\n\ndef problem19():\n import calendar\n\n def days_nr(month, year):\n if month in (9, 4, 6, 11):\n return 30\n elif month in (1, 3, 5, 7, 8, 10, 12):\n return 31\n else:\n return 29 if calendar.isleap(year) else 28\n\n def next_day(n):\n return n + 1 if n < 7 else 1\n\n months = list(range(1, 13))\n years = list(range(1900, 2001))\n current_day = 1\n hits = 0\n\n for year in years:\n for month in months:\n count_of_days = days_nr(month, year)\n for i in range(1, count_of_days + 1):\n if year > 1900 and i == 1 and current_day == 7:\n hits += 1\n current_day = next_day(current_day)\n print(hits)\n\n\ndef problem21():\n from functools import reduce\n\n cache = {}\n\n def factors(n):\n return set(reduce(list.__add__,\n ([i, n // i] for i in range(1, int(n ** 0.5) + 1) if n % i == 0)))\n\n def div_sum(n):\n if cache.get(n) is not None:\n return cache.get(n)\n sum_factors = sum(factors(n)) - n\n cache[n] = sum_factors\n return sum_factors\n\n cache = {}\n am_numbers = set()\n\n for i in range(2, 10000):\n div_sum_tmp = div_sum(i)\n if div_sum(div_sum_tmp) == i and i != div_sum_tmp:\n am_numbers.add(i)\n am_numbers.add(div_sum_tmp)\n\n print(sum(am_numbers))\n\n\ndef problem22():\n print(sum((index + 1) * (sum('ABCDEFGHIJKLMNOPQRSTUVWXYZ'.index(letter) + 1 for letter in name)) for index, name in\n enumerate(sorted(open(\"/tmp/names.txt\", 
\"r\").read().replace('\"', '').split(',')))))\n\n\ndef problem23():\n from functools import reduce\n\n abudant_set = set()\n\n def factors_no_n(n):\n result = set(reduce(list.__add__,\n ([i, n // i] for i in range(1, int(n ** 0.5) + 1) if n % i == 0)))\n result.remove(n)\n return result\n\n def is_abudant(n):\n return sum(factors_no_n(n)) > n\n\n def is_sum_possible(n):\n for j in abudant_set:\n # if j > n: return False\n if n - j in abudant_set:\n return True\n return False\n\n for i in range(1, 28124):\n if is_abudant(i):\n abudant_set.add(i)\n\n result = 0\n for i in range(1, 28124):\n if i % 100 == 0:\n print(\"progress..{}\".format(i))\n if not is_sum_possible(i):\n result += i\n\n print(result)\n\ndef problem24():\n from math import factorial\n\n def get(remaining, nrs, acc):\n nr = next(iter(nrs))\n if factorial(len(nrs)) > remaining:\n factminusone = factorial(len(nrs) - 1)\n nr = nrs[nrs.index(nr) + 1]\n while remaining - factminusone > factminusone:\n remaining -= factminusone\n nr = nrs[nrs.index(nr) + 1]\n acc.append(nr)\n nrs.remove(nr)\n get(remaining - factminusone, nrs, acc)\n else:\n print(''.join(map(str, (acc + list(reversed(nrs))))))\n get(1000000, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9], list())\n\ndef problem25():\n cache = {}\n def fib(n):\n result = cache.get(n)\n if result:\n return result\n if n <= 2:\n cache[n] = 1\n return 1\n result = fib(n-1) + fib(n - 2)\n cache[n] = result\n return result\n\n for i in range(10, 1000000):\n result = fib(i)\n if len(str(result)) >= 1000:\n print(i)\n return\n\ndef problem26():\n def recurring_length(nr, denominator, acc):\n nr = nr * 10 * len(str(denominator))\n decimal = nr%denominator\n if decimal == 0:\n return 0\n\n prev = str(nr)+\"/\"+str(denominator)\n if prev in acc:\n return len(acc)\n else :\n acc.append(prev)\n return recurring_length(decimal, denominator, acc)\n\n maxvalue = 0\n maxnr = 0\n for i in range(1,1000):\n currlength = recurring_length(1,i,list())\n if currlength > maxvalue:\n maxvalue = currlength\n maxnr = i\n print(maxnr)\n\ndef problem27():\n import math\n cache = {}\n def is_prime(n):\n if n in cache:\n return cache[n]\n if n == 2:\n cache[n] = True\n return True\n if n%2 == 0 or n <= 1:\n cache[n] = False\n return False\n sqr = int(math.sqrt(n)) + 1\n for divisor in range(3, sqr, 2):\n if n%divisor == 0:\n cache[n] = False\n return False\n cache[n] = True\n return True\n\n def value(n,a,b):\n return n*n + a * n + b\n\n def cons_nr(a,b):\n for i in range(1,1000):\n if not is_prime(value(i,a,b)):\n return i\n maxNr = 0\n maxProduct = 0\n for i in range(-1000,1000):\n for y in range(-1000,1000):\n if cons_nr(i,y) > maxNr:\n maxNr = cons_nr(i,y)\n maxProduct = i * y\n print(maxProduct)\n\ndef problem28():\n d1incr = 2\n d2incr = 4\n diag1 = 1\n diag2 = 1\n sum = 0\n for i in range(1,1002):\n sum += diag1 + diag2\n diag1 += d1incr\n diag2 += d2incr\n d1incr += 2\n if i % 2 == 0:\n d2incr += 4\n print(sum - 1)\n\n# How many distinct terms are in the sequence generated by ab for 2 ≤ a ≤ 100 and 2 ≤ b ≤ 100?\ndef problem29():\n resultset = set()\n prev = 2\n for i in range(2,101):\n for j in range(2,101):\n prev *= i\n resultset.add(prev)\n prev = i + 1\n print(\"Result is {}\".format(len(resultset)))\n\ndef problem30():\n power = 5\n totalSum = 0\n for i in range(2,531441):\n sum = 0\n for j in str(i):\n sum += int(j) ** power\n if sum == i:\n totalSum += i\n\n 
print(totalSum)\n\nproblem30()\n\n\n","sub_path":"python0-30/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6379,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}{"seq_id":"40483986","text":"import logging\nimport logging.handlers\n\ntest_logger = logging.getLogger('SysLogger')\ntest_logger.setLevel(logging.DEBUG)\n\nhandler = logging.handlers.SysLogHandler(address = '/dev/log', facility = 'local6')\n\n\ntest_logger.addHandler(handler)\n\ntest_logger.debug('Test log...')\n","sub_path":"conf/logger.py","file_name":"logger.py","file_ext":"py","file_size_in_byte":274,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}{"seq_id":"576177631","text":"import pygame\r\nfrom pygame.sprite import Sprite\r\n\r\nclass Armour(Sprite):\r\n \"\"\"a class to manage the defence vehicle\"\"\"\r\n\r\n def __init__(self, ai_game):\r\n \"\"\"Initialise the armour and set its start position\"\"\"\r\n super().__init__()\r\n self.screen = ai_game.screen\r\n self.screen_rect = ai_game.screen.get_rect()\r\n self.settings = ai_game.settings\r\n\r\n #load the image of the armour and get its rect\r\n self.image = pygame.image.load('images/armour.png')\r\n self.rect = self.image.get_rect()\r\n\r\n #start each new armour at the bottom center of the screen\r\n self.rect.midbottom = self.screen_rect.midbottom\r\n #movement flags\r\n self.moving_right = False\r\n self.moving_left = False\r\n\r\n #store a decimal value for the armour's horizontal pos\r\n self.x = float(self.rect.x)\r\n\r\n def update(self):\r\n \"\"\"update armour position based on movement flags using x value \r\n NOT rect\"\"\"\r\n if self.moving_right and self.rect.right < self.screen_rect.right:\r\n self.x += self.settings.armour_speed\r\n if self.moving_left and self.rect.left > 0:\r\n self.x -= self.settings.armour_speed\r\n\r\n #update rect object from self.x\r\n self.rect.x = self.x\r\n\r\n def blitme(self):\r\n \"\"\"Draw armour at its current location\"\"\"\r\n self.screen.blit(self.image, self.rect)\r\n\r\n def center_armour(self):\r\n \"\"\"center armour on screen\"\"\"\r\n self.rect.midbottom = self.screen_rect.midbottom\r\n self.x = float(self.rect.x)","sub_path":"Alien_Invasion/armour.py","file_name":"armour.py","file_ext":"py","file_size_in_byte":1591,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}{"seq_id":"71743413","text":"# -*- coding: utf-8 -*-\r\n#START HERE BELOW\r\n#INPUT\r\nn = int(input(\"Enter the value of n: \"))\r\ni = 1\r\ncont = 1\r\nwhile i<=n:\r\n if n>0:\r\n cont = cont*i\r\n i = i+ 1\r\nprint (\"%d!= %d\" % (n,cont))","sub_path":"moodledata/vpl_data/303/usersdata/296/83266/submittedfiles/testes.py","file_name":"testes.py","file_ext":"py","file_size_in_byte":209,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}{"seq_id":"58123857","text":"from Calculator import Calculator # import the class that handles the calculator functions\r\n\r\nc = Calculator() # create a Calculator object\r\nans = None\r\noperators = '** * / // - +'.split() # list of available operators for use in the calculator\r\n\r\n\"\"\"This calculator is intended to handle most expressions \r\navailable on a standard calculator, while handling user\r\nerrors at the same time, such as invalid expressions or \r\nzero division errors. 
The calculator will run until the\r\nuser types \"exit\", and the user has the option of carrying over\r\ntheir answer to perform more calculations, or clearing their \r\nanswer and starting over again. In addition, several blank\r\nprint lines were added to enhance readability and ensure that\r\nlines are not placed too closely together.\"\"\"\r\n\r\nwhile ans != 'exit':\r\n ans = input('Please enter the equation you want to evaluate, one character at a time. If you want to stop, type \"exit\": ').lower().strip()\r\n\r\n try: # try the below, and if it fails, move to the except blocks to catch errors.\r\n if ans == '=':\r\n c.push(ans)\r\n result = c.equals()\r\n print(result)\r\n add_to_answer = input('Type \"yes\" to add on to your answer, or \"no\" to start over: ')\r\n \r\n if add_to_answer != 'yes' and add_to_answer != 'no': # handling the case where a user inputs the wrong word\r\n while add_to_answer != 'yes' and add_to_answer != 'no':\r\n print('Sorry, please enter \"yes\" or \"no\".')\r\n add_to_answer = input('Type \"yes\" to add on to your answer, or \"no\" to start over: ')\r\n\r\n if add_to_answer == 'yes':\r\n c.expression = result.split()[-1]\r\n \r\n else:\r\n c.clear()\r\n \r\n elif add_to_answer == 'yes':\r\n c.expression = result.split()[-1]\r\n # allow the user to carry over their answer, or clear it.\r\n else:\r\n c.clear()\r\n \r\n else:\r\n if ans.isdecimal() or ans in operators:\r\n c.push(ans) # push the answer onto the expression, which is separated by a space in the method definition.\r\n \r\n else:\r\n if ans != 'exit'.lower().strip(): #if the answer is not valid, let the user know and do not add it to the expression.\r\n print('Sorry, that is not a valid character. Please try again.\\n')\r\n \r\n except SyntaxError: # handle cases such as \"7 5 =\"\r\n print()\r\n print('Sorry, one or more of the characters you entered were invalid. Please start over and make sure you type the correct characters!\\n')\r\n c.clear()\r\n print()\r\n \r\n except ZeroDivisionError: # handle ZeroDivisionError cases.\r\n print()\r\n print('Sorry, you cannot divide by zero! Please start over and make sure you don\\'t divide by zero!')\r\n c.clear()\r\n print()\r\n\r\nprint()\r\nprint('Thanks for using our calculator! Please try again soon!') # say goodbye!\r\n","sub_path":"calculator_app.py","file_name":"calculator_app.py","file_ext":"py","file_size_in_byte":3170,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"489861338","text":"from logo import logo\nfrom random import choice\nimport time\n\n\ndef board(moves):\n current_board = f\" {moves['7']} | {moves['8']} | {moves['9']} \\n\" \\\n f\" - | - | - \\n\" \\\n f\" {moves['4']} | {moves['5']} | {moves['6']} \\n\" \\\n f\" - | - | - \\n\" \\\n f\" {moves['1']} | {moves['2']} | {moves['3']} \\n\"\n print(current_board)\n\n\ndef get_mode():\n mode = input('Would you like to play a 1 or 2 player game? Please enter \"1\" or \"2\": ')\n if mode.isnumeric and int(mode) in range(1, 3):\n return mode\n else:\n print('Mode input is invalid, please try again')\n get_mode()\n\n\ndef valid_input(player_move):\n if player_move.isnumeric() and int(player_move) in range(1, 10):\n return True\n else:\n print(\"Invalid input provided, please try again.\")\n return False\n\n\ndef check_move_available(player_move, moves):\n if moves[player_move] == \" \":\n return True\n else:\n print(\"That space is already taken. 
Please try again.\")\n return False\n\n\ndef check_win(marker, moves, player):\n if moves['7'] == moves['8'] == moves['9'] == marker \\\n or moves['4'] == moves['5'] == moves['6'] == marker \\\n or moves['1'] == moves['2'] == moves['3'] == marker \\\n or moves['7'] == moves['4'] == moves['1'] == marker \\\n or moves['8'] == moves['5'] == moves['2'] == marker \\\n or moves['9'] == moves['6'] == moves['3'] == marker \\\n or moves['1'] == moves['5'] == moves['9'] == marker \\\n or moves['7'] == moves['5'] == moves['3'] == marker:\n print(f'Congratulations {player}, you are the winner!')\n return True\n elif \" \" not in moves.values():\n print(\"Unlucky this time. It's a draw!\")\n return True\n else:\n return False\n\n\ndef tic_tac_toe():\n print(logo)\n print('Welcome to Tic-Tac-Toe! Use the numbers key pad to select your move, '\n 'e.g. \"7\" is top left, \"5\" is centre and \"3\" is bottom right.\\n')\n print(\" 7 | 8 | 9 \\n\"\n \" - | - | - \\n\"\n \" 4 | 5 | 6 \\n\"\n \" - | - | - \\n\"\n \" 1 | 2 | 3 \\n\")\n print('If you wish to exit the game at any time, please enter \"q\".\\n')\n\n mode = get_mode()\n player1 = input('Player 1, please enter your name: ')\n if mode == \"2\":\n player2 = input('Player 2, please enter your name: ')\n else:\n player2 = \"Computer\"\n\n moves = {'7': ' ', '8': ' ', '9': ' ',\n '4': ' ', '5': ' ', '6': ' ',\n '1': ' ', '2': ' ', '3': ' ', }\n\n player = player1\n game_on = True\n board(moves)\n\n while game_on:\n if player == player1 or mode == '2':\n player_move = input(f'{player}, select your move: ')\n else:\n time.sleep(0.75)\n available_spaces = [key for (key, value) in moves.items() if value == \" \"]\n player_move = choice(available_spaces)\n print(f'The Computer has chosen space {player_move}.')\n if player_move.lower() == 'q':\n print(\"Thanks for playing!\")\n game_on = False\n elif valid_input(player_move) and check_move_available(player_move, moves):\n if player == player1:\n moves[player_move] = \"X\"\n if check_win(\"X\", moves, player):\n game_on = False\n player = player2\n else:\n moves[player_move] = \"O\"\n if check_win(\"O\", moves, player):\n game_on = False\n player = player1\n board(moves)\n\n if input('Would you like to play another game? 
(\"y\" or \"n\"): ').lower() == \"y\":\n tic_tac_toe()\n else:\n print(\"Thanks for playing!\")\n\n\ntic_tac_toe()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3781,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"651366384","text":"# -*- coding: utf-8 -*-\nimport os\nimport sys\nimport re\nimport math\nimport codecs\nfrom bisect import bisect_right\nfrom typing import Callable\nfrom collections import defaultdict\nfrom xml.etree import ElementTree as ET\nfrom urllib.error import HTTPError, URLError\nimport json\nimport urllib.request, urllib.error, urllib.parse\nfrom functools import reduce, lru_cache\nimport itertools\nfrom typing import List\n\ntry:\n p = os.path.dirname(os.path.abspath(__file__))+\"/sources\"\n from sources.local_settings import *\n sys.path.insert(0, p)\n sys.path.insert(0, SEFARIA_PROJECT_PATH)\nexcept ImportError:\n pass\nfrom sefaria.datatype import jagged_array\nfrom sefaria.model import *\nfrom sefaria.model.schema import TitleGroup\n\n\ngematria = {}\ngematria['א'] = 1\ngematria['ב'] = 2\ngematria['ג'] = 3\ngematria['ד'] = 4\ngematria['ה'] = 5\ngematria['ו'] = 6\ngematria['ז'] = 7\ngematria['ח'] = 8\ngematria['ט'] = 9\ngematria['י'] = 10\ngematria['כ'] = 20\ngematria['ך'] = 20\ngematria['ל'] = 30\ngematria['מ'] = 40\ngematria['ם'] = 40\ngematria['נ'] = 50\ngematria['ן'] = 50\ngematria['ס'] = 60\ngematria['ע'] = 70\ngematria['פ'] = 80\ngematria['ף'] = 80\ngematria['צ'] = 90\ngematria['ץ'] = 90\ngematria['ק'] = 100\ngematria['ר'] = 200\ngematria['ש'] = 300\ngematria['ת'] = 400\n\ninv_gematria = {value: key for key, value in gematria.items()}\n\nwordToNumber = {}\nwordToNumber['ראשון'] = 1\nwordToNumber['שני'] = 2\nwordToNumber['שלישי'] = 3\nwordToNumber['רביעי'] = 4\nwordToNumber['חמישי'] = 5\nwordToNumber['ששי'] = 6\nwordToNumber['שביעי'] = 7\nwordToNumber['שמיני'] = 8\nwordToNumber['תשיעי'] = 9\nwordToNumber['עשירי'] = 10\n\nhe_char_ord = {\n 'א': 1,\n 'ב': 2,\n 'ג': 3,\n 'ד': 4,\n 'ה': 5,\n 'ו': 6,\n 'ז': 7,\n 'ח': 8,\n 'ט': 9,\n 'י': 10,\n 'כ': 11,\n 'ך': 11,\n 'ל': 12,\n 'מ': 13,\n 'ם': 13,\n 'נ': 14,\n 'ן': 14,\n 'ס': 15,\n 'ע': 16,\n 'פ': 17,\n 'ף': 17,\n 'צ': 18,\n 'ץ': 18,\n 'ק': 19,\n 'ר': 20,\n 'ש': 21,\n 'ת': 22\n}\n\nnum_to_char_dict = {1: \"א\",\n2: \"ב\",\n3: \"ג\",\n4: \"ד\",\n5: \"ה\",\n6: \"ו\",\n7: \"ז\",\n8: \"ח\",\n9: \"ט\",\n10: \"י\",\n11: \"כ\",\n12: \"ל\",\n13: \"מ\",\n14: \"נ\",\n15: \"ס\",\n16: \"ע\",\n17: \"פ\",\n18: \"צ\",\n19: \"ק\",\n20: \"ר\",\n21: \"ש\",\n22: \"ת\",\n}\n\n\ndef isGematria(txt):\n txt = txt.replace('.','')\n if txt.find(\"ך\")>=0:\n txt = txt.replace(\"ך\", \"כ\")\n if txt.find(\"ם\")>=0:\n txt = txt.replace(\"ם\", \"מ\")\n if txt.find(\"ף\")>=0:\n txt = txt.replace(\"ף\", \"פ\")\n if txt.find(\"ץ\")>=0:\n txt = txt.replace(\"ץ\", \"צ\")\n if txt.find(\"טו\")>=0:\n txt = txt.replace(\"טו\", \"יה\")\n if txt.find(\"טז\")>=0:\n txt = txt.replace(\"טז\", \"יו\")\n if len(txt) == 2:\n letter_count = 0\n for i in range(9):\n if inv_gematria[i+1]==txt[letter_count:2+letter_count]:\n return True\n if inv_gematria[(i+1)*10]==txt[letter_count:2+letter_count]:\n return True\n for i in range(4):\n if inv_gematria[(i+1)*100]==txt[letter_count:2+letter_count]:\n return True\n elif len(txt) == 4:\n first_letter_is = \"\"\n for letter_count in range(2):\n letter_count *= 2\n for i in range(9):\n if inv_gematria[i+1]==txt[letter_count:2+letter_count]:\n if letter_count == 0:\n #print \"single false\"\n return False\n else:\n first_letter_is 
= \"singles\"\n if inv_gematria[(i+1)*10]==txt[letter_count:2+letter_count]:\n if letter_count == 0:\n first_letter_is = \"tens\"\n elif letter_count == 2:\n if first_letter_is != \"hundred\":\n #print \"tens false\"\n return False\n for i in range(4):\n if inv_gematria[(i+1)*100]==txt[letter_count:2+letter_count]:\n if letter_count == 0:\n first_letter_is = \"hundred\"\n elif letter_count == 2:\n if txt[0:2] != 'ת':\n #print \"hundreds false, no taf\"\n return False\n elif len(txt) == 6:\n #rules: first and second letter can't be singles\n #first letter must be hundreds\n #second letter can be hundreds or tens\n #third letter must be singles\n for letter_count in range(3):\n letter_count *= 2\n for i in range(9):\n if inv_gematria[i+1]==txt[letter_count:2+letter_count]:\n if letter_count != 4:\n #\tprint \"3 length singles false\"\n return False\n if letter_count == 0:\n first_letter_is = \"singles\"\n if inv_gematria[(i+1)*10]==txt[letter_count:2+letter_count]:\n if letter_count == 0:\n #print \"3 length tens false, can't be first\"\n return False\n elif letter_count == 2:\n if first_letter_is != \"hundred\":\n #\tprint \"3 length tens false because first letter not 100s\"\n return False\n elif letter_count == 4:\n #print \"3 length tens false, can't be last\"\n return False\n for i in range(4):\n if inv_gematria[(i+1)*100]==txt[letter_count:2+letter_count]:\n if letter_count == 0:\n first_letter_is = \"hundred\"\n elif letter_count == 2:\n if txt[0:2] != 'ת':\n #print \"3 length hundreds false, no taf\"\n return False\n else:\n print(\"length of gematria is off\")\n print(txt)\n return False\n return True\n\n\nclass StructuredDocument:\n \"\"\"\n class for extracting specific parts (i.e. chapters) of a text file. Pieces that exist outside the structure (an intro\n for example) will be included, but they will not be as easily accessible as the chapters.\n \"\"\"\n\n def __init__(self, filepath, regex):\n with codecs.open(filepath, 'r', 'utf-8') as infile:\n lines = infile.readlines()\n\n sections, section_mapping = [], {}\n current_section, section_num, section_index = [], None, 0\n\n for line in lines:\n match = re.search(regex, line)\n if match:\n if len(current_section) > 0:\n sections.append(''.join(current_section))\n if section_num:\n section_mapping[section_num] = section_index\n section_index += 1\n current_section = []\n section_num = getGematria(match.group(1))\n\n current_section.append(line)\n else:\n sections.append(''.join(current_section))\n section_mapping[section_num] = section_index\n\n self._sections = sections\n self._section_mapping = section_mapping\n\n def get_section(self, section_number):\n section_index = self._section_mapping[section_number]\n return self._sections[section_index]\n\n def _set_section(self, section_number, new_section):\n section_index = self._section_mapping[section_number]\n self._sections[section_index] = new_section\n\n def edit_section(self, section_number, callback, *args, **kwargs):\n old_section = self.get_section(section_number)\n new_section = callback(old_section, *args, **kwargs)\n self._set_section(section_number, new_section)\n\n def get_whole_text(self):\n return ''.join(self._sections)\n\n def write_to_file(self, filename):\n with codecs.open(filename, 'w', 'utf-8') as outfile:\n outfile.write(self.get_whole_text())\n\n def get_chapter_values(self):\n return sorted(self._section_mapping.keys())\n\n\ndef getGematria(txt):\n if not isinstance(txt, str):\n txt = txt.decode('utf-8')\n index=0\n sum=0\n while index <= len(txt)-1:\n if 
txt[index:index+1] in gematria:\n sum += gematria[txt[index:index+1]]\n\n index+=1\n return sum\n\n\ndef he_ord(he_char):\n \"\"\"\n Get the order number for a hebrew character (א becomes 1, ת becomes 22). Sofi letters (i.e ך), return the same value\n as their regular\n :param he_char:\n :return:\n \"\"\"\n if len(he_char) != 1:\n raise AssertionError('Can only evaluate a single character')\n if re.search('[\\u05d0-\\u05ea]', he_char) is None:\n raise AssertionError('{} is not a Hebrew Character!'.format(he_char))\n return he_char_ord[he_char]\n\n\ndef he_num_to_char(num):\n assert 1 <= num <= 22\n return num_to_char_dict[num]\n\n\n\ndef numToHeb(engnum=\"\"):\n engnum = str(engnum)\n numdig = len(engnum)\n hebnum = \"\"\n letters = [[\"\" for i in range(3)] for j in range(10)]\n letters[0]=[\"\", \"א\", \"ב\", \"ג\", \"ד\", \"ה\", \"ו\", \"ז\", \"ח\", \"ט\"]\n letters[1]=[\"\", \"י\", \"כ\", \"ל\", \"מ\", \"נ\", \"ס\", \"ע\", \"פ\", \"צ\"]\n letters[2]=[\"\", \"ק\", \"ר\", \"ש\", \"ת\", \"תק\", \"תר\", \"תש\", \"תת\", \"תתק\"]\n if (numdig > 3):\n sub_engnum = int(engnum)-800\n if sub_engnum>400:\n raise KeyError\n return \"תת{}\".format(numToHeb(sub_engnum))\n for count in range(numdig):\n hebnum += letters[numdig-count-1][int(engnum[count])]\n hebnum = re.sub('יה', 'טו', hebnum)\n hebnum = re.sub('יו', 'טז', hebnum)\n # hebnum = hebnum.decode('utf-8')\n return hebnum\n\n\ndef multiple_replace(old_string, replacement_dictionary, using_regex = False):\n \"\"\"\n Use a dictionary to make multiple replacements to a single string\n\n :param old_string: String to which replacements will be made\n :param replacement_dictionary: a dictionary with keys being the substrings\n to be replaced, values what they should be replaced with.\n :param 'regex = True' uses re.sub rather then str.replace\n :return: String with replacements made.\n \"\"\"\n if using_regex:\n for keys, value in replacement_dictionary.items():\n old_string = re.sub(keys,value,old_string)\n else:\n for keys, value in replacement_dictionary.items():\n old_string = old_string.replace(keys, value)\n\n return old_string\n\n\ndef find_discrepancies(book_list, version_title, file_buffer, language, middle=False):\n \"\"\"\n Prints all cases in which the number of verses in a text version doesn't match the\n number in the canonical version.\n\n *** Only works for Tanach, can be modified to work for any level 2 text***\n\n :param book_list: list of books\n :param version_title: Version title to be examined\n :param file_buffer: Buffer for file to print results\n :param language: 'en' or 'he' accordingly\n :param middle: set to True to manually start scanning a book from the middle.\n If middle is set to True, user will be prompted for the beginning chapter.\n \"\"\"\n\n # loop through each book\n for book in book_list:\n\n # print book to give user update on progress\n print(book)\n book = book.replace(' ', '_')\n book = book.replace('\\n', '')\n\n if middle:\n\n print(\"Start {0} at chapter: \".format(book))\n start_chapter = eval(input())\n url = SEFARIA_SERVER + '/api/texts/' + book + '.' 
+ \\\n str(start_chapter) + '/' + language + '/' + version_title\n\n else:\n url = SEFARIA_SERVER + '/api/texts/' + book + '.1/' + language + '/' + version_title\n\n\n try:\n # get first chapter in book\n response = urllib.request.urlopen(url)\n version_text = json.load(response)\n\n # loop through chapters\n chapters = Ref(book).all_subrefs()\n\n # check for correct number of chapters\n if len(chapters) != version_text['lengths'][0]:\n file_buffer.write('Chapter Problem in'+book+'\\n')\n\n for index, chapter in enumerate(chapters):\n\n # if starting in the middle skip to appropriate chapter\n if middle:\n if index+1 != start_chapter:\n continue\n\n else:\n # set middle back to false\n middle = False\n\n print(index+1)\n\n # get canonical number of verses\n canon = len(TextChunk(chapter, vtitle='Tanach with Text Only', lang='he').text)\n\n # get number of verses in version\n verses = len(version_text['text'])\n if verses != canon:\n file_buffer.write(chapter.normal() + '\\n')\n\n # get next chapter\n next_chapter = replace_using_regex(' \\d', version_text['next'], ' ', '.')\n next_chapter = next_chapter.replace(' ', '_')\n url = SEFARIA_SERVER+'/api/texts/'+next_chapter+'/'+language+'/'+version_title\n\n response = urllib.request.urlopen(url)\n version_text = json.load(response)\n\n except (URLError, HTTPError, KeyboardInterrupt, KeyError, ValueError) as e:\n print(e)\n print(url)\n file_buffer.close()\n sys.exit(1)\n\n\ndef jagged_array_to_file(output_file, jagged_array, section_names):\n \"\"\"\n Prints contents of a jagged array to a file. Recursive.\n :param output_file: File to write data.\n :param jagged_array: Multi dimensional array. Lowest level array should be strings.\n :param section_names: Names of segments to be printed in files (chapters, verse, siman, mishna etc.)\n Length must equal dimensions of jagged array.\n \"\"\"\n\n for index, item in enumerate(jagged_array):\n output_file.write('{} {}:\\n'.format(section_names[0], index+1))\n\n if type(item) is str or type(item) is str:\n output_file.write('{}\\n'.format(item))\n\n elif type(item) is list:\n jagged_array_to_file(output_file, item, section_names[1:])\n\n else:\n print('jagged array contains unknown type')\n output_file.close()\n raise TypeError\n\n\ndef ja_to_xml(ja, section_names, filename='output.xml'):\n \"\"\"\n Takes a jagged array and prints an xml file\n :param ja: list or nested list, bottom must be string or unicode\n :param section_names: list of names with which to identify sections (E.g. ['Chapter', 'Verse'])\n Length must match depth of of jagged_array\n :param filename: Name of file to output result.\n \"\"\"\n def build_xml(data, sections, parent):\n\n for index, item in enumerate(data):\n child = ET.SubElement(parent, sections[0], attrib={'index': str(index+1)})\n\n if isinstance(item, str):\n child.text = item\n\n elif isinstance(item, list):\n build_xml(item, sections[1:], parent=child)\n\n else:\n raise TypeError('Jagged Array contains unknown type')\n\n root = ET.Element('root')\n build_xml(ja, section_names, root)\n tree = ET.ElementTree(root)\n tree.write(filename, encoding=\"unicode\")\n\n\ndef file_to_ja(depth, infile, expressions, cleaner, grab_all=False):\n \"\"\"\n Designed to be the first stage of a reusable parsing tool. Adds lines of text to the Jagged\n Array in the desired structure (Chapter, verse, etc.)\n :param depth: depth of the JaggedArray.\n :param infile: Text file to read from\n :param expressions: A list of regular expressions with which to identify section (chapter) level. 
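(For a depth-2 text a single expression is enough, e.g. the hypothetical ['@Chapter (\\d+)'].) 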
Do\n not include an expression with which to break up the segment levels.\n :param cleaner: A function that takes a list of strings and returns an array with the text parsed\n correctly. Should also break up and remove unnecessary tagging data.\n :param grab_all: If set to true, will grab the lines indicating new sections.\n :return: A jagged_array with the text properly structured.\n \"\"\"\n\n # instantiate ja\n structure = reduce(lambda x, y: [x], list(range(depth-1)), [])\n ja = jagged_array.JaggedArray(structure)\n\n # ensure there is a regex for every level except the lowest\n if depth - len(expressions) != 1:\n raise AttributeError('Not enough data to parse. Need {} expressions, '\n 'received {}'.format(depth-1, len(expressions)))\n\n # compile regexes, instantiate index list\n regexes, indices = [re.compile(ex) for ex in expressions], [-1]*len(expressions)\n temp = []\n\n # loop through file\n for line in infile:\n\n # check for matches to the regexes\n for i, reg in enumerate(regexes):\n\n if reg.search(line):\n # check that we've hit the first chapter and verse\n if indices.count(-1) == 0:\n ja.set_element(indices, cleaner(temp))\n temp = []\n\n if grab_all:\n temp.append(line)\n\n # increment index that's been hit, reset all subsequent indices\n indices[i] += 1\n indices[i+1:] = [-1 if x >= 0 else x for x in indices[i+1:]]\n break\n\n else:\n if indices.count(-1) == 0:\n temp.append(line)\n else:\n ja.set_element(indices, cleaner(temp))\n\n return ja\n\n\ndef file_to_ja_g(depth, infile, expressions, cleaner, gimatria=False, group_name='gim', grab_all=None):\n \"\"\"\n like file to ja but with changing the numbers to Gimatria\n Designed to be the first stage of a reusable parsing tool. Adds lines of text to the Jagged\n Array in the desired structure (Chapter, verse, etc.)\n :param depth: depth of the JaggedArray.\n :param infile: Text file to read from\n :param expressions: A list of regular expressions with which to identify section (chapter) level. Do\n not include an expression with which to break up the segment levels.\n :param cleaner: A function that takes a list of strings and returns an array with the text parsed\n correctly. Should also break up and remove unnecessary tagging data.\n :param grab_all: a boolean list according to the regexs, if True then grab all of that if False erase line\n the 5 is just above the 3 which is the deepest length we use for now.\n :param gimatria: if the text is presented with gimatria in it.\n :param group_name: a name given to the group of letters for the gimatria to actually use\n :return: A jagged_array with the text properly structured.\n \"\"\"\n\n if grab_all is None:\n grab_all = [False] * len(expressions)\n\n # instantiate ja\n structure = reduce(lambda x, y: [x], list(range(depth - 1)), [])\n ja = jagged_array.JaggedArray(structure)\n\n # ensure there is a regex for every level except the lowest\n if depth - len(expressions) != 1:\n raise AttributeError('Not enough data to parse. 
Need {} expressions, '\n 'received {}'.format(depth - 1, len(expressions)))\n\n # compile regexes, instantiate index list\n regexes, indices = [re.compile(ex) for ex in expressions], [-1] * len(expressions)\n temp = []\n\n # loop through file\n for line in infile:\n\n # check for matches to the regexes\n for i, reg in enumerate(regexes):\n found = reg.search(line)\n if found:\n\n if indices.count(-1) == 0:\n ja.set_element(indices, cleaner(temp), [])\n temp = []\n if grab_all[i]:\n temp.append(line)\n # increment index that's been hit, reset all subsequent indices\n if gimatria: # note: if you uncomment the top must make this elif\n gimt = getGematria(found.group('{}'.format(group_name)))\n if gimt != 0: # increment index that's been hit, reset all subsequent indices\n indices[i] = gimt - 1\n else:\n indices[i] += 1\n else:\n indices[i] += 1\n indices[i + 1:] = [-1 if x >= 0 else x for x in indices[i + 1:]]\n break\n\n else:\n if indices.count(-1) == 0:\n temp.append(line)\n else:\n ja.set_element(indices, cleaner(temp), [])\n\n return ja\n\ndef he_array_to_int(he_array):\n \"\"\"\n Takes an array of hebrew numbers (א,ב, י\"א...) and returns array of integers.\n :param he_array: Array of hebrew letters which represents numbers\n :return: Array of numbers\n \"\"\"\n\n numbers = []\n for he in he_array:\n numbers.append(getGematria(he.replace('\"', '')))\n return numbers\n\n\ndef replace_using_regex(regex, query, new):\n \"\"\"\n This is an enhancement of str.replace(). It will only call str.replace if the regex has\n been found, thus allowing replacement of tags that may serve multiple or ambiguous functions.\n Should there be a need, an endline parameter can be added which will be appended to the end of\n the string\n :param regex: A regular expression. Will be compiled locally.\n :param query: The input string to be examined.\n :param new: The text that will be inserted instead of 'old'.\n :return: A new string with 'old' replaced by 'new'.\n \"\"\"\n\n # compile regex and search\n reg = re.compile(regex)\n result = re.search(reg, query)\n if result:\n\n # get all instances of match\n matches = re.finditer(reg, query)\n for match in matches:\n temp = match.group()\n query = query.replace(temp, new)\n return query\n\n\ndef clean_jagged_array(messy_array, strip_list):\n \"\"\"\n Given a jagged array and a list of regexes, return a new jagged array with all cases in regex list\n striped out.\n :param messy_array: Jagged array to be cleaned\n :param strip_list: list of strings or regular expressions to be stripped from jagged array\n :return: New jagged array with all cases in strip_list removed.\n \"\"\"\n\n clean_array = []\n\n for item in messy_array:\n\n if type(item) is list:\n clean_array.append(clean_jagged_array(item, strip_list))\n\n elif type(item) is str or type(item) is str:\n\n for case in strip_list:\n item = re.sub(case, '', item)\n item = re.sub(' +', ' ', item)\n clean_array.append(item.lstrip(' '))\n\n else:\n print('Jagged array contains unknown type')\n raise TypeError\n\n return clean_array\n\n\ndef traverse_ja(ja, indices=None, bottom=str):\n \"\"\"\n A generator to move through a JaggedArray like structure, retrieving the indices of each element of\n the JA as you go.\n :param ja: JaggedArray like object to traverse\n :param indices: List of indices needed to locate the first element of the array. Leave empty if\n starting from the root.\n :param bottom: Data type at the bottom of the array. 
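(It defaults to str, matching an ordinary text jagged array.) 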
Used as a terminating condition.\n :yield: Dictionary with the keys indices and data, corresponding to the retrieved data and its\n corresponding address.\n \"\"\"\n if indices is None:\n indices = []\n\n if isinstance(ja, bottom):\n yield {'data': ja, 'indices': indices}\n\n else:\n for index, data in enumerate(ja):\n if index == 0:\n indices.append(index)\n else:\n indices[-1] = index\n if data:\n for thing in traverse_ja(data, indices[:], bottom):\n yield thing\n indices.pop()\n\n\ndef grab_section_names(section_expression, input_file, group_number=0):\n \"\"\"\n Grab the names of the sections that need to be converted into a complex text\n :param section_expression: An expression that can be compiled into a regex that will find\n the corresponding sections\n :param input_file: File from which to grab the results\n :param group_number: If needed, supply the capture group that will return the correct name.\n :return: List of strings.\n \"\"\"\n\n section_reg = re.compile(section_expression)\n names = []\n\n for line in input_file:\n\n found_match = section_reg.search(line)\n if found_match:\n names.append(found_match.group(group_number))\n\n return names\n\n\ndef simple_to_complex(segment_names, jagged_text_array):\n \"\"\"\n Given a simple text and the names of each section, convert a simple text into a complex one.\n :param segment_names: A list of names for each section\n :param jagged_text_array: A parsed jagged array to be converted from a simple to a complex text\n :return: Dictionary representing the complex text structure\n \"\"\"\n\n # Ensure there are the correct number of segment names\n if len(segment_names) != len(jagged_text_array):\n raise IndexError('Length of segment_names does not match length of jaggedArray')\n\n complex_text = {}\n\n for index, name in enumerate(segment_names):\n complex_text[name] = jagged_text_array[index]\n\n return complex_text\n\n\ndef convert_dict_to_array(dictionary, default_value=list):\n assert all([isinstance(item, int) for item in list(dictionary.keys())])\n assert callable(default_value)\n\n output_list = list()\n dictionary = defaultdict(default_value, dictionary)\n for key in range(max(dictionary.keys()) + 1):\n output_list.append(dictionary[key])\n return output_list\n\n\ndef restructure_file(filename, function, *args):\n \"\"\"\n Restructures a file according to function\n :param filename:\n :param function:\n :param args:\n \"\"\"\n original = codecs.open(filename, 'r', 'utf-8')\n updated = codecs.open('{}.tmp'.format(filename), 'w', 'utf-8')\n\n for line in original:\n new_line = function(line, *args)\n updated.write(new_line)\n\n original.close()\n updated.close()\n\n os.remove(filename)\n os.rename('{}.tmp'.format(filename), filename)\n\n\nclass ToratEmetData:\n \"\"\"\n Base class for parsing HTML downloaded from Torat Emet. Strategy is to iterate through the data line\n by line, identifying lines that contain important data. 
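(Subclasses decide what counts as important by overriding _extract_important_data.) 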
These lines can then be fed through an html\n parser (such as Beautiful soup) for cleanup and identification, and then ultimately structured into\n a proper jagged array or dictionary of jagged arrays.\n \"\"\"\n\n def __init__(self, path, from_url=False, codec='cp1255'):\n \"\"\"\n\n :param path: Path to file or url\n :param from_url: Set to True if data must be downloaded from url\n :param codec:\n \"\"\"\n self._path = path\n self._from_url = from_url\n self._codec = codec\n self.lines = self._get_lines()\n self._important_lines = self._extract_important_data()\n self.parsed_text = self._parse()\n\n def _get_lines(self):\n\n if self._from_url:\n lines = []\n for line in urllib.request.urlopen(self._path).readlines():\n lines.append(line.decode(self._codec))\n return lines\n\n else:\n with codecs.open(self._path, 'r', self._codec) as infile:\n return infile.readlines()\n\n def _extract_important_data(self):\n raise NotImplementedError\n\n @staticmethod\n def build_segments(section):\n\n comments = []\n\n bold = re.compile('<b>')\n if not bold.search(section):\n return [section]\n matches = bold.finditer(section)\n start = next(matches)\n\n for next_match in matches:\n comments.append(section[start.start(): next_match.start()])\n start = next_match\n else:\n comments.append(section[start.start():])\n return comments\n\n def _parse(self):\n\n book = {}\n for line in self._important_lines:\n chapter, verse = line['chapter'], line['verse']\n\n if chapter not in list(book.keys()):\n book[chapter] = {}\n\n book[chapter][verse] = self.build_segments(line['text'])\n\n for key in list(book.keys()):\n book[key] = convert_dict_to_array(book[key])\n\n book = convert_dict_to_array(book)\n return book\n\n\ndef get_cards_from_trello(list_name, board_json):\n \"\"\"\n Trello can export a board as a JSON object. 
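(The export carries top-level \"lists\" and \"cards\" arrays, and each card points back to its list via \"idList\".) 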
Use this function to grab the names of all the cards that\n belong to a certain list on the board.\n :param list_name: Name of the list that holds the cards of interest\n :param board_json: The exported JSON file from trello that relates to the board of interest\n :return: A list of all the cards on the specified Trello list.\n \"\"\"\n\n board = json.loads(board_json.read())\n\n list_id = ''\n for column in board['lists']:\n if column['name'] == list_name:\n list_id = column['id']\n\n cards = []\n for card in board['cards']:\n if card['idList'] == list_id:\n cards.append(card['name'])\n\n return cards\n\n\nclass Singleton(type):\n _instances = {}\n\n def __call__(cls, *args, **kwargs):\n if cls._instances.get(cls) is None:\n cls._instances[cls] = super(Singleton, cls).__call__(*args, **kwargs)\n return cls._instances[cls]\n\n\ndef set_ranges_between_refs(refs, section_ref):\n '''\n :refs: an unsorted list of segments such as [Ref(Rashi on Genesis 2:11), Ref(Rashi on Genesis 2:4), Ref(Rashi on Genesis 2:10)]\n where all refs have the same section\n :section_ref: the section reference for the list of refs, in this case Ref(Rashi on Genesis 2)\n :return: sorted list of ranged refs where the i-th element is a range from itself to the i+1-th element.\n The last ref in the list is a range from itself to the final segment in the section, which for Rashi on Genesis 2 is 25.\n In this case:\n [Ref(Rashi on Genesis 2:4-9), Ref(Rashi on Genesis 2:10), Ref(Rashi on Genesis 2:11-25)]\n If an empty list is passed as refs, we simply return a list with one range over the entire section, such as:\n [Ref(Rashi on Genesis 2:1-25)]\n '''\n if refs == []:\n first_ref = section_ref.subref(1)\n return [first_ref.to(section_ref.all_segment_refs()[-1])]\n\n\n ranged_refs = []\n len_list = len(refs)\n refs = sorted(refs, key=lambda x: x.order_id())\n last_ref = section_ref.all_segment_refs()[-1]\n #print \"Refs: {}\".format(refs)\n #print \"Section: {}\".format(section_ref)\n #print \"Last ref: {}\".format(last_ref)\n for i, ref in enumerate(refs):\n if ref.is_range():\n ranged_refs.append(ref)\n continue\n assert ref.section_ref() is section_ref\n if i + 1 == len_list:\n new_range = ref.to(last_ref)\n else:\n next_ref = refs[i+1]\n if next_ref.sections[-1] == ref.sections[-1]:\n ranged_refs.append(ref)\n continue\n else:\n d = next_ref._core_dict()\n d['sections'][-1] -= 1\n d['toSections'][-1] -= 1\n new_range = ref.to(Ref(_obj=d))\n ranged_refs.append(new_range)\n return ranged_refs\n\n\nclass PlaceHolder(object):\n \"\"\"\n Useful for holding on to a variable without having to declare a name for them. 
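(A PlaceHolder stores whatever it is called with and forwards attribute lookups to the stored object.) 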
Particularly useful for running a\n    method and then using the result only if the method was successful.\n\n    Example (uses re module)\n    my_search = re.search(some_pattern, some_string)\n    if my_search:\n        print(my_search.group())\n    Becomes:\n    holder = PlaceHolder()\n    if holder(re.search(some_pattern, some_string)):\n        print(holder.group())\n\n    \"\"\"\n    def __init__(self):\n        self._obj_store = None\n\n    def __call__(self, _obj):\n        self._obj_store = _obj\n        return _obj\n\n    def __getattr__(self, item):\n        return getattr(self._obj_store, item)\n\n    def get_stored_item(self):\n        return self._obj_store\n\n\ndef clean_whitespace(some_string):\n    \"\"\"\n    Remove whitespace from beginning and end of string, as well as multiple spaces\n    :param basestring some_string:\n    :return:\n    \"\"\"\n    return ' '.join(some_string.split())\n\n\ndef split_version(version_dict, num_splits):\n    \"\"\"\n    Useful when a single version is larger than the max document size in mongodb (16MB). Breaks a version up, adding\n    `Vol n`, where n = 1,2,3...<num_splits>.\n    :param version_dict: Version dictionary, as would be uploaded to Sefaria without being split\n    :param num_splits: Number of times to break up version (2 will give 2 version objects).\n    :return: list of version objects\n    \"\"\"\n    def edges(size):\n        chunk_length = float(size) / float(num_splits)\n        chunk_indices = [math.trunc(chunk_length * i) for i in range(num_splits + 1)]\n        return list(zip(chunk_indices[:-1], chunk_indices[1:]))\n\n    volumes = []\n    indices = edges(len(version_dict['text']))\n    for vol_num, (start, end) in enumerate(indices, 1):\n        new_fields = {\n            'versionTitle': '{}, Vol {}'.format(version_dict['versionTitle'], vol_num),\n            'text': [t if start <= ind < end else [] for ind, t in enumerate(version_dict['text'])]\n        }\n        new_version = version_dict.copy()\n        new_version.update(new_fields)\n        volumes.append(new_version)\n    return volumes\n\n\ndef split_list(list_to_split, num_splits):\n    chunk_length = float(len(list_to_split)) / num_splits\n    indices = [math.trunc(chunk_length * i) for i in range(num_splits + 1)]\n    for start, end in zip(indices[:-1], indices[1:]):\n        yield list_to_split[start:end]\n\n\ndef schema_with_default(simple_ja):\n    \"\"\"\n    Take a standard JaggedArrayNode and make it a default child of a SchemaNode.\n    :param JaggedArrayNode simple_ja:\n    :return: SchemaNode\n    \"\"\"\n    root_node = SchemaNode()\n    root_node.title_group = simple_ja.title_group\n    root_node.key = simple_ja.key\n    simple_ja.title_group = TitleGroup()\n    simple_ja.key = \"default\"\n    simple_ja.default = True\n    root_node.append(simple_ja)\n    root_node.validate()\n    return root_node\n\ndef change_array(ja, callback):\n    \"\"\"\n    Given a function(str) and a jagged array, returns a new jagged array after running the function on all elements.\n    :param ja: a jagged array to be changed\n    :param callback: a function run on each string\n    :return: new jagged array with all elements changed by the function\n    \"\"\"\n\n    new_array = []\n\n    for item in ja:\n\n        if isinstance(item, list):\n            new_array.append(change_array(item, callback))\n\n        elif isinstance(item, str):\n            new_array.append(callback(item))\n\n        else:\n            print('Jagged array contains unknown type')\n            raise TypeError\n\n    return new_array\n\n\ndef get_window_around_match(start_char:int, end_char:int, text:str, window:int=10) -> tuple:\n    before_window, after_window = '', ''\n\n    before_text = text[:start_char]\n    before_window_words = list(filter(lambda x: len(x) > 0, before_text.split()))[-window:]\n    before_window = \" \".join(before_window_words)\n\n    
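# Same windowing on the other side of the match: the first 'window' words after end_char.\n    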
after_text = text[end_char:]\n after_window_words = list(filter(lambda x: len(x) > 0, after_text.split()))[:window]\n after_window = \" \".join(after_window_words)\n\n return before_window, after_window\n\ndef is_abbr_of(abbr, unabbr, match=lambda x, y: x.startswith(y), lang='he'):\n abbr = re.sub('[^א-ת]', '', abbr) if lang == 'he' else re.sub('[^a-z]', '', abbr)\n unabbr = unabbr.split()\n indexes = [[index for index, letter in enumerate(abbr) if word[0] == letter] for w, word in enumerate(unabbr)]\n choices = itertools.product(*indexes)\n for choi in choices:\n if choi[0] == 0 and all(i < j for i, j in zip(choi, choi[1:])):\n choi += (None,)\n if all(match(unabbr[n], abbr[choi[n]:choi[n+1]]) for n in range(len(unabbr))):\n return True\n return False\n","sub_path":"parsing_utilities/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":36560,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"182860130","text":"import docx\n\ndoc = docx.Document(\"df.docx\")\nfullText = []\nfor para in doc.paragraphs:\n fullText.append(para.text)\nprint('\\n'.join(fullText))\n\ntables = doc.tables\nprev_cell = None\nfor table in tables:\n for row in table.rows:\n for cell in row.cells:\n if prev_cell is None or cell._tc != prev_cell._tc:\n for paragraph in cell.paragraphs:\n print(paragraph.text)\n prev_cell = cell\n","sub_path":"nlp/audit/docdemo.py","file_name":"docdemo.py","file_ext":"py","file_size_in_byte":445,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"137212596","text":"from multiprocessing import Process, Queue\nimport numpy as np\nimport time, math, os, argparse\n# from gpuinfo import GPUInfo\n\n\nclass_list = [\"Birds\", \"Empty\", \"Fox\", \"Humans\", \"Rodents\"]\nNUM_IMAGE = 10\nINF_DIR = \"/racelab/SantaCruzIsland_Labeled_5Class/Birds\"\nMODEL_DIR = '/racelab/checkpoints/resnet50_model.h5'\nWIDTH = 1920\nHEIGHT = 1080\n\nclass Scheduler:\n def __init__(self, gpu_num):\n self._queue = Queue()\n self._gpu_num = gpu_num\n self.__init_workers()\n\n def __init_workers(self):\n self._workers = list()\n for gpuid in range (self._gpu_num):\n self._workers.append(Worker(gpuid, self._queue))\n\n def start(self, image_list):\n\n for img in image_list:\n self._queue.put(img)\n\n # Add a None to indicate the end of queue\n self._queue.put(None)\n\n for worker in self._workers:\n worker.start()\n\n for worker in self._workers:\n worker.join()\n print (\"All image are done inferencing...\")\n\nclass Worker(Process):\n def __init__(self, gpuid, queue):\n Process.__init__(self, name=\"ModelProcessor\")\n self._gpuid = gpuid\n self._queue = queue\n \n def run(self):\n #set enviornment\n os.environ[\"CUDA_DEVICE_ORDER\"] = \"PCI_BUS_ID\"\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = str(self._gpuid)\n\n from tensorflow.keras.applications.resnet50 import preprocess_input\n from tensorflow.keras.preprocessing import image\n from tensorflow.keras.models import load_model\n trained_model = load_model(MODEL_DIR)\n \n while True:\n img_path = self._queue.get()\n if img_path == None:\n self._queue.put(None)\n break\n img = image.load_img(path=img_path, target_size=(1920, 1080))\n x = image.img_to_array(img)\n x = np.expand_dims(x, axis=0)\n x = preprocess_input(x)\n y_prob = trained_model.predict(x)\n index = y_prob.argmax()\n print (\"image : {0}, index : {1}\".format(img_path, index))\n \n print(\"GPU {} has done inferencing...\".format(self._gpuid))\n\ndef run_sequential(image_list):\n from 
tensorflow.keras.applications.resnet50 import preprocess_input\n    from tensorflow.keras.preprocessing import image\n    from tensorflow.keras.models import load_model\n\n    trained_model = load_model('/racelab/checkpoints/resnet50_model.h5')\n    for img_path in image_list:\n        img = image.load_img(path=img_path, target_size=(1920, 1080))\n        x = image.img_to_array(img)\n        x = np.expand_dims(x, axis=0)\n        x = preprocess_input(x)\n        y_prob = trained_model.predict(x)\n        index = y_prob.argmax()\n        print ("image : {0}, index : {1}".format(img_path, index))\n    \n\ndef handler(event, context): \n\n    if isinstance(event['data'], dict) and "num_image" in event['data']:\n        global NUM_IMAGE\n        NUM_IMAGE = int(event['data']['num_image'])\n    \n    # Get GPU counts\n    NUM_GPU = 0\n    # available_devices = GPUInfo.check_empty()\n    # if available_devices != None:\n    #     NUM_GPU = len(available_devices)\n    # print ("Current GPU num is {0}".format(NUM_GPU))\n    \n    counter = 0\n    image_list = list()\n    for img in os.listdir(INF_DIR):\n        image_list.append(os.path.join(INF_DIR, img))\n        counter += 1\n        if counter == NUM_IMAGE:\n            break\n    \n    start = time.time()\n\n    if NUM_GPU == 0:\n        run_sequential(image_list)\n    else:\n        # initialize Scheduler\n        scheduler = Scheduler(NUM_GPU)\n        # start multiprocessing\n        scheduler.start(image_list)\n    \n    end = time.time()\n    # print ("Time with model loading {0} for {1} images.".format(end - start, NUM_IMAGE))\n    return ("Time with model loading {0} for {1} images.".format(end - start, NUM_IMAGE))\n\nif __name__ == "__main__":\n    parser = argparse.ArgumentParser()\n    parser.add_argument("num_image")\n    args = parser.parse_args()\n    handler({"data" : {"num_image" : args.num_image}}, {})","sub_path":"apps/image-clf-inf37.py","file_name":"image-clf-inf37.py","file_ext":"py","file_size_in_byte":4057,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"433582225","text":"\n# 122. Best Time to Buy and Sell Stock II\n# Given an array where the i-th element is the price of a given stock on day i.\n# Design an algorithm to find the maximum profit. You may complete as many transactions as you like (buy and sell the stock multiple times).\n# Note: you may not engage in multiple transactions at the same time (you must sell the stock before you buy again).\n#\n# Example\n# 1:\n# Input: [7, 1, 5, 3, 6, 4]\n# Output: 7\n# Explanation: buy on day 2 (price = 1) and sell on day 3 (price = 5); the profit from this trade = 5 - 1 = 4.\n# Then buy on day 4 (price = 3) and sell on day 5 (price = 6); the profit from this trade = 6 - 3 = 3.\n\n\nclass Solution(object):\n    def maxProfit(self, prices):\n        \"\"\"\n        :type prices: List[int]\n        :rtype: int\n        \"\"\"\n        if not prices:\n            return 0\n        n = len(prices)\n        dp = [[0] * 2 for _ in range(n)]\n        dp[0][0] = 0\n        dp[0][1] = -prices[0]\n\n        for i in range(1, n):\n            dp[i][0] = max(dp[i - 1][0], dp[i - 1][1] + prices[i])\n            dp[i][1] = max(dp[i - 1][1], dp[i - 1][0] - prices[i])\n\n        return dp[n - 1][0]\n\n","sub_path":"Week_06/solution122_maxProfit.py","file_name":"solution122_maxProfit.py","file_ext":"py","file_size_in_byte":1223,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"194692714","text":"# -*- coding: utf-8 -*-\nimport numpy\nimport binascii\nfrom utils.XY2Res import *\nfrom Settings import XY2_PATH\n\n\nclass Mask:\n    def __init__(self, x, y, w, h, size=0):\n        self.x = x\n        self.y = y\n        self.w = w\n        self.h = h\n        self.size = size\n        self.data = None\n        self.raw = None\n\n    def import_data(self, data):\n        self.data = data\n\n    def import_raw(self, data):\n        self.raw = data\n\n\nclass MapX:\n    def __init__(self, path):\n        self.map_id = ""\n        self.map_type = 0  # map type: 1 = old map, 2 = new map\n        self.unit_offset = []  # file offsets of each map unit\n        self.pic = []  # decompressed RGB bytes of each unit's image\n        self.mask = []  # mask data\n        self.mask_num = 0  # number of masks\n        self.mask_offset = [] 
# file offsets of the masks in a new-format map\n        self.block = []  #\n        self.cell = None  # map cell (walkability) rules\n        self.brig = []  # lighting rules\n        self.hand = None  # map file handle\n        self.map_width = 0  # map width\n        self.map_height = 0  # map height\n        self.unit_width = 320  # unit width\n        self.unit_height = 240  # unit height\n        self.col = 0  # number of columns, i.e. units per row\n        self.row = 0  # number of rows, i.e. units per column\n        self.n = 0  # total number of units\n        self.map_size = 0  # map size\n        self.jpg_head = bytes()  # shared JPEG header of an old-format map\n        self.coordinate = (0, 0)  # maximum in-game coordinates of the map\n        self.open(XY2_PATH + path)\n\n    def open(self, path):\n        \"\"\"\n        Open the map file and record the unit offset table\n        :param path:\n        :return:\n        \"\"\"\n        try:\n            self.hand = open(path, 'rb')\n        except FileNotFoundError:\n            raise Exception("Map not found")\n        else:\n            self.map_id = path.split("/")[-1]  # record the map id\n            map_sign = self.read_bytes_to_hex_list(4)  # map signature\n            if map_sign == ['30', '2e', '31', '4d']:  # XY2 new-format map\n                self.map_type = 2\n            elif map_sign == ['58', '50', '41', '4d']:  # XY2 old-format map\n                self.map_type = 1\n            else:\n                raise Exception("Map type error")\n            self.map_width = self.read_bytes_to_int(4)  # total map width\n            self.map_height = self.read_bytes_to_int(4)  # total map height\n            self.coordinate = (self.map_width//20, self.map_height//20)  # game coordinates map to pixels at 1:20\n            temp_col = self.map_width / self.unit_width\n            self.col = int(temp_col) if temp_col == int(temp_col) else int(temp_col) + 1  # number of map columns\n            temp_row = self.map_height / self.unit_height\n            self.row = int(temp_row) if temp_row == int(temp_row) else int(temp_row) + 1  # number of map rows\n            self.n = self.col * self.row  # number of map units\n\n            self.unit_offset = [self.read_bytes_to_int(4) for _ in range(self.n)]  # per-unit file offsets\n            self.pic = [None for _ in range(self.n)]  # initialize the per-unit picture list\n            self.mask = [[] for _ in range(self.n)]  # initialize the per-unit mask list\n            self.cell = numpy.zeros((self.map_height // 20, self.map_width // 20))\n\n            if self.map_type == 1:  # XY2 old-format map\n                self.map_size = self.read_bytes_to_int(4)  # map file size\n                if self.read_bytes_to_hex_list(4) == ['48', '47', '50', '4a']:  # H G P J\n                    size = self.read_bytes_to_int(4)\n                    self.jpg_head = self.hand.read(size)\n            elif self.map_type == 2:  # XY2 new-format map\n                self.hand.seek(4, 1)  # skip 4 bytes\n                self.mask_num = self.read_bytes_to_int(4)\n                for _ in range(self.mask_num):\n                    self.mask_offset.append(self.read_bytes_to_int(4))\n                self.read_masks_in_new_map()\n\n    def read_unit(self, unit_num):\n        \"\"\"\n        Read one specific map unit\n        :param unit_num: index of the map unit\n        :return:\n        \"\"\"\n        file_pos = self.unit_offset[unit_num]\n        self.hand.seek(file_pos)  # seek to the unit\n        unit_head_size = self.read_bytes_to_int(4)  # unit header\n        if self.map_type == 2:  # new-format maps need the unit header skipped\n            if unit_head_size != 0:\n                self.hand.seek(unit_head_size * 4, 1)  # skip the unit header\n        masks = []\n        loop = True\n        while loop:\n            try:\n                _sign = self.read_bytes_to_hex_list(4)\n                _size = self.read_bytes_to_int(4)\n                if _sign == ['47', '45', '50', '4a']:  # G E P J\n                    self.read_jpeg(_sign, _size, unit_num)  # read the jpeg data\n                elif _sign == ['47', '41', '4d', '49']:  # G A M I image\n                    pass\n                elif _sign == ['32', '53', '41', '4d']:  # 2 S A M mask\n                    res = self.read_mask(_sign, _size, unit_num)\n                    masks.append(res)  # read old-format mask data\n                elif _sign == ['4b', '53', '41', '4d']:  # K S A M mask\n                    res = self.read_mask(_sign, _size, unit_num)\n                    masks.append(res)  # read old-format mask data\n                elif _sign == ['4b', '4f', '4c', '42']:  # K O L B block\n                    self.hand.seek(_size, 1)  # skip block data, not useful\n                elif _sign == ['4c', '4c', '45', '43']:  # L L E C cell\n                    self.hand.seek(_size, 1)  # skip cell data, it was already read earlier\n                elif _sign == ['47', '49', '52', '42']:  # G I R B brig\n                    self.hand.seek(_size, 1)  # light/shadow data, not read for now\n                elif _sign == ['20', '44', '4e', '45']:  # (space) D N E, i.e. \"END\" marker\n                    loop = False\n                else:\n                    loop = False\n            except ValueError:\n                break\n        if self.map_type == 1:  # old-format map\n            self.mask[unit_num] = masks  # 
store this unit's mask data\n        elif self.map_type == 2:  # new-format map\n            for compressed_mask in self.mask[unit_num]:  # this unit's masks were already preloaded into self.mask[unit_num]\n                align_w = compressed_mask.w // 4 + (compressed_mask.w % 4 != 0)\n                out_size = align_w * compressed_mask.h\n                if not compressed_mask.data:\n                    _data = decompress_mask(compressed_mask.raw, out_size)  # decompress the mask data\n                    compressed_mask.import_data(_data)  # replace with the decompressed data\n                    del compressed_mask.raw\n\n    def read_jpeg(self, sign, size, unit_num):\n        \"\"\"\n        Read the jpeg data inside a unit\n        :param sign: JPEG type check\n        :param size: data size\n        :param unit_num: map unit index\n        :return:\n        \"\"\"\n        if sign == ['47', '45', '50', '4a']:  # G E P J\n            if self.map_type == 1:  # old-format map\n                pic = self.hand.read(size)  # JPEG data\n                self.pic[unit_num] = read_old_map_to_rgb(self.jpg_head + pic + b"\\xff\\xd9")\n            elif self.map_type == 2:\n                pic = self.repair_new_jpg(self.read_bytes_to_hex_list(size))  # repair into a complete JPEG\n                self.pic[unit_num] = self.hex_list_to_bytes(pic)\n\n    def read_mask(self, sign, size, unit_num):\n        \"\"\"\n        Mask reader for old-format maps. Every unit of an old map carries its own masks, so they are read strictly on demand.\n        :param sign:\n        :param size:\n        :param unit_num: needed to locate the mask's pixel position on the map\n        :return: Mask instance\n        \"\"\"\n        if sign == ['4b', '53', '41', '4d'] or sign == ['32', '53', '41', '4d']:  # K S A M or 2 S A M\n            x = self.read_bytes_to_int(4)\n            y = self.read_bytes_to_int(4)\n            w = self.read_bytes_to_int(4)  # mask width\n            h = self.read_bytes_to_int(4)  # mask height\n            row = unit_num // self.col\n            col = unit_num % self.col\n            x_of_map = col * 320 + x  # mask anchor x\n            y_of_map = row * 240 + y  # mask anchor y\n            data = self.hand.read(size - 16)  # read the compressed mask data; the 4 * 4 header bytes are subtracted from the size\n            align_w = w // 4 + (w % 4 != 0)  # each byte holds 4 pixels, so the width must be aligned\n            out_size = align_w * h\n            _bytes = decompress_mask(data, out_size)  # decompress the mask data\n            mask = Mask(x_of_map, y_of_map, w, h)\n            mask.import_data(_bytes)\n            return mask\n\n    def read_brig(self, sign, size):\n        if sign == ['47', '49', '52', '42']:  # G I R B brig\n            brig = self.read_bytes_to_hex_list(size)\n            self.brig.append(brig)\n\n    def read_all_cells(self):\n        for unit_num in range(self.n):\n            pos = self.unit_offset[unit_num]\n            self.hand.seek(pos)  # seek to the unit\n            unit_head_size = self.read_bytes_to_int(4)  # unit header\n            if self.map_type == 2:  # new-format maps need the unit header skipped\n                if unit_head_size != 0:\n                    self.hand.seek(unit_head_size * 4, 1)  # skip the unit header\n            row = unit_num // self.col  # row index\n            col = unit_num % self.col  # column index\n            while True:\n                _sign = self.read_bytes_to_hex_list(4)\n                _size = self.read_bytes_to_int(4)\n                if _sign == ['4c', '4c', '45', '43']:  # L L E C cell\n                    cell_list = self.read_bytes_to_int_list(_size)\n                    i = j = 0\n                    for one in cell_list:\n                        if one == 1:\n                            self.cell[row * 12 + i, col * 16 + j] = 1\n                        j += 1\n                        if j >= 16:\n                            j = 0\n                            i += 1\n                    break\n                elif _sign == ["00", "00", "00", "00"]:\n                    break\n                else:\n                    self.hand.seek(_size, 1)\n\n    def read_masks_in_new_map(self):\n        for pos in self.mask_offset:\n            self.hand.seek(pos)  # seek to the mask record\n            x = self.read_bytes_to_int(4)\n            y = self.read_bytes_to_int(4)\n            w = self.read_bytes_to_int(4)\n            h = self.read_bytes_to_int(4)\n            size = self.read_bytes_to_int(4)\n            data = self.hand.read(size)\n            _mask = Mask(x, y, w, h)\n            _mask.import_raw(data)\n\n            i = y // 240 - (y % 240 == 0)\n            j = x // 320 - (x % 320 == 0)\n            ii = (y + h) // 240 - ((y + h) % 240 == 0)\n            jj = (x + w) // 320 - ((x + w) % 320 == 0)\n            for _i in range(i, ii+1):\n                for _j in range(j, jj+1):\n                    unit_num = _i * self.col + _j\n                    self.mask[unit_num].append(_mask)\n\n    def read_bytes_to_int_list(self, size):\n        return [x for x in self.hand.read(size)]\n\n    def read_bytes_to_hex_list(self, size):\n        hex_bit = 
binascii.hexlify(self.hand.read(size)).decode("utf-8")\n        return [hex_bit[i:i+2] for i in range(0, len(hex_bit), 2)]\n\n    def read_bytes_to_int(self, size):\n        return int.from_bytes(self.hand.read(size), byteorder="little", signed=True)\n\n    @staticmethod\n    def repair_new_jpg(pic):\n        i = 0\n        while i < len(pic):\n            if pic[i:i + 2] == ['ff', 'd8']:  # pass through\n                i += 2\n            elif pic[i:i + 2] == ['ff', 'a0']:  # delete the 3rd and 4th bytes FFA0\n                del pic[i:i + 2]\n            elif pic[i:i + 2] == ['ff', 'c0']:  # pass through\n                i += 2\n                i += int(pic[i] + pic[i + 1], 16)\n            elif pic[i:i + 2] == ['ff', 'c4']:  # pass through\n                i += 2\n                i += int(pic[i] + pic[i + 1], 16)\n            elif pic[i:i + 2] == ['ff', 'db']:  # pass through\n                i += 2\n                i += int(pic[i] + pic[i + 1], 16)\n            elif pic[i:i + 2] == ['ff', 'da']:  #\n                i += 2\n                da_len = int(pic[i] + pic[i + 1], 16)\n                pic[i:i + 2] = ['00', '0c']  # change the FF DA length from 00 09 to 00 0C\n                i += da_len\n                for x in ["00", "3f", "00"]:  # append 00 3F 00 at the end of the FF DA data\n                    pic.insert(i, x)\n                    i += 1\n                while i < len(pic):  # replace FF bytes between FF DA and FF D9 with FF 00\n                    if pic[i] == "ff":\n                        pic.insert(i + 1, "00")\n                        i += 1\n                    i += 1\n                pic = pic[:-2] + ["d9"]  # but the end marker ff d9 must not change // one extra byte ends up here, so drop it\n            else:\n                break\n        return pic\n\n    @staticmethod\n    def hex_list_to_bytes(_list):\n        _bytes = bytes()\n        for i in _list:\n            _bytes += int(i, 16).to_bytes(length=1, byteorder='big')\n        return _bytes\n\n\nsimple_old_map = MapX("scene/0001.map")\n","sub_path":"utils/MAPX.py","file_name":"MAPX.py","file_ext":"py","file_size_in_byte":12870,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"102780028","text":"import numpy as np\nimport openjij\nfrom openjij.sampler import measure_time\nfrom openjij.sampler import SQASampler\nfrom openjij.utils.decorator import deprecated_alias\nimport cxxjij\n\n\nclass CSQASampler(SQASampler):\n    def __init__(self,\n                 beta=5.0, gamma=1.0,\n                 num_sweeps=1000, schedule=None,\n                 num_reads=1):\n\n        self.beta = beta\n        self.gamma = gamma\n        self.num_reads = num_reads\n        self.num_sweeps = num_sweeps\n        self.schedule = schedule\n        self.energy_bias = 0.0\n        self._schedule_setting = {\n            'beta': beta,\n            'gamma': gamma,\n            'num_sweeps': num_sweeps,\n            'num_reads': num_reads,\n        }\n\n    def _get_result(self, system, model):\n        info = {}\n        info['spin_config'] = system.spin_config\n\n        state = cxxjij.result.get_solution(system)\n\n        return state, info\n\n    def sample_ising(self, h, J,\n                     beta=None, gamma=None,\n                     num_sweeps=None, schedule=None,\n                     num_reads=1,\n                     initial_state=None, updater='swendsenwang',\n                     reinitialize_state=True, seed=None, **kwargs):\n\n        bqm = openjij.BinaryQuadraticModel(\n            linear=h, quadratic=J, var_type='SPIN'\n        )\n\n        ising_graph = bqm.get_cxxjij_ising_graph()\n\n        self._setting_overwrite(\n            beta=beta, gamma=gamma,\n            num_sweeps=num_sweeps, num_reads=num_reads\n        )\n        self._annealing_schedule_setting(\n            bqm, beta, gamma, num_sweeps, schedule)\n\n        # make init state generator --------------------------------\n        if initial_state is None:\n            def init_generator():\n                n = len(bqm.indices)\n                init_num_cut = 10\n                c_spins = ising_graph.gen_spin()\n                _cut = np.random.uniform(0, 1, (n, init_num_cut))\n                spin_config = [[\n                    (t, s**(_ti+1))\n                    for _ti, t in enumerate(np.sort(_cut[i]))]\n                    for i, s in enumerate(c_spins)]\n                return spin_config\n        else:\n            def init_generator(): return initial_state\n        # -------------------------------- make init state generator\n\n        # choose updater -------------------------------------------\n        sqa_system = cxxjij.system.ContinuousTimeIsing_Dense(\n            init_generator(), ising_graph, self.gamma\n        )\n        _updater_name = 
updater.lower().replace('_', '').replace(' ', '')\n if _updater_name == 'swendsenwang':\n algorithm = cxxjij.algorithm.Algorithm_ContinuousTimeSwendsenWang_run\n else:\n raise ValueError('updater is one of \"swendsen wang\"')\n # ------------------------------------------- choose updater\n\n response = self._cxxjij_sampling(\n bqm, init_generator,\n algorithm, sqa_system,\n reinitialize_state, seed, **kwargs\n )\n\n response.info['schedule'] = self.schedule_info\n\n return response\n","sub_path":"openjij/sampler/csqa_sampler.py","file_name":"csqa_sampler.py","file_ext":"py","file_size_in_byte":3067,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"146088846","text":"from pprint import pprint\n\nfrom common.MaltegoTransform import *\n\n__author__ = 'Marc Gurreri'\n__copyright__ = 'Copyright 2018, msploitego Project'\n__credits__ = []\n__license__ = 'GPLv3'\n__version__ = '0.1'\n__maintainer__ = 'Marc Gurreri'\n__email__ = 'me@me.com'\n__status__ = 'Development'\n\ndef dotransform(args):\n mt = MaltegoTransform()\n # mt.debug(pprint(args))\n mt.parseArguments(args)\n ip = mt.getVar(\"ip\")\n port = mt.getVar(\"port\")\n hostid = mt.getVar(\"hostid\")\n dir = mt.getValue()\n\n website = mt.addEntity(\"maltego.URL\", \"http://{}:{}{}\".format(ip,port,dir))\n website.setValue(\"http://{}:{}{}\".format(ip,port,dir))\n website.addAdditionalFields(\"dir\", \"Directory\", False, dir)\n website.addAdditionalFields(\"url\", \"URL\", False, \"http://{}:{}{}\".format(ip,port,dir))\n website.addAdditionalFields(\"ip\", \"IP Address\", False, ip)\n website.addAdditionalFields(\"port\", \"Port\", False, port)\n mt.returnOutput()\n mt.addUIMessage(\"completed!\")\n\ndotransform(sys.argv)\n# args = ['tourl.py', '/xmlrpc.php', 'directory.name=/xmlrpc.php#port=80#ip=10.11.1.50']\n# dotransform(args)\n","sub_path":"msploitego/src/msploitego/transforms/tourl.py","file_name":"tourl.py","file_ext":"py","file_size_in_byte":1116,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"533645368","text":"import re\nimport unicodedata\nimport pandas as pd\nimport nltk\nimport prepare as prep\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\n\n\n\ndef counts_and_ratios(df, column):\n '''\n Description:\n -----------\n This function takes in a columns name and creates a dataframe with value counts and\n percentages of the all categories within the column.\n \n Parameters:\n ----------\n df: Dataframe\n Dataframe being explored\n column: str\n Columns should be a categorical or binary column.\n '''\n labels = pd.concat([df[column].value_counts(),\n df[column].value_counts(normalize=True)], axis=1)\n labels.columns = ['n', 'pct']\n \n return labels\n\n\n\n\ndef create_wordcloud(string):\n # generates an img\n img = WordCloud(background_color='white').generate(string)\n # WordCloud() produces an image object, which can be displayed with plt.imshow\n plt.imshow(img)\n # axis aren't very useful for a word cloud\n plt.axis('off')\n\n\n\n\ndef compare_word_counts(df, text_col, cat_col, group1, group2, n=6):\n '''\n \n '''\n \n df['lem_text'] = [prep.clean_lem_stop(string) for string in df[text_col]]\n \n \n group1_df = df[df[cat_col] == group1]\n group2_df = df[df[cat_col] == group2]\n \n group1_string = ' '.join(group1_df.lem_text)\n group2_string = ' '.join(group2_df.lem_text)\n all_string = group1_string + group2_string\n \n \n group1_freq = pd.Series(group1_string.split()).value_counts()\n group2_freq = 
pd.Series(group2_string.split()).value_counts()\n all_freq = pd.Series(all_string.split()).value_counts()\n \n \n word_counts = (pd.concat([all_freq, group1_freq, group2_freq], axis=1, sort=True)\n .set_axis(['all', group1, group2], axis=1, inplace=False)\n .fillna(0))\n# .apply(lambda s: s.astype(int)\n\n # find words unique to each group\n unique_words = pd.concat([word_counts[word_counts[group1] == 0].sort_values(by=group1).tail(n),\n word_counts[word_counts[group2] == 0].sort_values(by=group2).tail(n)])\n \n \n return word_counts, unique_words\n\n\n\ndef proportion_graph(word_counts, group1, group2, n=20):\n \n var1 = str('p_' + group1)\n var2 = str('p_' + group2)\n (word_counts\n .assign(var1 = word_counts[group1] / word_counts['all'],\n var2 = word_counts[group2] / word_counts['all'])\n .sort_values(by='all')\n [[var1 , var2]]\n .tail(n)\n .sort_values(var2)\n .plot.barh(stacked=True))\n\n plt.title(f'Proportion of {group1} vs {group2} for the {n} most common words')","sub_path":"explore.py","file_name":"explore.py","file_ext":"py","file_size_in_byte":2651,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"611964474","text":"from selenium import webdriver\n\nservice_arg = [\n '--proxy=127.0.0.1:9743',\n '--proxy-type=9743'\n]\n\nbrowser = webdriver.PhantomJS(service_args=service_arg)\nbrowser.get('http://httpbin.org/get')\n\nprint(browser.page_source)\n\n","sub_path":"Agent/sz_10.py","file_name":"sz_10.py","file_ext":"py","file_size_in_byte":228,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"329416343","text":"import os\nimport sys\nsys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))\nimport prime\n\n\ndef check(i):\n for p in primes:\n if p > i:\n return False\n poss_square = ((i - p) / 2) ** .5\n if poss_square == int(poss_square):\n return True\n\n\n\nprimes = prime.primes(10000000)\nfor i in range(3, 10000000, 2):\n if not check(i):\n print(i)\n exit()\n","sub_path":"41-50/46.py","file_name":"46.py","file_ext":"py","file_size_in_byte":424,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"461787392","text":"\"\"\"\r\nLandon Buell\r\nLightning Research\r\nGame Plan Take 11\r\n37 Feb 2019\r\n\"\"\"\r\n \r\n #################\r\n #### IMPORTS ####\r\n\r\nimport lhb_LMA_Base as Base\r\nimport numpy as np\r\nimport os\r\n\r\n ###################################\r\n #### COMPOSITE LEVEL FUNCTIONS ####\r\n\r\ndef Initialize ():\r\n \"\"\"\r\n Initializes full script\r\n --------------------------------\r\n Return dictionary of directories\r\n \"\"\"\r\n int_dir = os.getcwd() # establish initial directory\r\n \r\n #### Enter & Test Reading Directory Path ####\r\n while True: # Setup to establish a reading directory\r\n #read = Base.Input_Directory('READ FROM') # accept user input\r\n read = 'C:/Users/Landon/Documents/Lightning Research/LMA_raw'\r\n path = Base.Change_Directory(read) # attempt to change path \r\n if path == True: # if successful,\r\n break # break the loop\r\n \r\n #### Enter & Test Writing Directory Path ####\r\n while True: # Setup to establish a reading directory\r\n #write = Base.Input_Directory('WRITE TO') # accept user input\r\n write = 'C:/Users/Landon/Documents/Lightning Research/LMA_processed'\r\n path = Base.Change_Directory(write) # attempt to change path\r\n if path == True: # if successful,\r\n break # break the loop\r\n else: # if failure (path DNE)\r\n path = 
Base.Input_Create_Directory() # prompt user to create path\r\n if path == True: # if yes,\r\n os.mkdir(write) # make the directory\r\n break\r\n \r\n paths = {'intdir':int_dir,'readdir':read}\r\n \r\n\r\n","sub_path":"lhb_LMA_Comp.py","file_name":"lhb_LMA_Comp.py","file_ext":"py","file_size_in_byte":1970,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"162559173","text":"#- Python 3 source code\n\n#- bar-nodes-by-day.py ~~\n#\n# This program creates a bar plot that shows the average use of nodes by\n# CSC108 backfill jobs by day of the week, in the OLCF timezone.\n#\n# As always, remember to use the following on OLCF machines:\n#\n# $ module load python_anaconda2\n#\n# ~~ (c) SRW, 12 Jul 2018\n# ~~ last updated 04 Dec 2018\n\nfrom datetime import datetime\n\nimport matplotlib.pyplot as pyplot\nimport os\nimport sqlite3\n\n###\n\ndef analyze(connection):\n\n cursor = connection.cursor()\n\n query = \"\"\"\n SELECT strftime(\"%w\", SampleTime, \"unixepoch\", \"localtime\") AS day,\n avg(ReqProcs / 16) AS nodes\n FROM active\n WHERE\n Account = \"CSC108\"\n AND\n User = \"doleynik\"\n GROUP BY day\n ;\n \"\"\"\n\n days_of_week = []\n nodes = []\n for row in cursor.execute(query):\n days_of_week.append(int(row[\"day\"]))\n nodes.append(row[\"nodes\"])\n\n fig = pyplot.figure()\n ax = fig.add_subplot(111)\n\n pyplot.bar(days_of_week, nodes, align = \"center\")\n pyplot.title(\"Node Usage by Day of Week for CSC108 Backfill\")\n\n pyplot.xticks(range(7), (\"S\", \"M\", \"T\", \"W\", \"T\", \"F\", \"S\"))\n\n pyplot.ylabel(\"Average Nodes\")\n\n ax.xaxis.grid(True)\n pyplot.grid()\n\n current_script = os.path.basename(__file__)\n fig.savefig(os.path.splitext(current_script)[0] + \".png\", dpi = 300)\n\n###\n\ndef main():\n\n # Store current working directory.\n\n cwd = os.getcwd()\n\n # Find the data directory, where this script is running remotely at OLCF and\n # locally on a personal laptop, for example.\n\n if os.path.isdir(\"/lustre/atlas/proj-shared/csc108/data/moab/\"):\n data_dir = \"/lustre/atlas/proj-shared/csc108/data/moab/\"\n elif os.path.isdir(os.path.join(cwd, \"moab\")):\n data_dir = os.path.join(cwd, \"moab\")\n else:\n raise Exception(\"Data directory not found.\")\n\n # Create string to represent path to database file.\n\n dbfilename = os.path.join(data_dir, \"moab-data.sqlite\")\n\n # Open connection to the database (file).\n\n connection = sqlite3.connect(dbfilename)\n\n # Enable users to access columns by name instead of by index.\n\n connection.row_factory = sqlite3.Row\n\n # Ensure read-only access to the database\n\n connection.execute(\"PRAGMA query_only = true;\")\n\n # Run custom analyis code.\n\n analyze(connection)\n\n # Commit any changes and close the connection to the database.\n\n connection.commit()\n connection.close()\n\n###\n\nif __name__ == \"__main__\":\n main()\n\n#- vim:set syntax=python:\n","sub_path":"analysis/bar-nodes-by-day.py","file_name":"bar-nodes-by-day.py","file_ext":"py","file_size_in_byte":2692,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"196890576","text":"from trade_client import ClientInterface\nimport pprint\nimport datetime as dt\nimport pickle\nimport cx_Oracle\nimport random\nimport json\nimport redis\nimport pandas as pd\nimport os\nimport time\nimport sys\n\nclass OracleSql(object):\n '''\n Query data from database\n '''\n\n\n\n def __init__(self, pt=False):\n '''\n Initialize database\n '''\n self.host, self.oracle_port = 
'18.210.64.72', '1521'\n self.db, self.current_schema = 'tdb', 'wind'\n self.user, self.pwd = 'reader', 'reader'\n self.pt = pt\n\n def __enter__(self):\n '''\n Connect to database\n :return: self\n '''\n self.conn = self.__connect_to_oracle()\n return self\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n self.conn.close()\n\n def __connect_to_oracle(self):\n '''\n Connect to database\n :return: connection\n '''\n dsn = self.host + ':' + self.oracle_port + '/' + self.db\n try:\n connection = cx_Oracle.connect(self.user, self.pwd, dsn, encoding=\"UTF-8\", nencoding=\"UTF-8\")\n connection.current_schema = self.current_schema\n if self.pt is True:\n print('Connected to Oracle database successful!')\n except Exception:\n print('Failed on connecting to Oracle database!')\n connection = None\n return connection\n\n def query(self, sql: str):\n '''\n Query data\n '''\n import pandas as pd\n return pd.read_sql(sql, self.conn)\n\n def execute(self, sql: str):\n '''\n Execute SQL scripts, including inserting and updating\n\n '''\n self.conn.cursor().execute(sql)\n self.conn.commit()\n\n\ndef getTradingDays(startDate: str, endDate: str) -> list:\n sql = \\\n '''\n SELECT\n ''' + '''\n\tTRADE_DAYS \n FROM\n asharecalendar \n WHERE\n S_INFO_EXCHMARKET = 'SSE' \n AND trade_days BETWEEN {} AND {}\n '''.format(startDate, endDate)\n with OracleSql() as oracle:\n tradingDays = oracle.query(sql)\n return list(tradingDays.TRADE_DAYS)\n\n\nclass SubInterface(ClientInterface):\n def __init__(self, name):\n # self.positions = query_postion()\n # self.trade_pnl(1)\n super(SubInterface, self).__init__(name)\n self.position = None\n # self.check_balance()\n\n\n def init_pnl(self, date):\n #Get last close price using Oracle\n self.ticker_list = [key for key, value in self.position.items()]\n print(\"初始持仓:\", self.ticker_list)\n for contract, content in self.position.items():\n S_INFO_WINDCODE = contract[:6] + \".CFE\"\n sql = \\\n '''\n SELECT\n S_DQ_CLOSE \n FROM\n \"CINDEXFUTURESEODPRICES\" \n WHERE\n TRADE_DT = {}\n AND S_INFO_WINDCODE = '{}'\n '''.format(date, S_INFO_WINDCODE)\n with OracleSql() as oracle:\n content[\"last_close_price\"] = float(oracle.query(sql).squeeze())\n self.update_price()\n\n\n def update_price(self):\n # Get current price using Redis\n config_file_path = 'config.json'\n with open(config_file_path) as f:\n config_data = json.load(f)\n redis_ip = config_data['redis_ip']\n local_redis = redis.Redis(host=redis_ip, port=6379, db=3,\n decode_responses=True)\n stmt = 'SELECT s_info_windcode, fs_mapping_windcode FROM CfuturesContractMapping ' \\\n ' WHERE S_INFO_WINDCODE like \\'IC0%.CFE\\' AND STARTDATE <= {0} AND ENDDATE >= {0}'.format(\n dt.datetime.now().strftime('%Y%m%d'))\n wind_conn = cx_Oracle.connect('reader/reader@18.210.64.72:1521/tdb')\n wind_conn.current_schema = 'WIND'\n df = pd.read_sql(stmt, wind_conn).set_index('S_INFO_WINDCODE')\n fut_map_dict = df['FS_MAPPING_WINDCODE'].to_dict()\n ic00 = float(local_redis.get(fut_map_dict['IC00.CFE']))\n ic01 = float(local_redis.get(fut_map_dict['IC01.CFE']))\n ic02 = float(local_redis.get(fut_map_dict['IC02.CFE']))\n self.position[\"IC1908_1\"][\"price\"] = ic00\n self.position[\"IC1908_2\"][\"price\"] = ic00\n self.position[\"IC1909_1\"][\"price\"] = ic01\n self.position[\"IC1909_2\"][\"price\"] = ic01\n self.position[\"IC1912_1\"][\"price\"] = ic02\n self.position[\"IC1912_2\"][\"price\"] = ic02\n # print(pp.pprint(self.position))\n self.pos_pnl = 0\n for contract, content in self.position.items():\n if contract[-1] == '1':\n 
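# Contracts suffixed '_1' gain as the price rises and '_2' lose, mirroring the two legs.\n                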
self.pos_pnl += (content[\"price\"] - content[\"last_close_price\"]) * float(content[\"current_vol\"]) * 200\n elif contract[-1] == '2':\n self.pos_pnl -= (content[\"price\"] - content[\"last_close_price\"]) * float(content[\"current_vol\"]) * 200\n print(\"持仓盈亏:\", self.pos_pnl)\n\n\n def trade_pnl(self, record):\n if record[\"futures_direction\"] == 1: # Open new position\n key = record[\"stock_code\"] + '_' + str(record[\"entrust_direction\"])\n self.position[key][\"current_vol\"] += record[\"entrust_quantity\"]\n elif record[\"futures_direction\"] == 2: # Close new position\n if record[\"entrust_direction\"] == 1:\n key = record[\"stock_code\"] + \"_2\"\n elif record[\"entrust_direction\"] == 2:\n key = record[\"stock_code\"] + \"_1\"\n self.position[key][\"current_vol\"] -= record[\"entrust_quantity\"]\n else:\n print(\"-----------------------\",record[\"futures_direction\"])\n\n if not os.path.exists(\"pnl_adjusted.pkl\"):\n with open(\"pnl_adjusted.pkl\", 'wb') as f:\n pickle.dump(0, f)\n with open(\"pnl_adjusted.pkl\", \"rb\") as f:\n pnl_adjusted = pickle.load(f)\n # Think it in a straight and simple way: when the price rises, we lose money if we long and we earn money if we short.\n if record[\"entrust_direction\"] == 1:\n pnl_adjusted -= record[\"total_deal_amount\"] - self.position[key][\"last_close_price\"] * 200\n elif record[\"entrust_direction\"] == 2:\n pnl_adjusted += record[\"total_deal_amount\"] - self.position[key][\"last_close_price\"] * 200\n print(\"交易盈亏调整:\", pnl_adjusted)\n with open(\"pnl_adjusted.pkl\", \"wb\") as f:\n pickle.dump(pnl_adjusted, f)\n self.pnl_adjusted = pnl_adjusted\n\n\n def onOnlySubscribeKnock(self, info):\n # print('on only subscribe knock', info)\n print(pp.pprint(info))\n self.trade_pnl(info)\n print(\"实时盈亏:\", self.pos_pnl + self.pnl_adjusted) # Actually, we should update self.pos_pnl here!!!\n\n # with open(str(random.randint(0, 100000)) + \".pkl\", 'wb') as f:\n # pickle.dump(info, f)\n\n\n def onQueryPosition(self, info):\n # print('query position: ', info)\n self.position = info[\"Position\"]\n self.preprocess_contract()\n\n\n date = dt.datetime.strftime(dt.datetime.now(), \"%Y%m%d\")\n tradingDay_list = getTradingDays(\"20120101\", \"20191231\")\n date_lag1 = tradingDay_list[tradingDay_list.index(date) - 1]\n\n self.init_pnl(date_lag1)\n\n\n def preprocess_contract(self):\n for contract, _ in self.position.items():\n for item, _ in self.position[contract].items():\n if item != \"combi_no\":\n self.position[contract][item] = float(self.position[contract][item])\n new_dict = dict()\n for contract, content in self.position.items():\n if contract.startswith(\"IC19\"):\n contract_name = contract[0 : 8]\n if contract_name not in new_dict:\n new_dict[contract_name] = content\n else:\n temp_dict = dict()\n for key, value in content.items():\n temp_dict[key] = new_dict[contract_name][key] + content[key]\n new_dict[contract_name] = temp_dict\n new_dict.pop(\"IC1907_2\")\n self.position = new_dict\n\n\n\nclass Position(object):\n def __init__(self):\n self.interface = SubInterface()\n self.position = self.interface.query_position()\n\n def update_xx_info(self, info):\n pass\n\n def update_xx2_info(self, info):\n pass\n # self.output(= None\n\n def output(self):\n print('Helo')\n\n\nif __name__ == '__main__':\n pp = pprint.PrettyPrinter(indent=4)\n # 10034的O32账号无权限查询8301账户,因此用7043账号查询8301账户\n # account_no = '8302'\n # combi_no = '83023005'\n account_no = '8301'\n combi_no = '8301361'\n\n reset = input(\"是否重置浮动盈亏? 
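# trade_pnl above re-reads and re-writes pnl_adjusted.pkl on every fill. The
# same read-modify-write step as a small standard-library helper; the default
# of 0 mirrors the bootstrap branch in the original, and update() is a stand-in
# for the per-fill adjustment.
import os
import pickle

def update_pickled(path, update, default=0):
    value = default
    if os.path.exists(path):
        with open(path, "rb") as f:
            value = pickle.load(f)
    value = update(value)
    with open(path, "wb") as f:
        pickle.dump(value, f)
    return value

# e.g. update_pickled("pnl_adjusted.pkl", lambda pnl: pnl + adjustment)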
按任意键选择“否”, 输入yes选择“是”\\n>>>\")\n if reset == \"yes\":\n os.remove(\"pnl_adjusted.pkl\")\n\n interface = SubInterface('ufx_trading_hhk')\n\n interface.init()\n # 查询持仓\n positions = interface.query_position(account_no, combi_no)\n interface.subscribe_knock(combi_no)\n\n ### Mock trading ###\n with open(\"Q.pkl\", 'rb') as f:\n Q = pickle.load(f)\n for label in \"abccdabccdaaaddccbb\":\n time.sleep(5)\n print('\\n\\n' + \"~\" * 80)\n # print(pp.pprint(Q[label]))\n interface.trade_pnl(Q[label])\n interface.update_price()\n print(\"实时盈亏:\", interface.pos_pnl + interface.pnl_adjusted)\n print(pp.pprint(interface.position))\n\n ####################=\n\n\n\n\n","sub_path":"main - backup-2019-08-07-2.py","file_name":"main - backup-2019-08-07-2.py","file_ext":"py","file_size_in_byte":9522,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"49843571","text":"from tiny_string_parser.base_writers import BaseHeaderFileWriter, BaseCodeFileWriter\nfrom tiny_string_parser.compiler import CONDITION_ANY_LETTER, CONDITION_HEXADECIMAL_NUMBER, \\\n CONDITION_DECIMAL_NUMBER, CONDITION_INTEGER_NUMBER\nfrom functools import reduce\n\n# binary encoding\n#\n# State:\n#\n# byte 0: shortcut only-transition state : match 7bit character and advance one state up if successful\n#\n# 0CCCCCCC 1 character\n#\n# byte 0: state header\n#\n# 1LLLLLLL 0 length of state record\n#\n# Transition:\n#\n# byte 0: header\n#\n# + bit 7 flags present\n# |+--- bit 6 keep character\n# ||+--- bit 5 variable operation present\n# |||+--- bit 4 --+ target state reference size: 0 = stay on place 2 = uint_8 relative\n# ||||+--- bit 3 --/ 1 = advance one step 3 = uint16_t absolute\n# |||||+--- bit 2 --+ 0 = character follows\n# ||||||+--- bit 1 | 1 = match integer 3 = match hex 5-7 = reserved\n# |||||||+--- bit 0 --+ 2 = match decimal 4 = match any\n# 00000000\n#\n# if flags present: following byte is a bitmask to flags\n#\n# if character follows: following byte is a character to match\n#\n# if variable operation present: following byte is:\n#\n# + bit 7 --+ operation: 0 = reset variable 2 = set constant\n# |+--- bit 6 --+ 1 = append to variable\n# ||+--- bit 5 --+\n# |||+--- bit 4 |\n# ||||+--- bit 3 | variable id\n# |||||+--- bit 2 |\n# ||||||+--- bit 1 |\n# |||||||+--- bit 0 --+\n# 00000000\n#\n# if variable operation present with set constant operation, then byte constant follows\n#\n# target state reference follows. 
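# A round-trip check of the transition-header byte laid out in the comment
# above: bit 7 = flags present, bit 6 = keep character, bit 5 = variable op,
# bits 4-3 = target mode, bits 2-0 = match op. Pure Python; the function names
# are illustrative, the bit positions come from the layout comment.
def pack_header(flags, keep, var_op, target, match):
    assert 0 <= target <= 3 and 0 <= match <= 7
    return (flags << 7) | (keep << 6) | (var_op << 5) | (target << 3) | match

def unpack_header(b):
    return ((b >> 7) & 1, (b >> 6) & 1, (b >> 5) & 1, (b >> 3) & 3, b & 7)

assert unpack_header(pack_header(1, 0, 1, 2, 4)) == (1, 0, 1, 2, 4)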
either uint16_t or uint8_t\n#\n\nclass EncodedStateMachineBuilder:\n def __init__(self):\n self.offsets = dict()\n self.compiler = None\n\n def set_offset(self, state, offset):\n self.offsets[state.name] = offset\n\n def get_offset(self, state):\n return self.offsets.get(state.name)\n\n def build(self, compiler):\n self.compiler = compiler\n\n def recompute_offsets():\n offset = 0\n for state in compiler.states:\n for transition in state.outgoing_transitions:\n if transition.target_state.order < transition.source_state.order:\n transition.target_state_reference_is_long = True\n self.set_offset(state, offset)\n offset = offset + self.encoded_state_size(state)\n\n def find_too_long_jump():\n for state in compiler.states:\n for transition in state.outgoing_transitions:\n if not transition.target_state_reference_is_long \\\n and self.get_offset(transition.target_state) > self.get_offset(transition.source_state) + 256:\n transition.target_state_reference_is_long = True\n return True\n return False\n\n while True:\n recompute_offsets()\n if not find_too_long_jump():\n break\n\n def encode(self):\n b = b\"\"\n for state in filter(lambda x: not x.is_final, self.compiler.states):\n b = b + self.encode_state(state)\n return b\n\n def encoded_transition_size(self, transition):\n size = 1\n if len(transition.flags) > 0:\n size = size + 1\n if len(transition.condition) == 1:\n size = size + 1\n if transition.store_variable is not None or transition.reset_variable is not None:\n size = size + 1\n if transition.store_constant is not None:\n size = size + 1\n if transition.target_state_is_immediatelly_following() or transition.target_state_is_cycle():\n size = size + 0\n elif transition.target_state_reference_is_long:\n size = size + 2\n else:\n size = size + 1\n return size\n\n def encoded_state_size(self, state):\n if state.is_final or state.is_trivial_state():\n return 1\n\n size = 1\n for transition in state.outgoing_transitions:\n size = size + self.encoded_transition_size(transition)\n return size\n\n def encode_state(self, state):\n if state.is_final:\n raise Exception(\"Final states are not encodeable\")\n if state.is_trivial_state():\n return state.outgoing_transitions[0].condition.encode('ascii')\n b = bytes([0b10000000 | (self.encoded_state_size(state) - 1)])\n for transition in state.outgoing_transitions:\n b = b + self.encode_transition(transition)\n assert len(b) == self.encoded_state_size(state)\n return b\n\n def encode_transition(self, transition):\n b = b\"\"\n header = 0\n if len(transition.flags) > 0:\n bit_mask = reduce(lambda x, y: x | y, map(lambda x: 1 << self.compiler.all_flags[x].index, transition.flags))\n b = b + bytes([bit_mask])\n header = header | 0b10000000\n if transition.keep_character:\n header = header | 0b01000000\n if len(transition.condition) == 1:\n b = b + transition.condition.encode('ascii')\n elif transition.condition == CONDITION_INTEGER_NUMBER:\n header = header | 1\n elif transition.condition == CONDITION_DECIMAL_NUMBER:\n header = header | 2\n elif transition.condition == CONDITION_HEXADECIMAL_NUMBER:\n header = header | 3\n elif transition.condition == CONDITION_ANY_LETTER:\n header = header | 4\n else:\n raise Exception(\"Do not know how to serialize condition %s\" % transition.condition)\n if transition.reset_variable is not None:\n b = b + bytes([self.compiler.variables[transition.reset_variable].index])\n header = header | 0b00100000\n elif transition.store_constant is not None:\n b = b + bytes([0b10000000 + 
self.compiler.variables[transition.store_variable].index])\n b = b + bytes([0]) # todo constant value\n header = header | 0b00100000\n elif transition.store_variable is not None:\n b = b + bytes([0b01000000 + self.compiler.variables[transition.store_variable].index])\n header = header | 0b00100000\n if transition.target_state_is_immediatelly_following():\n header = header | 0b00001000\n elif transition.target_state_is_cycle():\n header = header | 0b00000000\n elif transition.target_state_reference_is_long:\n lo_byte = self.get_offset(transition.target_state) & 0xff\n hi_byte = self.get_offset(transition.target_state) >> 8\n b = b + bytes([lo_byte, hi_byte])\n header = header | 0b00011000\n else:\n diff = self.get_offset(transition.target_state) - self.get_offset(transition.source_state)\n b = b + bytes([diff])\n header = header | 0b00010000\n b = bytes([header]) + b\n assert len(b) == self.encoded_transition_size(transition)\n return b\n\n def enhance_state_label(self, state):\n if self.get_offset(state) is not None:\n return \"\\noffset %d\" % self.get_offset(state)\n\n\nclass EncodedStateMachineCodeFileWriter(BaseCodeFileWriter):\n def __init__(self, compiler, builder):\n super().__init__(compiler)\n self.builder = builder\n\n def _write_implementation(self, fh):\n variable_selectors = \"\"\n for name, variable in self.compiler.variables.items():\n if variable.len is not None:\n selector = \"parserState->%s\" % variable.name;\n else:\n selector = \"&(parserState->%s)\" % variable.name;\n variable_selectors = variable_selectors + \" case %d: variable = %s; goto %s_ops;\\n\" % (\n variable.index,\n selector,\n variable.type.replace(\" \", \"_\"))\n\n state_machine = \"\"\n i = 0\n until_next_state = 0\n is_first_in_state = True\n for c in self.builder.encode():\n if i > 0:\n state_machine = state_machine + \", \"\n if until_next_state == 0:\n state_machine = state_machine + \"\\n /* %4d */ \" % i\n if c & 0x80 != 0:\n until_next_state = c & 0x7f\n else:\n until_next_state = 0\n is_first_in_state = True\n else:\n until_next_state = until_next_state - 1\n is_first_in_state = False\n i = i + 1\n state_machine = state_machine + (\n \"'%s'\" % bytes([c]).decode('ascii') if is_first_in_state and c >= 32 and c <= 127 else \"0x%02x\" % c)\n\n fh.write(\"\"\"\n#include \"%s.h\"\n#include <ctype.h>\n\nstatic const uint8_t STATE__MACHINE[] = { %s };\n\n#define STATE_LEN_IS_TRIVIAL(x) !((x)&0x80)\n#define STATE_LEN(x) ((x)&0x7f)\n\n#define TRANSITION_HEADER_HAS_FLAGS(x) ((x)&0x80)\n#define TRANSITION_HEADER_KEEP(x) ((x)&0x40)\n#define TRANSITION_HEADER_HAS_VAR(x) ((x)&0x20)\n#define TRANSITION_HEADER_TARGET_ARG(x) ((x)&0x10)\n#define TRANSITION_HEADER_TARGET(x) (((x)>>3)&0x03)\n#define TRANSITION_HEADER_MATCH_OP(x) ((x)&0x07)\n\n#define TRANSITION_MATCH_SPECIFIC_CHAR 0\n#define TRANSITION_MATCH_INTEGER 1\n#define TRANSITION_MATCH_DECIMAL 2\n#define TRANSITION_MATCH_HEX 3\n#define TRANSITION_MATCH_ANY 4\n\n#define TRANSITION_TARGET_STAY 0\n#define TRANSITION_TARGET_STEP_ONE 1\n#define TRANSITION_TARGET_REL8 2\n#define TRANSITION_TARGET_ABS16 3\n\n#define OPERATION_HAS_ARGUMENT(x) ((x)&0x80)\n#define OPERATION_VARIABLE(x) ((x)&0x3f)\n#define OPERATION(x) ((x)>>6)\n\n#define OPERATION_RESET 0\n#define OPERATION_STORE 1\n#define OPERATION_SET 2\n\n#define FLAG_NEGATIVE 1\n\nbool %sprocess_character(%s *parserState, unsigned char c) {\n const unsigned char *ip = STATE__MACHINE + parserState->state;\n uint8_t parser_flags = parserState->flags;\n uint8_t state_len;\n \n for(;;) {\ngo_again:\n state_len = 
*(ip++);\n\n if (STATE_LEN_IS_TRIVIAL(state_len)) {\n // immediate match and advance\n if (c == state_len) {\n goto character_finished;\n } else {\n goto unmatched_character;\n }\n }\n\n state_len = STATE_LEN(state_len);\n uint8_t original_state_len = state_len;\n while (state_len > 0) {\n uint8_t transition_header;\n uint8_t flags;\n uint8_t variable_operation;\n uint8_t variable_constant;\n uint8_t character;\n\n transition_header = *(ip++);\n state_len --;\n\n if (TRANSITION_HEADER_HAS_FLAGS(transition_header)) {\n flags = *(ip++);\n state_len --;\n }\n\n if (TRANSITION_HEADER_MATCH_OP(transition_header) == TRANSITION_MATCH_SPECIFIC_CHAR) {\n character = *(ip++);\n state_len --;\n }\n\n if (TRANSITION_HEADER_HAS_VAR(transition_header)) {\n variable_operation = *(ip++);\n state_len --;\n\n if (OPERATION_HAS_ARGUMENT(variable_operation)) {\n variable_constant = *(ip++);\n state_len --;\n }\n }\n\n if (TRANSITION_HEADER_HAS_FLAGS(transition_header)\n && (!(parser_flags & flags))) {\n goto try_next_transition;\n }\n\n switch (TRANSITION_HEADER_MATCH_OP(transition_header)) {\n case TRANSITION_MATCH_SPECIFIC_CHAR:\n if (c != character) {\n goto try_next_transition;\n }\n break;\n case TRANSITION_MATCH_INTEGER:\n if (c != '+' && c != '-' && !(c >= '0' && c <= '9')) {\n goto try_next_transition;\n }\n break;\n case TRANSITION_MATCH_DECIMAL:\n if (!(c >= '0' && c <= '9')) {\n goto try_next_transition;\n }\n break;\n case TRANSITION_MATCH_HEX:\n if (!(c >= '0' && c <= '9') && !(c >= 'a' && c <= 'f') && !(c >= 'A' && c <= 'F')) {\n goto try_next_transition;\n }\n break;\n }\n\n if (TRANSITION_HEADER_HAS_VAR(transition_header)) {\n void *variable;\n switch (OPERATION_VARIABLE(variable_operation)) {\n%s }\n goto end_of_operation;\n\nuint8_t_ops:\nint8_t_ops:\n switch (OPERATION(variable_operation)) {\n case OPERATION_RESET:\n parser_flags &= ~FLAG_NEGATIVE;\n *((uint8_t *)variable) = 0;\n goto reset_common;\n case OPERATION_STORE:\n switch (TRANSITION_HEADER_MATCH_OP(transition_header)) {\n case TRANSITION_MATCH_INTEGER:\n if (parserState->len == 0) {\n if (c == '-') {\n parser_flags |= FLAG_NEGATIVE;\n goto end_of_operation;\n }\n if (c == '+') {\n goto end_of_operation;\n }\n }\n // fall thru\n case TRANSITION_MATCH_DECIMAL:\n *((uint8_t *)variable) = *((uint8_t *)variable) * 10 + (c - '0');\n if (c != '0' && parser_flags & FLAG_NEGATIVE) {\n parser_flags &= ~FLAG_NEGATIVE;\n *((int8_t *)variable) = -*((int8_t *)variable);\n }\n goto store_common;\n case TRANSITION_MATCH_HEX:\n *((uint8_t *)variable) = *((uint8_t *)variable) << 4 + (c < 'A' ? c - '0' : 10 + c < 'a' ? 
c - 'A': c - 'a');\n goto store_common;\n }\n case OPERATION_SET:\n *((uint8_t *)variable) = variable_constant;\n goto end_of_operation;\n }\n goto end_of_operation;\n\nuint16_t_ops:\nint16_t_ops:\n switch (OPERATION(variable_operation)) {\n case OPERATION_RESET:\n parser_flags &= ~FLAG_NEGATIVE;\n *((uint16_t *)variable) = 0;\n goto reset_common;\n case OPERATION_STORE:\n switch (TRANSITION_HEADER_MATCH_OP(transition_header)) {\n case TRANSITION_MATCH_INTEGER:\n if (c == '-') {\n parser_flags |= FLAG_NEGATIVE;\n goto end_of_operation;\n }\n if (c == '+') {\n goto end_of_operation;\n }\n // fall thru\n case TRANSITION_MATCH_DECIMAL:\n *((uint16_t *)variable) = *((uint8_t *)variable) * 10 + (c - '0');\n if (c != '0' && parser_flags & FLAG_NEGATIVE) {\n parser_flags &= ~FLAG_NEGATIVE;\n *((int16_t *)variable) = -*((int16_t *)variable);\n }\n goto store_common;\n case TRANSITION_MATCH_HEX:\n *((uint16_t *)variable) = *((uint8_t *)variable) << 4 + (c < 'A' ? c - '0' : 10 + c < 'a' ? c - 'A': c - 'a');\n goto store_common;\n }\n }\n goto end_of_operation;\n\nchar_ops:\nunsigned_char_ops:\n switch (OPERATION(variable_operation)) {\n case OPERATION_RESET:\n *((uint8_t *)variable) = '\\\\0';\n goto reset_common;\n case OPERATION_STORE:\n *((uint8_t *)variable + parserState->len++) = c;\n *((uint8_t *)variable + parserState->len) = '\\\\0';\n goto end_of_operation;\n }\n goto end_of_operation;\n\nstore_common:\n parserState->len ++;\n goto end_of_operation;\n\nreset_common:\n parserState->len = 0;\n\n }\n\nend_of_operation:\n switch (TRANSITION_HEADER_TARGET(transition_header)) {\n case TRANSITION_TARGET_STAY:\n ip -= original_state_len - state_len + 1;\n break;\n case TRANSITION_TARGET_STEP_ONE:\n ip += state_len;\n break;\n case TRANSITION_TARGET_REL8: // advance by uint8_t offset\n ip = ip - ((original_state_len - state_len + 1) - *ip);\n state_len --;\n break;\n case TRANSITION_TARGET_ABS16:\n ip = STATE__MACHINE + *(uint16_t *)ip;\n state_len -= 2;\n break;\n }\n\n if (TRANSITION_HEADER_KEEP(transition_header)) {\n // try again for the same character\n goto go_again;\n }\n\n goto character_finished;\n\ntry_next_transition:\n if (TRANSITION_HEADER_TARGET_ARG(transition_header)) {\n ip ++;\n state_len --;\n if (TRANSITION_HEADER_TARGET(transition_header) == TRANSITION_TARGET_ABS16) {\n ip ++;\n state_len --;\n }\n }\n }\n\n bool felt_out_of_start_state;\nunmatched_character:\n felt_out_of_start_state = (ip == STATE__MACHINE + %s%s); \n ip = STATE__MACHINE;\n\n if (felt_out_of_start_state) {\n goto character_finished;\n }\n }\n\ncharacter_finished:\n parserState->state = ip - STATE__MACHINE;\n parserState->flags = parser_flags;\n return true;\n}\n \"\"\" % (self.compiler.parser_name,\n state_machine,\n self.compiler.interface_prefix,\n self.compiler.state_variable_type,\n variable_selectors,\n self.compiler.const_prefix,\n self.compiler.states[1].name))\n\n\nclass EncodedStateMachineHeaderFileWriter(BaseHeaderFileWriter):\n def __init__(self, compiler, builder):\n super().__init__(lambda x: builder.get_offset(x), compiler)\n self.builder = builder\n\n\n","sub_path":"tiny_string_parser/encoded_state_machine_v1.py","file_name":"encoded_state_machine_v1.py","file_ext":"py","file_size_in_byte":19442,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"557730317","text":"class Edge():\n def __init__(self,colour = 'u'):\n self.colour = colour\n self.not_colour = []\n def addNotColour(self,colour):\n for i in colour:\n if colour not in 
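# The generated-C hex branch just above folds a hex digit with
# "c < 'A' ? c - '0' : 10 + c < 'a' ? c - 'A' : c - 'a'". C binds + tighter
# than <, so the inner test is really (10 + c) < 'a', and the letter branches
# are missing their +10 offset. A corrected reference in plain Python, so the
# intended mapping is unambiguous:
def hex_digit(c):
    if '0' <= c <= '9':
        return ord(c) - ord('0')
    if 'A' <= c <= 'F':
        return ord(c) - ord('A') + 10
    if 'a' <= c <= 'f':
        return ord(c) - ord('a') + 10
    raise ValueError("not a hex digit: %r" % c)

assert [hex_digit(c) for c in "09aF"] == [0, 9, 10, 15]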
self.not_colour:\n self.not_colour.append(i)\n # def changeColour(self,index):\n # self.colour = index\n def __repr__(self):\n return self.colour\ndef checkFailedCombination(rowx,rowy,combination):\n failedCombination = combination.copy()\n for i in range(num):\n if rowx[i] == 0 or rowy[i]==0:\n continue\n elif [rowx[i].colour,rowy[i].colour] in failedCombination:\n failedCombination.remove([rowx[i].colour,rowy[i].colour])\n return failedCombination\n\ndef searchForbidden(indexOfRow,indexOfColumn):\n edge = graph[indexOfRow][indexOfColumn]\n row = graph[indexOfRow]\n for col in range(num):\n if row[col] == 0 or row[col].colour =='n':\n continue\n elif row[col].colour != edge.colour and row[col].colour != 'u':\n graph[indexOfColumn][col].addNotColour(findTheRemainingOne(row[col],edge))\n graph[col][indexOfColumn].addNotColour(findTheRemainingOne(row[col],edge))\ndef findTheRemainingOne(edgex,edgey):\n result = ['r','g','b']\n result.remove(edgex.colour)\n result.remove(edgey.colour)\n return result\n\n\ndef checkValidity(indexX,indexY):\n global graph,num\n rowx = graph[indexX]\n rowy = graph[indexY]\n fail = []\n combinations = []\n if rowx[indexY].colour == 'n':\n combinations = notconnected\n # combinations = (notconnected if (rowx[indexY].colour == 'u' and rowy[indexX].colour=='u') else connected)\n # fail = checkFailedCombination(rowx,rowy,combinations)\n elif rowx[indexY].colour == 'u':\n if len(rowx[indexY].not_colour) ==3:\n rowx[indexY].colour = 'n'\n rowy[indexX].colour = 'n'\n combinations = notconnected\n else:\n for i in ['r', 'g', 'b']:\n if i not in rowx[indexY].not_colour:\n rowx[indexY].colour = i\n rowy[indexX].colour = i\n combinations = connected[i]\n searchForbidden(indexX,indexY)\n searchForbidden(indexY,indexX)\n else:\n combinations = connected[rowx[indexY].colour]\n fail = checkFailedCombination(rowx, rowy, combinations)\n if combinations == []:\n import sys\n sys.exit()\n for combi in fail:\n for col in range(num):\n # if rowx[col].colour == combi[0] and rowy[col].colour == 'u' and rowy[col].not_colour != combi[1]:\n if rowx[col] == 0 or rowx[col].colour == 'n' or rowy[col] ==0 or rowy[col] =='n':\n continue\n if rowx[col].colour == combi[0] and rowy[col].colour == 'u' and combi[1] not in rowy[col].not_colour:\n rowy[col].colour = combi[1]\n graph[col][indexY].colour = combi[1]\n searchForbidden(indexY,col)\n searchForbidden(col,indexY)\n fail.remove(combi)\n break\n elif rowy[col].colour == combi[1] and rowx[col].colour == 'u' and combi[0] not in rowx[col].not_colour:\n rowx[col].colour = combi[0]\n graph[col][indexX].colour = combi[0]\n searchForbidden(indexX,col)\n searchForbidden(col,indexX)\n fail.remove(combi)\n break\n elif rowx[col].colour == 'u' and rowy[col].colour == 'u' and (combi[0] not in rowx[col].not_colour) and (combi[1] not in rowy[col].not_colour):\n rowx[col].colour = combi[0]\n rowy[col].colour = combi[1]\n graph[col][indexY].colour = combi[1]\n graph[col][indexX].colour = combi[0]\n searchForbidden(indexX,col)\n searchForbidden(indexY,col)\n searchForbidden(col,indexX)\n searchForbidden(col,indexY)\n break\n\n if fail == []:\n return\n else:\n num = num + 1\n new_row = []\n for i in graph:\n # print(i)\n # print(num)\n i.append(Edge())\n new_row.append(Edge())\n new_row.append(0)\n graph.append(new_row)\n checkValidity(indexX,indexY)\n # print(num)\n\n\nfirstLine = [Edge(),Edge('n'),Edge('r'),Edge(),Edge()]\ngraph =[firstLine]+[[Edge('n'),Edge(),Edge(),Edge(),Edge()]]+\\\n [[Edge(),Edge(),Edge(),Edge(),Edge()]]+\\\n 
[[Edge(),Edge(),Edge(),Edge(),Edge()]]+\\\n [[Edge(),Edge(),Edge(),Edge(),Edge()]]\n\nnum = 5\nfor i in range(len(graph)):\n graph[i][i] = 0\n\nconnected = {'r':[['r','r'],['r','g'],['r','b'],['b','r'],['b','b'],['g','r'],['g','g']],\n 'b':[['b','b'],['b','g'],['b','r'],['r','r'],['r','b'],['g','b'],['g','g']],\n 'g':[['g','g'],['g','b'],['g','r'],['r','r'],['r','g'],['b','g'],['b','b']]\n }\nnotconnected = [['r','r'],['r','g'],['r','b'],\n ['g','r'],['g','g'],['g','b'],\n ['b','r'],['b','g'],['b','b']]\n# print(graph)\ni = 0\n\nwhile True:\n while i < num:\n j = i+1\n while j<num:\n checkValidity(i,j)\n j = j+1\n\n print(\"j = \"+str(j))\n i = i+1\n print('i = '+str(i))\n print(graph)\n break\n","sub_path":"colourCombination.py","file_name":"colourCombination.py","file_ext":"py","file_size_in_byte":5324,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"149617473","text":"import requests\nimport xml.etree.ElementTree as ET\nfrom collections import defaultdict\nimport json\nimport os\nfrom zipfile import ZipFile\nfrom io import BytesIO\n\nfrom common.setting import OUTPUT_DIR,CONFIG_PATH\nfrom util.lib import Logger\nfrom util.lib import ConfigLoader\nfrom util.lib import FileLoader\n\nBASE_URL = 'http://resource.ufocatch.com/atom/edinetx/query/'\nNAMESPACE = '{http://www.w3.org/2005/Atom}'\n\nclass GetEDINET():\n \n def __init__(self,mode,code_option):\n self.mode = mode\n conf_cls = ConfigLoader.ConfigLoader(mode)\n confs = conf_cls.loader(\"EDINET\")\n if code_option:\n loader_cls = FileLoader.FileLoader([\"s_codes.csv\"],mode)\n loaders = loader_cls.loader()\n targets = loaders[\"s_codes\"]\n targets = targets[targets[\"業種分類\"]==\"サービス業\"][\"銘柄コード\"]\n self.s_codes = list(targets)\n else:\n self.s_codes = confs[\"s_codes\"]\n self.targets = confs[\"targets\"]\n self.start_date = confs[\"start_date\"]\n self.end_date = confs[\"end_date\"]\n \n def _get_response(self,s_code):\n url = \"{}{}\".format(BASE_URL,str(s_code))\n res = requests.get(url)\n return res.text\n \n def _is_target(self,title):\n for target in self.targets:\n if target in title:\n return True\n else:\n pass\n return False\n \n def _is_period(self,updated):\n if updated >= self.start_date and updated <= self.end_date:\n return True\n else:\n return False\n \n def _get_link(self,tree):\n yuhos = defaultdict(dict)\n for el in tree.findall('.//{}entry'.format(NAMESPACE)):\n updated = el.find(NAMESPACE+'updated').text\n if not self._is_period(updated):\n Logger.Logger(\"Out of term.({})\".format(updated),self.mode).logging()\n continue\n title = el.find('{}title'.format(NAMESPACE)).text\n if not self._is_target(title):\n Logger.Logger(\"Out of target.({})\".format(title),self.mode).logging()\n continue\n Logger.Logger('writing:{}.........'.format(title[:30]),self.mode).logging()\n _id = el.find('{}id'.format(NAMESPACE)).text\n link = el.find('./{}link[@type=\"application/zip\"]'.format(NAMESPACE))\n url = link.attrib['href']\n yuhos[_id] = {'id':_id,'title':title,'url':url}\n return yuhos\n \n def _write_download_info(self,infos, json_path):\n with open(json_path,'w') as f:\n json.dump(infos, f, indent=4)\n \n def _download_all_xbrl_files(self,infos,output_dir): \n for s_code, info_dcts in infos.items():\n output_path = os.path.join(output_dir,str(s_code))\n if not os.path.exists(output_path):\n os.mkdir(output_path)\n \n for _id, info_dct in info_dcts.items():\n self._download_xbrl_file(info_dct['url'],_id,output_path)\n \n def 
_download_xbrl_file(self,url,_id,output_path):\n r = requests.get(url)\n if r.ok:\n path = os.path.join(output_path,_id)\n if not os.path.exists(path):\n os.mkdir(path)\n r = requests.get(url)\n z = ZipFile(BytesIO(r.content))\n z.extractall(path)\n \n def processing(self):\n for s_code in self.s_codes:\n Logger.Logger(\"Start to process to get data about s_code = {}\".format(str(s_code)),self.mode).logging()\n res = self._get_response(str(s_code))\n ET_tree = ET.fromstring(res.encode('utf-8'))\n ET.register_namespace('',NAMESPACE[1:-1])\n\n dat_download = defaultdict(dict)\n # get download file info\n info_dct = self._get_link(ET_tree)\n dat_download[s_code] = info_dct\n \n json_path = os.path.join(OUTPUT_DIR[self.mode],\"json/{}.json\".format(str(s_code)))\n\n self._write_download_info(info_dct,json_path)\n\n xbrl_dir = os.path.join(OUTPUT_DIR[self.mode],\"xbrl\")\n self._download_all_xbrl_files(dat_download,xbrl_dir)\n\n Logger.Logger(\"Finish to process to get data about s_code = {}\".format(str(s_code)),self.mode).logging()","sub_path":"FinancialAnalytics/util/DataCollection/GetEDINET.py","file_name":"GetEDINET.py","file_ext":"py","file_size_in_byte":4333,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"211885822","text":"# Universidade Federal do Amazonas\n# Joao Pedro Salgado Pio Oliveira\n# Lab. de Codificacao 4, Exer \n# 28/07/2016\n\n# Entradas\nn = int(input(\"Qual o numero:\"))\n\n# codificacao\nwhile n != -1 :\n\tif n % 2 == 0 :\n\t\tprint(\"par\")\n\telse :\n\t\tprint(\"impar\")\n\tn = int(input(\"Qual o proximo numero:\")) \t","sub_path":"exs/1499-1132.py","file_name":"1499-1132.py","file_ext":"py","file_size_in_byte":289,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"558616414","text":"\"\"\"\n给定一个非负整数 num。对于 0 ≤ i ≤ num 范围中的每个数字 i ,计算其二进制数中的 1 的数目并将它们作为数组返回。\n\n示例 1:\n\n输入: 2\n输出: [0,1,1]\n示例 2:\n\n输入: 5\n输出: [0,1,1,2,1,2]\n进阶:\n\n给出时间复杂度为O(n*sizeof(integer))的解答非常容易。但你可以在线性时间O(n)内用一趟扫描做到吗?\n要求算法的空间复杂度为O(n)。\n你能进一步完善解法吗?要求在C++或任何其他语言中不使用任何内置函数(如 C++ 中的 __builtin_popcount)来执行此操作。\n\n来源:力扣(LeetCode)\n链接:https://leetcode-cn.com/problems/counting-bits\n著作权归领扣网络所有。商业转载请联系官方授权,非商业转载请注明出处。\n\"\"\"\nfrom typing import *\n\nclass Solution:\n def countBits(self, num: int) -> List[int]:\n dp = [0 for _ in range(num+1)]\n maxBit = 0\n for i in range(1, num+1):\n if i & (i-1) == 0:\n maxBit = i\n dp[i] = 1\n else:\n dp[i] = dp[i-maxBit] + 1\n return dp\n","sub_path":"338_bitCount.py","file_name":"338_bitCount.py","file_ext":"py","file_size_in_byte":1118,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"391218786","text":"# -*- coding: utf-8 -*-\nfrom __future__ import print_function, unicode_literals\n\nimport os\nimport six\nimport shutil\nfrom glob import glob\n\nfrom django.utils.six.moves import input\nfrom django.conf import settings\nfrom django.core.management.base import NoArgsCommand\n\nfrom trojsten.contests.models import Competition, Round, Semester\n\n\nclass Command(NoArgsCommand):\n help = 'Migrates contest directory structure.'\n\n def handle_noargs(self, **options):\n # Compute changes\n move_paths = list()\n rounds = list()\n\n for competition in glob(os.path.join(settings.TASK_STATEMENTS_PATH, '*')):\n for year in glob(os.path.join(competition, '*rocnik')):\n for rnd in glob(os.path.join(year, '*kolo')):\n new_path, round_obj = self.migrate_path(rnd)\n move_paths.append((rnd, new_path))\n if 
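# _download_xbrl_file above issues the same GET twice; one request is enough.
# A trimmed sketch of the download-and-extract step it performs, with the same
# requests/ZipFile/BytesIO combination; url and dest are placeholders for the
# EDINET link and output directory.
import os
from io import BytesIO
from zipfile import ZipFile

import requests

def download_and_extract(url, dest):
    r = requests.get(url)
    r.raise_for_status()                       # surface HTTP errors explicitly
    os.makedirs(dest, exist_ok=True)
    ZipFile(BytesIO(r.content)).extractall(dest)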
round_obj:\n rounds.append(round_obj)\n\n # List changes\n self.stdout.write('Following paths are going to be moved:')\n self.stdout.write('\\n'.join(('%s -> %s' % move for move in move_paths)))\n self.stdout.write('Following rounds are going to be renumbered:')\n self.stdout.write('\\n'.join((six.text_type(r) for r in rounds)))\n # Ask and apply\n choice = input('Do you wish to proceed? [yN]: ')\n if choice and choice[0].lower() == 'y':\n for src, dst in move_paths:\n shutil.move(src, dst)\n for round in rounds:\n round.save()\n\n def migrate_path(self, old_path):\n prefix, competition, year, round = old_path.rsplit('/', 3)\n year = ''.join(year[:-len('rocnik')])\n round = ''.join(round[:-len('kolo')])\n split = 3 if competition in {'UFO', 'FKS'} else 2\n semester = '1' if int(round) <= split else '2'\n\n round_obj = None\n if semester == '2':\n new_round = str(int(round) - split)\n try:\n competition_obj = Competition.objects.get(name=competition)\n semester_obj = Semester.objects.get(competition=competition_obj, year=year, number=semester)\n round_obj = Round.objects.get(number=round, semester=semester_obj)\n round_obj.number = int(new_round)\n except (Competition.DoesNotExist, Semester.DoesNotExist, Round.DoesNotExist) as e:\n print('%s not in DB: %s' % (old_path, e))\n round = new_round\n\n return os.path.join(prefix, competition, year, semester, round), round_obj\n","sub_path":"trojsten/contests/management/commands/migrate_rounds.py","file_name":"migrate_rounds.py","file_ext":"py","file_size_in_byte":2557,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"207447921","text":"#!/usr/bin/env python\n\nimport json\nimport asyncio\nfrom urllib.request import urlopen\nimport websockets\n\n\nclass ServerItem:\n __connection = None\n\n def __init__(self, server_name, test_url):\n '''Tworzy nowy element reprezentujacy serwer.\n * test_url - adres URL razem z protokołem i portem spod kßórego\n pobrać dokument w celu sprawdzenia czy serwer działa.\n '''\n self.name = server_name\n self.test_url = test_url\n self.status = 'ok'\n\n def save(self):\n pass\n\n\nclass StatusServer:\n def __init__(self):\n self.__server_list = []\n\n # załaduj dane do pamięci\n with open('./servers.json', 'r') as f:\n data = json.loads(f.read())\n\n for s in data:\n self.__server_list.append(ServerItem(s['name'], s['test_url']))\n\n def run(self):\n self.__event_loop = asyncio.get_event_loop()\n try:\n update_server = websockets.serve(\n self.__updateserve, '0.0.0.0', 3000)\n self.__event_loop.run_until_complete(update_server)\n self.__event_loop.run_forever()\n except KeyboardInterrupt:\n print('\\nGoodbye\\n')\n finally:\n self.__event_loop.close()\n\n async def check_server(self, server):\n '''Sprawdź czy serwer odpwoie na żądanie GET'''\n print('Sprawdzam serwer ', server.test_url, end=' ')\n\n data = {\n 'action': 'status_update',\n 'server': server.__dict__,\n }\n\n try:\n response = urlopen(server.test_url)\n print(response.getcode())\n if response.getcode() != 200:\n data['server']['status'] = 'Error code != 200'\n print('Wysyłam aktualizację serwera', server)\n except Exception:\n data['server']['status'] = 'Błąd połączenia.'\n print('Wysyłam aktualizację serwera', server)\n finally:\n return data\n\n async def check_servers(self, websocket):\n for server in self.__server_list:\n msg = await self.check_server(server)\n if msg and msg['server']['status'] != 'ok':\n await websocket.send(json.dumps(msg))\n\n async def __updateserve(self, websocket, path):\n 
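# migrate_rounds above builds the complete move plan before asking for
# confirmation, so the printed listing doubles as a dry run. The same
# confirm-then-apply pattern in isolation; the (src, dst) pairs would come
# from the plan, nothing here is taken from a real contest tree.
import shutil

def apply_moves(move_paths):
    print("\n".join("%s -> %s" % move for move in move_paths))
    choice = input("Do you wish to proceed? [yN]: ")
    if choice and choice[0].lower() == "y":
        for src, dst in move_paths:
            shutil.move(src, dst)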
while True:\n await self.check_servers(websocket)\n await asyncio.sleep(60)\n\n\nif __name__ == '__main__':\n StatusServer().run()\n","sub_path":"service.py","file_name":"service.py","file_ext":"py","file_size_in_byte":2458,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"172595741","text":"#Hannah Lerner\n#hlerner@bu.edu\n\n\n#1\ndef rem_first(elem, values):\n \"\"\" returns a version of values in which *only the first* occurrence\n of elem (if any) has been removed.\n inputs: elem is an arbitrary value\n values is an arbitrary list\n \"\"\"\n if values == '':\n return ''\n elif values[0] == elem:\n return values[1:]\n else:\n rest = rem_first(elem, values[1:])\n return values[0] + rest\n\n#2\n\ndef jscore(s1,s2):\n '''returns the number of letters in common between the teo inputs'''\n if s1 == '':\n return 0\n else:\n if s1[0] in s2:\n x=rem_first(s1[0],s1)\n y=rem_first(s1[0],s2)\n z=jscore(x,y)+1\n else:\n z=jscore(s1[1:],s2)\n return z\n#3\n\n\ndef lcs(s1,s2):\n '''returns the longest common string between the two inputs'''\n if s1 == '' or s2 == '':\n return ''\n else:\n if s1[0] == s2[0]:\n x=rem_first(s1[0],s1)\n y=rem_first(s2[0],s2)\n b=s1[0]+lcs(x,y)\n else:\n z=lcs(s1[1:],s2)\n a=lcs(s1,s2[1:])\n if len(z)>len(a):\n b = z\n else:\n b = a\n return b\n","sub_path":"Semester 1/Problem Sets/ps3pr4.py","file_name":"ps3pr4.py","file_ext":"py","file_size_in_byte":1228,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"113444122","text":"# -*- coding: utf-8 -*-\n'''\n\n@author: Rem\n@contact: remch183@outlook.com\n@site: \n@software: PyCharm Community Edition\n@file: get_restrain.py\n@time: 2016/5/17 20:11\n'''\n__author__ = 'Rem'\n\nimport numpy as np\nfrom pandas import DataFrame, Series\nfrom selenium import webdriver\nfrom bs4 import BeautifulSoup\nimport pandas as pd\n\nrestrain_dic = {}\nnum = 0\nimport threading\n\ndriver = webdriver.PhantomJS()\n\n\ndef get_hero_restrain(name):\n global num, restrain_dic, driver\n num += 1\n print('i1')\n res_dic = {}\n maxd = 20\n url = 'http://dotamax.com/hero/detail/match_up_anti/%s/' % (name,)\n while maxd:\n maxd -= 1\n try:\n driver.get(url)\n bs = BeautifulSoup(driver.page_source, 'html.parser')\n break\n except BaseException:\n pass\n print('i3')\n for another in bs.find_all('tr'):\n ano_name = another.find(attrs={'class': 'hero-name-list'})\n if not ano_name is None:\n ano_dic = {}\n a = another.find_all(attrs={'style': 'height: 10px'})\n ano_dic['restrain_rate'] = a[0].get_text()\n ano_dic['win_rate'] = a[1].get_text()\n res_dic[ano_name.get_text()] = ano_dic\n print('i4')\n restrain_dic[name] = res_dic\n num -= 1\n\n\nif __name__ == '__main__':\n reader = pd.ExcelFile('hero_list.xlsx')\n df = reader.parse('Sheet1')\n num = 0\n for i, name in enumerate(df.ename):\n # t = threading.Thread(target=get_hero_restrain, args=(name,))\n # print('%d,%d'%(num, i))\n # while num > 3: time.sleep(0.5)\n # t.start()\n print(i)\n get_hero_restrain(name)\n print(restrain_dic)\n","sub_path":"get_restrain.py","file_name":"get_restrain.py","file_ext":"py","file_size_in_byte":1665,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"369165220","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\nThis file contains the RobotHandler class, which handles the communication with the uArm Swift pro.\n\"\"\"\n\nimport time\n\n\n\n\nfrom libraries.uArm_Python_SDK.uarm.wrapper import 
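# __updateserve in service.py above is a classic asyncio periodic loop: do the
# work, then await asyncio.sleep. The same skeleton without the websocket
# plumbing; the 60-second period matches the original and tick() is a stand-in.
import asyncio

async def every(seconds, tick):
    while True:
        await tick()
        await asyncio.sleep(seconds)

async def demo_tick():
    print("checking servers...")

# asyncio.run(every(60, demo_tick))  # runs forever, so left commented out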
SwiftAPI\nfrom src.geometry_helper import GeometryHelper\nfrom src.robot_error import ErrorCode, RobotError\nfrom src.user_challenge import UserChallenge\n\n\nclass RobotHandler:\n def __init__(self):\n \"\"\"\n Connect on initialization.\n \"\"\"\n # connect to uArm\n self.__swift = SwiftAPI(filters={'hwid': 'USB VID:PID=2341:0042'})\n self.__swift.waiting_ready(timeout=5)\n # set general mode: 0\n self.__swift.set_mode(0)\n\n # TODO (ALR): Measure Servo offset again once uArm is installed.\n # set measured wrist servo scaling offset\n # It's 90+-78\n self.__lower_servo_limit = 12.0\n self.__higher_servo_limit = 168.0\n self.__angle_range_servo_limit = self.__higher_servo_limit-self.__lower_servo_limit\n self.__pick_up_height_correction = -9\n\n # set sleep time to wait for servo to finish\n self.__sleep_time = 1.0\n\n # initialize geometry helper\n self.__geometry_helper = GeometryHelper()\n\n # initialize empty position values\n self.__x_uarm = 0\n self.__y_uarm = 0\n self.__z_uarm = 0\n self.__wrist_angle = 0\n # set values and move to predefined position (3, 8)\n self.reset()\n\n def disconnect(self):\n \"\"\"\n Disconnect robot.\n \"\"\"\n self.__swift.flush_cmd()\n time.sleep(3)\n self.__swift.disconnect()\n\n def reset(self):\n \"\"\"\n Reset robot, go back to start position.\n \"\"\"\n # reset arm to home\n self.__swift.reset(wait=True, speed=10000)\n # get pose values in uarm frame\n pose = self.__swift.get_position()\n # check if successful\n if isinstance(pose, list):\n self.__x_uarm = pose[0]\n self.__y_uarm = pose[1]\n self.__z_uarm = pose[2]\n else:\n message = \"Die Roboter Position konnte nicht gelesen werden, überprüfe die Verbindung.\"\n raise RobotError(ErrorCode.E0001, message)\n \n self.pump_off()\n # set servo value in degrees\n wrist_angle = 90.0\n self.__swift.set_servo_angle(servo_id=3, angle=wrist_angle)\n self.__wrist_angle = wrist_angle\n\n\n self.__swift.flush_cmd()\n time.sleep(self.__sleep_time)\n\n # move to fix starting position\n self.position_new([3, 8])\n self.height_new([2])\n self.__swift.flush_cmd()\n\n def position_new(self, position_user):\n \"\"\"\n Move robot arm to new position x, y in user frame.\n :param position_user: position in user frame [x_user, y_user]\n :type position_user: list[int]\n \"\"\"\n [x_user, y_user] = position_user\n self.x_user = x_user\n self.y_user = y_user\n # transform frames of positions\n uarm_dict = self.__geometry_helper.transform_position_user_to_uarm(x_user, y_user, self.__z_uarm)\n x_uarm_new = uarm_dict['x']\n y_uarm_new = uarm_dict['y']\n\n # calculate new wrist angle that keeps object in the same orientation\n wrist_angle_new = self.__geometry_helper.calculate_equal_wrist_rotation_new(self.__x_uarm, x_uarm_new,\n self.__y_uarm, y_uarm_new,\n self.__wrist_angle)\n #z_uarm_new = self.__geometry_helper.transform_height_user_to_uarm(self.__z_uarm, x_uarm_new, y_uarm_new)\n self.__swift.set_position(x=x_uarm_new, y=y_uarm_new)\n self.__swift.set_wrist(angle=wrist_angle_new, wait=True)\n self.__swift.flush_cmd()\n time.sleep(self.__sleep_time)\n\n # set new values\n self.__x_uarm = x_uarm_new\n self.__y_uarm = y_uarm_new\n # set wrist angle to new, not to corrected because the correction is only for the motor\n self.__wrist_angle = wrist_angle_new\n\n def height_new(self, z_user_list):\n \"\"\"x_uarm_new\n Move robot arm to z position in user frame.\n :param z_user_list: new height in user frame [z_user]\n :type z_user_list: list[int]\n \"\"\"\n z_user = z_user_list[0]\n # calculate new height in uarm frame\n 
z_uarm_new = self.__geometry_helper.transform_height_user_to_uarm(z_user, self.__x_uarm, self.__y_uarm)\n\n # move arm\n self.__swift.set_position(z=z_uarm_new)\n self.__swift.flush_cmd()\n time.sleep(self.__sleep_time)\n\n # set values\n self.__z_uarm = z_uarm_new\n\n def pump_on(self):\n \"\"\"\n Turn on the pump.\n \"\"\"\n #Correct angle to allow a wide range of corrections\n angle = self.__geometry_helper.adjust_wrist_rotation_before_pumpe_an(self.x_user,self.y_user)\n self.__wrist_angle = angle\n self.__swift.set_servo_angle(servo_id=3,angle = self.__wrist_angle)\n self.__swift.flush_cmd()\n time.sleep(self.__sleep_time)\n \n \n\n # move arm slightly down (those, the arm will not touch blocks and only grips them if the pump is on)\n z_uarm_corrected = self.__z_uarm +self.__pick_up_height_correction\n self.__swift.set_position(z=z_uarm_corrected)\n self.__swift.flush_cmd()\n time.sleep(self.__sleep_time)\n \n # TUrns pump on\n self.__swift.set_pump(on=True)\n self.__swift.flush_cmd()\n time.sleep(self.__sleep_time)\n \n # move arm slightly up again to reach previous position\n z_uarm_corrected = self.__z_uarm -self.__pick_up_height_correction\n self.__swift.set_position(z=z_uarm_corrected)\n self.__swift.flush_cmd()\n time.sleep(self.__sleep_time)\n\n def pump_off(self):\n \"\"\"\n Turn off the pump.\n \"\"\"\n self.__swift.set_pump(on=False)\n self.__swift.flush_cmd()\n time.sleep(self.__sleep_time)\n \n def drehen(self, rotation):\n rotation = rotation[0]\n current_angle = self.__wrist_angle\n angle = self.__geometry_helper.gripper_angle_rotation(current_angle,rotation)\n self.__wrist_angle = angle\n self.__swift.set_servo_angle(servo_id=3,angle = self.__wrist_angle)\n self.__swift.flush_cmd()\n time.sleep(self.__sleep_time)\n \n \n\n \n def test_c(self,*args):\n # Function only for testing\n\n if len(args) == 1:\n rotation = (args[0])[0]\n else:\n rotation = 90\n print(\"Checking servo limits\")\n \n # Servo Test\n if len(args) == 1:\n if (args[0])[0] == 0:\n for rot in range (22,120,10):\n print(\"angle {}\".format(rot))\n self.__wrist_angle = self.__wrist_servo_correction(rot)\n self.__swift.set_servo_angle(servo_id=3,angle = self.__wrist_angle)\n self.__swift.flush_cmd()\n time.sleep(self.__sleep_time*2)\n elif (args[0])[0] == 1:\n \n for x in range( 5,7,1):\n for y in range(3,12,1):\n self.position_new([x,y])\n \n \n \n \n \n \n\n def __wrist_servo_correction(self, wrist_angle):\n \"\"\"\n Correct wrist servo offset / scaling.\n :param wrist_angle: geometrically calculated wrist angle\n :type wrist_angle: float\n :return: corrected / scaled wrist angle according to servo offset\n :rtype: float\n \"\"\"\n # linearly scaling angle to real wrist limits\n if wrist_angle <= 90.0:\n wrist_angle_corrected = (wrist_angle - self.__lower_servo_limit) * 90.0 / (90.0 - self.__lower_servo_limit)\n else:\n wrist_angle_corrected = 90.0 + 90.0 * (wrist_angle - 90) / (self.__higher_servo_limit - 90.0)\n\n return wrist_angle_corrected\n def __wrist_angle_limits(self,wrist_angle):\n # If wrist angle is inside the limit, return the percentage of the low and upper limit. 
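# __wrist_servo_correction above rescales [lower_limit, 90] and
# [90, higher_limit] onto [0, 90] and [90, 180] as two separate linear
# segments. The same piecewise map as a standalone function; 12.0 and 168.0
# mirror the limits set in __init__.
def scale_wrist(angle, lo=12.0, hi=168.0):
    if angle <= 90.0:
        return (angle - lo) * 90.0 / (90.0 - lo)
    return 90.0 + 90.0 * (angle - 90.0) / (hi - 90.0)

assert scale_wrist(12.0) == 0.0 and scale_wrist(90.0) == 90.0
assert scale_wrist(168.0) == 180.0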
Otherwise, return false\n \n angle_limit_ratio = (wrist_angle - self.__lower_servo_limit)/(self.__angle_range_servo_limit)\n \n if angle_limit_ratio >=0 and angle_limit_ratio <= 1:\n return angle_limit_ratio\n else:\n return False\n \n \n def __wrist_servo_correction_new(self, wrist_angle):\n \"\"\"\n Correct wrist servo offset / scaling.\n :param wrist_angle: geometrically calculated wrist angle\n :type wrist_angle: float\n :return: corrected / scaled wrist angle according to servo offset\n :rtype: float\n \"\"\"\n # linearly scaling angle to real wrist limits\n if wrist_angle < self.__lower_servo_limit:\n pass\n if wrist_angle > self.__higher_servo_limit:\n pass\n return wrist_angle\n","sub_path":"src/robot_handler.py","file_name":"robot_handler.py","file_ext":"py","file_size_in_byte":9076,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"442569507","text":"class Solution:\n def maxPathSum(self, root: Optional[TreeNode]) -> int:\n def dfs(node):\n nonlocal ans\n if not node:\n return 0\n l = dfs(node.left)\n r = dfs(node.right)\n ans = max(ans, l+r+node.val, l+node.val, r+node.val, node.val)\n return max(node.val+l, node.val+r, node.val)\n ans = float('-inf')\n dfs(root)\n return ans\n# Runtime: 114 ms, faster than 59.55% of Python3 online submissions for Binary Tree Maximum Path Sum.\n# Memory Usage: 21.4 MB, less than 32.26% of Python3 online submissions for Binary Tree Maximum Path Sum.","sub_path":"124. Binary Tree Maximum Path Sum.py","file_name":"124. Binary Tree Maximum Path Sum.py","file_ext":"py","file_size_in_byte":640,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"132332974","text":"\nfrom app import create_app,db\nfrom app.models import User,Role,Department,ProductModel,ProductOrder,Product\nfrom flask_script import Manager,Shell\nfrom flask_migrate import Migrate,MigrateCommand\n\n\napp = create_app('default')\nmanager = Manager(app)\nmigrate = Migrate(app,db)\n\n\n\ndef make_shell_context():\n return dict(db=db,app=app,User=User,Department=Department,ProductOrder=ProductOrder,\n Role=Role,ProductModel=ProductModel,Product=Product)\n\nmanager.add_command('shell',Shell(make_context=make_shell_context))\nmanager.add_command('db',MigrateCommand)\n\n\n\nif __name__ == '__main__':\n manager.run()\n\n\n\n\n\n\n\n","sub_path":"manage.py","file_name":"manage.py","file_ext":"py","file_size_in_byte":628,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"46152305","text":"from setuptools import setup, find_packages\n\nversion = '0.5'\n\nsetup(name='topicaxis-opengraph',\n version=version,\n description=\"A module to parse the Open Graph Protocol\",\n long_description=open(\"README.rst\").read() + \"\\n\",\n classifiers=[\n 'License :: OSI Approved :: MIT License',\n 'Development Status :: 3 - Alpha',\n 'Natural Language :: English',\n 'Intended Audience :: Developers',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: Implementation :: CPython',\n 'Topic :: Text Processing :: Markup :: HTML',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n ],\n keywords='opengraph protocol facebook',\n author='Panagiotis Matigakis',\n author_email='pmatigakis@gmail.com',\n url='https://github.com/topicaxis/opengraph',\n license='MIT',\n packages=find_packages(exclude=['tests']),\n include_package_data=True,\n 
zip_safe=False,\n install_requires=[\n 'beautifulsoup4'\n ],\n test_suite=\"nose.collector\",\n tests_require=[\n \"nose==1.3.7\"\n ]\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1275,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"551848581","text":"import consul\nimport pandas as pd\nimport sys\n\ndef consul_uploader(c,key,value):\n\tc.kv.put(key,value)\n\tprint(\"inserting user\",key,\"with node\",value)\n\nif __name__ == \"__main__\":\n if len(sys.argv) != 2:\n print(\"take arguments: <filepath to upload>\")\n sys.exit(1)\n filepath = str(sys.argv[1])\n c = consul.Consul()\n df = pd.read_csv(filepath)\n df.apply(lambda x: consul_uploader(c,str(x[0]),str(x[1])),axis = 1)","sub_path":"code/service_info_publisher.py","file_name":"service_info_publisher.py","file_ext":"py","file_size_in_byte":435,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"307126992","text":"import json\nfrom VersionLookupApi.app.service.version_parser import parse_version\nfrom VersionLookupApi.app.model.software import Software\n#Loads a json file that represents a list of software and turns it into a list\n#of software\ndef load_software_from_file(fileName):\n with open(fileName) as file:\n data = json.load(file)\n\n softwareList = []\n for softwareJsonLine in data:\n software = Software(softwareJsonLine['name'],\n parse_version(softwareJsonLine['version']))\n\n softwareList.append(software)\n\n return softwareList\n","sub_path":"backend/VersionLookupApi/app/service/file_loader.py","file_name":"file_loader.py","file_ext":"py","file_size_in_byte":589,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"537297773","text":"# -*- coding: utf-8 -*-\nimport logging\n\nfrom odoo import models, api, fields\n\nclass ProjectTask(models.Model):\n _inherit = 'project.task'\n\n unbuild_task = fields.Boolean(string='Tarea de Baja', default=False)\n unbuild_finished = fields.Boolean(string='Desinstalacion terminada', default=False)\n deactivate_service_stage = fields.Boolean(string='Etapa de desactivar servicio', default=False, related='stage_id.deactivate_service_stage')\n requesting_user = fields.Many2one('res.users', string='usuario que solicita')\n\n @api.model\n def create(self, vals):\n result = super(ProjectTask, self).create(vals)\n\n if result.unbuild_task:\n result.service_number.unbuild_task_id = result.id\n\n for line in result.project_id.users_to_notify:\n result.message_post(body=\"Solicitud de baja\", partner_ids=[line.partner_id.id])\n\n result.name = '(#' + str(result.id) + ') - ' + result.name\n \n return result\n\n #Cambiar de estado el numero de servicio\n def new_service_unbuild(self):\n for task in self:\n if task.unbuild_task:\n task.service_number.stage = 'disabled'\n note = task.service_number.notes + '<br/><p>Servicio dado de baja desde:</p><p>Tarea.- ' + self.name + '</p>'\n task.service_number.notes = note\n task.service_number.message_post(body=\"Servicio desactivado\", partner_ids=[task.company_id.subscription_user_id.partner_id.id])\n task.unbuild_finished = True\n\n\n def authorize_unbuild(self):\n for reg in self:\n reg.active = True\n reg.message_post(body=\"Baja Aprobada\")\n reg.service_number.message_post(body=\"La solicitud de baja (#\" + str(reg.id) + \") fue aprobada\", partner_ids=[reg.requesting_user.partner_id.id])\n if reg.service_number.sale_subscription:\n 
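# service_info_publisher.py above pushes each CSV row into Consul through
# DataFrame.apply, which builds a throwaway result column. itertuples() does
# the same walk directly; consul.Consul() and kv.put are the calls already
# used there, and the (user, node) column order is taken from its print.
import consul
import pandas as pd

def publish(filepath):
    c = consul.Consul()
    for row in pd.read_csv(filepath).itertuples(index=False):
        key, value = str(row[0]), str(row[1])
        c.kv.put(key, value)
        print("inserting user", key, "with node", value)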
reg.service_number.sale_subscription.message_post(body=\"El servicio (\" + reg.service_number.name + \") esta en proceso de baja\" , partner_ids=[reg.company_id.subscription_user_id.partner_id.id])\n reg.service_number.sale_subscription_line.to_unbuild = True\n\n\n def reject_unbuild(self):\n for reg in self:\n reg.active = True\n reg.message_post(body=\"Baja Rechazada\")\n reg.service_number.unbuild_task_id = False\n reg.service_number.message_post(body=\"La solicitud de baja (#\" + str(reg.id) + \") fue rechazada\", partner_ids=[reg.requesting_user.partner_id.id])\n","sub_path":"service_unbuild/models/project_task.py","file_name":"project_task.py","file_ext":"py","file_size_in_byte":2534,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"92887576","text":"#!/usr/bin/python3\n\nimport os\nimport sys\nimport json\nimport csv\nimport logging\n\nfrom deriva.core import DerivaServer, get_credential, urlquote, AttrDict, topo_sorted, tag\nfrom deriva.core.ermrest_model import Model, Table, Column, Key, ForeignKey, builtin_types\n\nfrom . import tableschema\n\n\"\"\"\nBasic C2M2 catalog sketch\n\nDemonstrates use of deriva-py APIs:\n- server authentication (assumes active deriva-auth agent)\n- catalog creation\n- model provisioning\n- basic configuration of catalog ACLs\n- small Chaise presentation tweaks via model annotations\n- simple insertion of tabular content\n\n\"\"\"\nlogger = logging.getLogger(__name__)\n\nclass CfdeDataPackage (object):\n # the translation stores frictionless table resource metadata under this annotation\n resource_tag = 'tag:isrd.isi.edu,2019:table-resource'\n # the translation leaves extranneous table-schema stuff under this annotation\n # (i.e. stuff that perhaps wasn't translated to deriva equivalents)\n schema_tag = 'tag:isrd.isi.edu,2019:table-schema-leftovers'\n\n # some useful group IDs to use later in ACLs...\n grp = AttrDict({\n # USC/ISI ISRD roles\n \"isrd_staff\": \"https://auth.globus.org/176baec4-ed26-11e5-8e88-22000ab4b42b\",\n 'isrd_testers': \"https://auth.globus.org/9d596ac6-22b9-11e6-b519-22000aef184d\",\n # demo.derivacloud.org roles\n \"demo_admin\": \"https://auth.globus.org/5a773142-e2ed-11e8-a017-0e8017bdda58\",\n \"demo_creator\": \"https://auth.globus.org/bc286232-a82c-11e9-8157-0ed6cb1f08e0\",\n \"demo_writer\": \"https://auth.globus.org/caa11064-e2ed-11e8-9d6d-0a7c1eab007a\",\n \"demo_curator\": \"https://auth.globus.org/a5cfa412-e2ed-11e8-a768-0e368f3075e8\",\n \"demo_reader\": \"https://auth.globus.org/b9100ea4-e2ed-11e8-8b39-0e368f3075e8\",\n })\n writers = [grp.demo_curator, grp.demo_writer]\n catalog_acls = {\n \"owner\": [grp.demo_admin],\n \"insert\": writers,\n \"update\": writers,\n \"delete\": writers,\n \"select\": [grp.demo_reader, grp.isrd_testers, grp.isrd_staff, \"*\"],\n \"enumerate\": [\"*\"],\n }\n ermrestclient_acls = {\n \"select\": [\"*\"],\n }\n\n def __init__(self, filename, verbose=True):\n self.filename = filename\n self.dirname = os.path.dirname(self.filename)\n self.catalog = None\n self.cat_model_root = None\n self.cat_cfde_schema = None\n\n if verbose:\n logger.setLevel(logging.DEBUG)\n logger.addHandler(logging.StreamHandler(stream=sys.stdout))\n\n with open(self.filename, 'r') as f:\n self.model_doc = tableschema.make_model(json.load(f))\n self.doc_model_root = Model(None, self.model_doc)\n self.doc_cfde_schema = self.doc_model_root.schemas.get('CFDE')\n\n if set(self.model_doc['schemas']) != {'CFDE'}:\n raise NotImplementedError('Unexpected schema set in 
data package: %s' % (self.model_doc['schemas'],))\n\n def set_catalog(self, catalog):\n self.catalog = catalog\n self.get_model()\n\n def get_model(self):\n self.cat_model_root = self.catalog.getCatalogModel()\n self.cat_cfde_schema = self.cat_model_root.schemas.get('CFDE')\n\n def provision_dataset_ancestor_tables(self):\n def tdef(tname):\n return Table.define(\n tname,\n [\n Column.define(\"descendant\", builtin_types.text, nullok=False, comment=\"Contained dataset in transitive relationship.\"),\n Column.define(\"ancestor\", builtin_types.text, nullok=False, comment=\"Containing dataset in transitive relationship.\"),\n ],\n [\n Key.define([\"descendant\", \"ancestor\"], constraint_names=[[\"CFDE\", tname + \"_assoc_key\"]]),\n ],\n [\n ForeignKey.define(\n [\"descendant\"], \"CFDE\", \"dataset\", [\"id\"],\n constraint_names=[[\"CFDE\", tname + \"_descendant_fkey\"]],\n ),\n ForeignKey.define(\n [\"ancestor\"], \"CFDE\", \"dataset\", [\"id\"],\n constraint_names=[[\"CFDE\", tname + \"_ancestor_fkey\"]],\n ),\n ],\n comment=\"Flattened, transitive closure of nested DatasetsInDatasets relationship.\",\n )\n\n if 'dataset_ancestor' not in self.cat_model_root.schemas['CFDE'].tables:\n self.cat_model_root.schemas['CFDE'].create_table(tdef(\"dataset_ancestor\"))\n self.cat_model_root.schemas['CFDE'].create_table(tdef(\"dataset_ancestor_reflexive\"))\n\n def provision_denorm_tables(self):\n def dataset_property(srctable, srccolumn):\n tname = 'dataset_denorm_%s' % srccolumn.name\n return (\n tname,\n Table.define(\n tname,\n [\n Column.define(\"dataset\", builtin_types.text, nullok=False),\n Column.define(srccolumn.name, builtin_types.text, srccolumn.nullok),\n ],\n [\n Key.define([\"dataset\", srccolumn.name]),\n ],\n [\n ForeignKey.define(\n [\"dataset\"], \"CFDE\", \"dataset\", [\"id\"],\n constraint_names=[[\"CFDE\", \"%s_ds_fkey\" % tname]],\n )\n ] + [\n ForeignKey.define(\n [srccolumn.name], 'CFDE', fkey.referenced_columns[0].table.name, [ c.name for c in fkey.referenced_columns ],\n constraint_names=[['CFDE', '%s_prop_fkey' % tname]]\n )\n for fkey in srctable.foreign_keys\n if {srccolumn.name} == set([ c.name for c in fkey.foreign_key_columns ])\n ],\n )\n )\n\n for tname, cname in [\n ('data_event', 'protocol'),\n ('bio_sample', 'sample_type'),\n ]:\n tab = self.cat_model_root.table('CFDE', tname)\n col = tab.column_definitions.elements[cname]\n tname, tdef = dataset_property(tab, col)\n if tname not in self.cat_model_root.schemas['CFDE'].tables:\n self.cat_model_root.schemas['CFDE'].create_table(tdef)\n\n def provision(self):\n if 'CFDE' not in self.cat_model_root.schemas:\n # blindly load the whole model on an apparently empty catalog\n self.catalog.post('/schema', json=self.model_doc).raise_for_status()\n else:\n # do some naively idempotent model definitions on existing catalog\n # adding missing tables and missing columns\n need_tables = []\n need_columns = []\n hazard_fkeys = {}\n for ntable in self.doc_cfde_schema.tables.values():\n table = self.cat_cfde_schema.tables.get(ntable.name)\n if table is not None:\n for ncolumn in ntable.column_definitions:\n column = table.column_definitions.elements.get(ncolumn.name)\n if column is not None:\n # TODO: check existing columns for compatibility?\n pass\n else:\n cdoc = ncolumn.prejson()\n cdoc.update({'table_name': table.name, 'nullok': True})\n need_columns.append(cdoc)\n # TODO: check existing table keys/foreign keys for compatibility?\n else:\n tdoc = ntable.prejson()\n tdoc['schema_name'] = 'CFDE'\n need_tables.append(tdoc)\n\n 
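# provision() above is idempotent by construction: it diffs the packaged model
# against the live catalog and only posts what is missing. The shape of that
# diff reduced to plain dicts, so it runs without a deriva catalog; the table
# and column names here are illustrative.
def model_diff(wanted, existing):
    need_tables, need_columns = [], []
    for tname, cols in wanted.items():
        if tname not in existing:
            need_tables.append(tname)
        else:
            need_columns += [(tname, c) for c in cols if c not in existing[tname]]
    return need_tables, need_columns

assert model_diff({"file": ["id", "md5"]}, {"file": ["id"]}) == ([], [("file", "md5")])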
if need_tables:\n logger.debug(\"Added tables %s\" % ([tdoc['table_name'] for tdoc in need_tables]))\n self.catalog.post('/schema', json=need_tables).raise_for_status()\n\n for cdoc in need_columns:\n self.catalog.post(\n '/schema/CFDE/table/%s/column' % urlquote(cdoc['table_name']),\n json=cdoc\n ).raise_for_status()\n logger.debug(\"Added column %s.%s\" % (cdoc['table_name'], cdoc['name']))\n\n self.get_model()\n self.provision_dataset_ancestor_tables()\n self.provision_denorm_tables()\n\n def apply_custom_config(self):\n self.get_model()\n\n for schema in self.cat_model_root.schemas.values():\n for table in schema.tables.values():\n if table.is_association():\n for cname in {'RCB', 'RMB'}:\n for fkey in table.fkeys_by_columns([cname], raise_nomatch=False):\n print('Dropping %s' % fkey.uri_path)\n fkey.drop()\n\n # keep original catalog ownership\n # since ERMrest will prevent a client from discarding ownership rights\n acls = dict(self.catalog_acls)\n acls['owner'] = list(set(acls['owner']).union(self.cat_model_root.acls['owner']))\n self.cat_model_root.acls.update(acls)\n self.cat_model_root.table('public', 'ERMrest_Client').acls.update(self.ermrestclient_acls)\n self.cat_model_root.table('public', 'ERMrest_Group').acls.update(self.ermrestclient_acls)\n\n # set custom chaise configuration values for this catalog\n self.cat_model_root.annotations[tag.chaise_config] = {\n #\"navbarBrandText\": \"CFDE Data Browser\",\n \"navbarMenu\": {\n \"children\": [\n {\n \"name\": \"Browse\",\n \"children\": [\n { \"name\": \"Dataset\", \"url\": \"/chaise/recordset/#%s/CFDE:dataset\" % self.catalog._catalog_id },\n { \"name\": \"Data Event\", \"url\": \"/chaise/recordset/#%s/CFDE:data_event\" % self.catalog._catalog_id },\n { \"name\": \"File\", \"url\": \"/chaise/recordset/#%s/CFDE:file\" % self.catalog._catalog_id },\n { \"name\": \"Biosample\", \"url\": \"/chaise/recordset/#%s/CFDE:bio_sample\" % self.catalog._catalog_id },\n { \"name\": \"Subject\", \"url\": \"/chaise/recordset/#%s/CFDE:subject\" % self.catalog._catalog_id },\n { \"name\": \"Common Fund Program\", \"url\": \"/chaise/recordset/#%s/CFDE:common_fund_program\" % self.catalog._catalog_id },\n ]\n }\n ]\n }\n }\n\n def _update(parent, key, d):\n if key not in parent:\n parent[key] = dict()\n parent[key].update(d)\n \n # have Chaise display underscores in model element names as whitespace\n _update(\n self.cat_cfde_schema.display,\n \"name_style\",\n {\"underline_space\": True, \"title_case\": True}\n )\n\n def compact_visible_columns(table):\n \"\"\"Emulate Chaise heuristics while hiding system metadata\"\"\"\n # hacks for CFDE:\n # - assume we have an app-level primary key (besides RID)\n # - ignore possibility of compound or overlapping fkeys\n fkeys_by_col = {\n fkey.foreign_key_columns[0].name: fkey.names[0]\n for fkey in table.foreign_keys\n }\n return [\n fkeys_by_col.get(col.name, col.name)\n for col in table.column_definitions\n if col.name not in {\"RID\", \"RCT\", \"RMT\", \"RCB\", \"RMB\"}\n ]\n\n def visible_foreign_keys(table):\n \"\"\"Emulate Chaise heuristics while hiding denorm tables\"\"\"\n # hack: we use a fixed prefix for these tables\n return [\n fkey.names[0]\n for fkey in table.referenced_by\n if not fkey.table.name.startswith(\"dataset_denorm\")\n ]\n\n for table in self.cat_cfde_schema.tables.values():\n ntable = self.doc_cfde_schema.tables.get(table.name)\n if ntable is None:\n continue\n table.comment = ntable.comment\n table.display.update(ntable.display)\n for column in table.column_definitions:\n if 
column.name in {'id', 'url', 'md5'}:\n # set these acronyms to all-caps\n column.display[\"name\"] = column.name.upper()\n ncolumn = ntable.column_definitions.elements.get(column.name)\n if ncolumn is None:\n continue\n column.comment = ncolumn.comment\n column.display.update(ncolumn.display)\n for fkey in table.foreign_keys:\n try:\n npktable = self.doc_model_root.table(fkey.pk_table.schema.name, fkey.pk_table.name)\n nfkey = ntable.fkey_by_column_map({\n ntable.column_definitions[fk_col.name]: npktable.column_definitions[pk_col.name]\n for fk_col, pk_col in fkey.column_map.items()\n }) \n fkey.foreign_key.update(nfkey.foreign_key)\n except KeyError:\n continue\n table.visible_columns = {'compact': compact_visible_columns(table)}\n table.visible_foreign_keys = {'*': visible_foreign_keys(table)}\n\n # prettier display of built-in ERMrest_Client table entries\n _update(\n self.cat_model_root.table('public', 'ERMrest_Client').table_display,\n 'row_name',\n {\"row_markdown_pattern\": \"{{{Full_Name}}} ({{{Display_Name}}})\"}\n )\n\n def find_fkey(from_tname, from_cnames):\n from_table = self.cat_model_root.table(\"CFDE\", from_tname)\n if isinstance(from_cnames, str):\n from_cnames = [from_cnames]\n fkeys = list(from_table.fkeys_by_columns(from_cnames))\n if len(fkeys) > 1:\n raise ValueError('found multiple fkeys for %s %s' % (from_table, from_cnames))\n return fkeys[0]\n\n def assoc_source(markdown_name, assoc_table, left_columns, right_columns):\n return {\n \"source\": [\n {\"inbound\": find_fkey(assoc_table, left_columns).names[0]},\n {\"outbound\": find_fkey(assoc_table, right_columns).names[0]},\n \"RID\"\n ],\n \"markdown_name\": markdown_name,\n }\n\n dsa_to_dsd = [\n {\"inbound\": find_fkey(\"dataset_ancestor\", \"ancestor\").names[0]},\n {\"outbound\": find_fkey(\"dataset_ancestor\", \"descendant\").names[0]},\n ]\n\n dsa_to_dsd_r = [\n {\"inbound\": find_fkey(\"dataset_ancestor_reflexive\", \"ancestor\").names[0]},\n {\"outbound\": find_fkey(\"dataset_ancestor_reflexive\", \"descendant\").names[0]},\n ]\n\n ds_to_file_flat = [\n {\"inbound\": find_fkey(\"files_in_datasets\", \"containing_dataset_id\").names[0]},\n {\"outbound\": find_fkey(\"files_in_datasets\", \"file_id\").names[0]},\n ]\n\n ds_to_file = dsa_to_dsd_r + ds_to_file_flat\n\n ds_to_devent = ds_to_file_flat + [\n {\"inbound\": find_fkey(\"file_produced_by\", \"file_id\").names[0]},\n {\"outbound\": find_fkey(\"file_produced_by\", \"data_event_id\").names[0]},\n ]\n\n ds_to_bsamp = ds_to_devent + [\n {\"inbound\": find_fkey(\"bio_sample_processed_by\", \"data_event_id\").names[0]},\n {\"outbound\": find_fkey(\"bio_sample_processed_by\", \"bio_sample_id\").names[0]},\n ]\n \n # improve Dataset with pseudo columns?\n\n program = {\n \"source\": [\n {\"outbound\": find_fkey(\"dataset\", [\"data_source\"]).names[0]},\n \"RID\"\n ],\n \"markdown_name\": \"Common Fund Program\",\n \"open\": True,\n }\n self.cat_model_root.table('CFDE', 'dataset').visible_columns = {\n \"compact\": [\"title\", program, \"description\", \"url\"],\n \"filter\": {\"and\": [\n program,\n {\n \"markdown_name\": \"Data Method\",\n \"source\": [\n {\"inbound\": [\"CFDE\", \"dataset_denorm_method_ds_fkey\"]},\n {\"outbound\": [\"CFDE\", \"dataset_denorm_method_prop_fkey\"]},\n \"RID\",\n ],\n \"open\": True,\n },\n {\n \"markdown_name\": \"Data Platform\",\n \"source\": [\n {\"inbound\": [\"CFDE\", \"dataset_denorm_platform_ds_fkey\"]},\n {\"outbound\": [\"CFDE\", \"dataset_denorm_platform_prop_fkey\"]},\n \"RID\",\n ],\n },\n {\n \"markdown_name\": 
\"Data Protocol\",\n \"source\": [\n {\"inbound\": [\"CFDE\", \"dataset_denorm_protocol_ds_fkey\"]},\n {\"outbound\": [\"CFDE\", \"dataset_denorm_protocol_prop_fkey\"]},\n \"RID\",\n ],\n },\n {\n \"markdown_name\": \"Biosample Type\",\n \"source\": [\n {\"inbound\": [\"CFDE\", \"dataset_denorm_sample_type_ds_fkey\"]},\n {\"outbound\": [\"CFDE\", \"dataset_denorm_sample_type_prop_fkey\"]},\n \"RID\",\n ],\n \"open\": True,\n },\n assoc_source(\"Containing Dataset\", \"dataset_ancestor\", [\"descendant\"], [\"ancestor\"]),\n assoc_source(\"Contained Dataset\", \"dataset_ancestor\", [\"ancestor\"], [\"descendant\"]),\n assoc_source(\"Contained File\", \"files_in_datasets\", [\"containing_dataset_id\"], [\"file_id\"]),\n ]}\n }\n\n self.cat_model_root.table('CFDE', 'dataset').visible_foreign_keys = {\n \"*\": [\n {\n \"source\": dsa_to_dsd + [ \"RID\" ],\n \"markdown_name\": \"Included Datasets\",\n },\n {\n \"source\": ds_to_file + [ \"RID\" ],\n \"markdown_name\": \"Included Files\",\n }\n ]\n }\n\n self.cat_model_root.column('CFDE', 'dataset', 'id').column_display[\"*\"] = {\n \"markdown_pattern\": \"[{{{id}}}]({{{id}}})\"\n }\n\n ## apply the above ACL and annotation changes to server\n self.cat_model_root.apply()\n self.get_model()\n\n @classmethod\n def make_row2dict(cls, table, header):\n \"\"\"Pickle a row2dict(row) function for use with a csv reader\"\"\"\n numcols = len(header)\n missingValues = set(table.annotations[cls.schema_tag].get(\"missingValues\", []))\n\n for cname in header:\n if cname not in table.column_definitions.elements:\n raise ValueError(\"header column %s not found in table %s\" % (cname, table.name))\n\n def row2dict(row):\n \"\"\"Convert row tuple to dictionary of {col: val} mappings.\"\"\"\n return dict(zip(\n header,\n [ None if x in missingValues else x for x in row ]\n ))\n\n return row2dict\n\n def data_tnames_topo_sorted(self):\n def target_tname(fkey):\n return fkey.referenced_columns[0].table.name\n tables_doc = self.model_doc['schemas']['CFDE']['tables']\n return topo_sorted({\n table.name: [\n target_tname(fkey)\n for fkey in table.foreign_keys\n if target_tname(fkey) != table.name and target_tname(fkey) in tables_doc\n ]\n for table in self.cat_cfde_schema.tables.values()\n if table.name in tables_doc\n })\n\n def load_dataset_ancestor_tables(self):\n assoc_rows = self.catalog.get('/entity/datasets_in_datasets').json()\n ds_ids = [ row['id'] for row in self.catalog.get('/attributegroup/dataset/id').json() ]\n\n contains = {} # ancestor -> {descendant, ...}\n contained = {} # descendant -> {ancestor, ...}\n\n def add(d, k, v):\n if k not in d:\n d[k] = set([v])\n else:\n d[k].add(v)\n\n # reflexive links\n for ds in ds_ids:\n add(contains, ds, ds)\n add(contained, ds, ds)\n\n for row in assoc_rows:\n child = row['dataset_id']\n parent = row['containing_dataset_id']\n add(contains, parent, child)\n add(contained, child, parent)\n for descendant in contains.get(child, []):\n add(contains, parent, descendant)\n add(contained, descendant, parent)\n for ancestor in contained.get(parent, []):\n add(contains, ancestor, child)\n add(contained, child, ancestor)\n\n da_pairs = {\n (descendant, ancestor)\n for descendant, ancestors in contained.items()\n for ancestor in ancestors\n }\n\n self.catalog.post(\n '/entity/dataset_ancestor_reflexive',\n json=[\n {\"descendant\": descendant, \"ancestor\": ancestor}\n for descendant, ancestor in da_pairs\n ],\n )\n\n self.catalog.post(\n '/entity/dataset_ancestor',\n json=[\n {\"descendant\": descendant, \"ancestor\": 
ancestor}\n for descendant, ancestor in da_pairs\n # drop reflexive pairs\n if descendant != ancestor\n ],\n )\n\n def load_denorm_tables(self):\n query_prefix = '/attributegroup/D:=dataset/files_in_datasets/F:=file/file_produced_by/DE:=data_event'\n for tname, cname, query in [\n ('data_event', 'protocol', '/dataset:=D:id,DE:protocol'),\n ('bio_sample', 'sample_type', '/bio_sample_processed_by/B:=bio_sample/dataset:=D:id,B:sample_type'),\n ]:\n rows = self.catalog.get(\"%s%s\" % (query_prefix, query)).json()\n self.catalog.post(\"/entity/dataset_denorm_%s\" % cname, json=rows).raise_for_status()\n logger.debug(\"Denormalization table for %s.%s loaded.\" % (tname, cname))\n\n def load_data_files(self):\n tables_doc = self.model_doc['schemas']['CFDE']['tables']\n for tname in self.data_tnames_topo_sorted():\n # we are doing a clean load of data in fkey dependency order\n table = self.cat_model_root.table(\"CFDE\", tname)\n resource = tables_doc[tname][\"annotations\"].get(self.resource_tag, {})\n if \"path\" in resource:\n fname = \"%s/%s\" % (self.dirname, resource[\"path\"])\n with open(fname, \"r\") as f:\n # translate TSV to python dicts\n reader = csv.reader(f, delimiter=\"\\t\")\n row2dict = self.make_row2dict(table, next(reader))\n entity_url = \"/entity/CFDE:%s\" % urlquote(table.name)\n batch_size = 50000 # TODO: Should this be configurable?\n # Batch catalog ingests; too-large ingests will hang and fail\n # Largest known CFDE ingest has file with >5m rows\n batch = []\n for raw_row in reader:\n # Collect full batch, then post at once\n batch.append(row2dict(raw_row))\n if len(batch) >= batch_size:\n try:\n self.catalog.post(entity_url, json=batch)\n logger.debug(\"Batch of rows for %s loaded\" % table.name)\n except Exception as e:\n logger.error(\"Table %s data load FAILED from \"\n \"%s: %s\" % (table.name, fname, e))\n raise\n else:\n batch.clear()\n # After reader exhausted, ingest final batch\n if len(batch) > 0:\n try:\n self.catalog.post(entity_url, json=batch)\n except Exception as e:\n logger.error(\"Table %s data load FAILED from \"\n \"%s: %s\" % (table.name, fname, e))\n raise\n logger.info(\"All data for table %s loaded from %s.\" % (table.name, fname))\n\n\ndef main(args):\n \"\"\"Basic C2M2 catalog setup\n\n Examples:\n\n python3 -m cfde_deriva.datapackage \\\n ./table-schema/cfde-core-model.json \\\n /path/to/GTEx.v7.C2M2_preload.bdbag/data/GTEx_C2M2_instance.json\n\n When multiple files are specified, they are loaded in the order given.\n Earlier files take precedence in configuring the catalog model, while\n later files can merely augment it.\n\n When the JSON includes \"path\" attributes for the resources, the data\n files (TSV assumed) are loaded for each resource after the schema is\n provisioned.\n\n Environment variable parameters (with defaults):\n\n DERIVA_SERVERNAME=demo.derivacloud.org\n DERIVA_CATALOGID=\n\n Setting a non-empty DERIVA_CATALOGID causes reconfiguration of an\n existing catalog's presentation tweaks. 
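For example, with an illustrative catalog id:\n\n       DERIVA_CATALOGID=42 python3 -m cfde_deriva.datapackage \\\n         ./table-schema/cfde-core-model.json\n\n    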
It does not load data.\n \n \"\"\"\n # this is the deriva server where we will create a catalog\n servername = os.getenv('DERIVA_SERVERNAME', 'demo.derivacloud.org')\n\n # this is an existing catalog we just want to re-configure!\n catid = os.getenv('DERIVA_CATALOGID')\n\n ## bind to server\n credentials = get_credential(servername)\n server = DerivaServer('https', servername, credentials)\n\n # ugly quasi CLI...\n if len(args) < 1:\n raise ValueError('At least one data package JSON filename required as argument')\n\n # pre-load all JSON files and convert to models\n # in order to abort early on basic usage errors\n datapackages = [\n CfdeDataPackage(fname)\n for fname in args\n ]\n\n if catid is None:\n ## create catalog\n newcat = server.create_ermrest_catalog()\n print('New catalog has catalog_id=%s' % newcat.catalog_id)\n print(\"Don't forget to delete it if you are done with it!\")\n\n try:\n ## deploy model(s)\n for dp in datapackages:\n dp.set_catalog(newcat)\n dp.provision()\n print(\"Model deployed for %s.\" % (dp.filename,))\n\n ## customize catalog policy/presentation (only need to do once)\n datapackages[0].apply_custom_config()\n print(\"Policies and presentation configured.\")\n\n ## load some sample data?\n for dp in datapackages:\n dp.load_data_files()\n\n ## compute transitive-closure relationships\n datapackages[0].load_dataset_ancestor_tables()\n datapackages[0].load_denorm_tables()\n\n print(\"All data packages loaded.\")\n except Exception as e:\n print('Provisioning failed: %s.\\nDeleting catalog...' % e)\n newcat.delete_ermrest_catalog(really=True)\n raise\n\n print(\"Try visiting 'https://%s/chaise/recordset/#%s/CFDE:dataset'\" % (\n servername,\n newcat.catalog_id,\n ))\n else:\n ## reconfigure existing catalog\n oldcat = server.connect_ermrest(catid)\n datapackages[0].set_catalog(oldcat)\n datapackages[0].apply_custom_config()\n print('Policies and presentation configured for %s.' 
% (oldcat._server_uri,))\n\nif __name__ == '__main__':\n exit(main(sys.argv[1:]))\n","sub_path":"cfde_deriva/datapackage.py","file_name":"datapackage.py","file_ext":"py","file_size_in_byte":27778,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}\n+{"seq_id":"281254415","text":"N , Q = map(int,input().split())\n\nD = [0]*Q\nInput = []\nlength = [-1]*Q\n\nfor i in range(N):\n Input.append(list(map(int,input().split())))\n\n# sort by distance\ndist = lambda A : A[2]\nInput.sort(key=dist)\n\nfor i in range(Q):\n D[i] = int(input())\n\n# compute the stopping position of the i-th person\nfor i in range(Q):\n for j in range(N):\n if(Input[j][1] <= D[i]): # construction end time <= departure time\n continue\n\n stopTime = Input[j][2] + D[i] # departure time + distance/speed\n\n if(stopTime >= Input[j][0] - 0.5 and stopTime <= Input[j][1] - 0.5): # within the construction window\n length[i] = Input[j][2]\n break\n\nfor l in length:\n print(l)\n","sub_path":"AtCoder/abc/128e.py","file_name":"128e.py","file_ext":"py","file_size_in_byte":672,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}\n+{"seq_id":"113160378","text":"import logging\nfrom argparse import ArgumentParser\n\nfrom src.datasets import create_input_files\n\nlogging.basicConfig(level=logging.DEBUG)\nlogger = logging.getLogger()\n\n\nargparser = ArgumentParser()\n\nargparser.add_argument(\"--dataset_type\", default=\"coco\", choices=[\"coco\", \"flickr8k\", \"flickr30k\"],\n help=\"Type of dataset.\")\nargparser.add_argument(\"--json_split_path\", help=\"Path to caption json file.\")\nargparser.add_argument(\"--image_dir\",\n help=\"Directory containing all the images. \" + \\\n \"This should be the directory containing val2014, NOT val2014 itself.\")\nargparser.add_argument(\"--data_dir\", help=\"Path to the directory to which the preprocessed data will be saved.\")\nargparser.add_argument(\"--caps_per_img\", type=int, default=2, help=\"Number of captions per image.\")\nargparser.add_argument(\"--min_word_freq\", type=int, default=2,\n help=\"Minimum number of appearances in the training data of a particular word. 
\"\n + \"Words below this threshold will be replaced with the UNK token.\")\nargparser.add_argument(\"--max_len\", type=int, default=50, help=\"Maximum length of any caption.\")\nargparser.add_argument(\"--use_all_train\", default=False, action=\"store_true\",\n help=\"Whether or not to use the full training data.\")\nargparser.add_argument(\"--train_percentage\", type=float, default=0.1,\n help=\"Percentage of training data that should be processed.\")\nargparser.add_argument(\"--val_percentage\", type=float, default=0.1,\n help=\"Percentage of validation data that should be processed.\")\nargparser.add_argument(\"--test_percentage\", type=float, default=0.1,\n help=\"Percentage of testing data that should be processed.\")\n\nargs = argparser.parse_args()\n\n\ncreate_input_files(\n dataset=args.dataset_type,\n split_json_path=\"C:\\\\Users\\\\Karan Sarkar\\\\Google Drive\\\\RPI\\\\NLP\\\\Assignment4\\\\caption_datasets\\\\dataset_coco.json\",\n image_folder=\"C:\\\\Users\\\\Karan Sarkar\\\\Google Drive\\\\RPI\\\\NLP\\\\Assignment4\",\n captions_per_image=args.caps_per_img,\n min_word_freq=args.min_word_freq,\n output_folder=\"C:\\\\Users\\\\Karan Sarkar\\\\Google Drive\\\\RPI\\\\NLP\\\\Assignment4\\\\\",\n max_len=args.max_len,\n use_all_train=args.use_all_train,\n train_percentage=args.train_percentage,\n val_percentage=args.val_percentage,\n test_percentage=args.test_percentage\n)\n","sub_path":"NLP/Assignment4/preprocess_data.py","file_name":"preprocess_data.py","file_ext":"py","file_size_in_byte":2469,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"73634935","text":"'''\r\nCreated on 28 mar 2019\r\n\r\n@author: Marcin\r\n'''\r\nfrom Utils.tkinterImport import tkinter, ttk, constants\r\nfrom Utils.Random import debugStatement, dev, unitsList\r\n\r\n\r\nclass Layout():\r\n def __init__(self, ):\r\n self.MainWindow = tkinter.Tk()\r\n self.MainWindow.title('Storage Shopping Assistant')\r\n mainframe = ttk.Frame(self.MainWindow, padding='3 3 12 12')\r\n mainframe.grid(column=0, row=0, sticky=(tkinter.N,\r\n tkinter.W,\r\n tkinter.E,\r\n tkinter.S))\r\n self.MainWindow.columnconfigure(0, weight=1)\r\n self.MainWindow.rowconfigure(0, weight=1)\r\n self._RecordField = NewRecordField(mainframe,\r\n self.newRecordAction)\r\n self._listingRecordsField = ListingRecordsField(\r\n mainframe,\r\n self.displayRecords)\r\n self._editRecordField = EditRecordField(mainframe,\r\n self.showRecordAction,\r\n self.updateRecord)\r\n self._onExitAction()\r\n\r\n def startMainLoop(self):\r\n self.MainWindow.mainloop()\r\n\r\n def _onExitAction(self):\r\n from Controller.Controller import Controller\r\n self.MainWindow.protocol(\"WM_DELETE_WINDOW\", Controller().exit)\r\n\r\n def newRecordAction(self):\r\n from View.View import View\r\n debugStatement(2, 'adding record')\r\n\r\n view = View()\r\n recordFields = dict([(var._name, var.get()) for var\r\n in self._RecordField.__dict__.values()])\r\n view.addRecord(recordFields)\r\n self._RecordField.clear()\r\n self._RecordField.productName.set(\"\")\r\n\r\n def _maxLen(self, iterable):\r\n iterableLen = [len(each) for each in iterable]\r\n return max(iterableLen)\r\n\r\n def displayRecords(self):\r\n from View.View import View\r\n view = View()\r\n\r\n self._listingRecordsField.recordDisplay.delete(1.0, constants.END)\r\n headers, records = view.listRecords()\r\n tmp = []\r\n tmp.append(*records)\r\n tmp.insert(0, headers)\r\n columns = [*zip(*tmp)]\r\n columnsSize = [self._maxLen(column) for column in 
columns]\r\n toDisplay = ''\r\n toDisplay += self._displayRow(headers, columnsSize)\r\n for record in records:\r\n toDisplay += self._displayRow(record, columnsSize)\r\n self._listingRecordsField.recordDisplay.insert(constants.INSERT,\r\n toDisplay)\r\n\r\n def _displayRow(self, row, columnsSize):\r\n rowString = ''\r\n for i, element in enumerate(row):\r\n columnLen = 0\r\n rowString += element\r\n columnLen = len(element)\r\n if columnLen <= columnsSize[i]:\r\n rowString += ' ' * (columnsSize[i] - columnLen)\r\n elif columnLen > columnsSize[i]:\r\n raise ValueError('Column size for pretty print was wrongly'\r\n + ' calculated')\r\n rowString += ' |'\r\n rowString += '\\n'\r\n return rowString\r\n\r\n def showRecordAction(self):\r\n from Controller.Controller import Controller\r\n controller = Controller()\r\n result = controller.showRecordById(self._editRecordField.id.get())\r\n for key, item in self._editRecordField.__dict__.items():\r\n item.set(result[key])\r\n\r\n def updateRecord(self):\r\n from Controller.Controller import Controller\r\n controller = Controller()\r\n fields = {}\r\n for key, item in self._editRecordField.__dict__.items():\r\n fields.update({key: item.get()})\r\n controller.updateRecord(fields)\r\n\r\nclass BasicField():\r\n def __init__(self, frame):\r\n pass\r\n\r\n\r\nclass NewRecordField(BasicField):\r\n def __init__(self, frame, action):\r\n self.productName = tkinter.StringVar(name='productName')\r\n self.quantity = tkinter.StringVar(name='quantity')\r\n self.unit = tkinter.StringVar(name='unit')\r\n self.price = tkinter.StringVar(name='price')\r\n self.shop = tkinter.StringVar(name='shop')\r\n self.dateOfPurchase = tkinter.StringVar(name='dateOfPurchase')\r\n self.category = tkinter.StringVar(name='category')\r\n\r\n ttk.Label(frame, text='ProductName').grid(\r\n column=0, row=1, sticky=(tkinter.W, tkinter.E))\r\n ttk.Entry(frame, width=10, textvariable=self.productName).grid(\r\n column=0, row=2, sticky=(tkinter.W, tkinter.E))\r\n\r\n ttk.Label(frame, text='Quantity').grid(\r\n column=1, row=1, sticky=(tkinter.W, tkinter.E))\r\n ttk.Entry(frame, width=3, textvariable=self.quantity).grid(\r\n column=1, row=2, sticky=(tkinter.W, tkinter.E))\r\n\r\n ttk.Label(frame, text='Unit').grid(\r\n column=2, row=1, sticky=(tkinter.W, tkinter.E))\r\n ttk.Combobox(frame, width=3, textvariable=self.unit,\r\n values=unitsList).grid(\r\n column=2, row=2, sticky=(tkinter.W, tkinter.E))\r\n\r\n ttk.Label(frame, text='Price').grid(\r\n column=3, row=1, sticky=(tkinter.W, tkinter.E))\r\n ttk.Entry(frame, width=4, textvariable=self.price).grid(\r\n column=3, row=2, sticky=(tkinter.W, tkinter.E))\r\n\r\n ttk.Label(frame, text='Shop').grid(\r\n column=4, row=1, sticky=(tkinter.W, tkinter.E))\r\n ttk.Entry(frame, width=10, textvariable=self.shop).grid(\r\n column=4, row=2, sticky=(tkinter.W, tkinter.E))\r\n\r\n ttk.Label(frame, text='dateOfPurchase').grid(\r\n column=6, row=1, sticky=(tkinter.W, tkinter.E))\r\n ttk.Entry(frame, width=10, textvariable=self.dateOfPurchase).grid(\r\n column=6, row=2, sticky=(tkinter.W, tkinter.E))\r\n\r\n ttk.Label(frame, text='category').grid(\r\n column=7, row=1, sticky=(tkinter.W, tkinter.E))\r\n ttk.Entry(frame, width=10, textvariable=self.category).grid(\r\n column=7, row=2, sticky=(tkinter.W, tkinter.E))\r\n\r\n ttk.Button(frame, text='AddRecord', command=action).grid(\r\n column=8, row=2, sticky=(tkinter.N, tkinter.E))\r\n\r\n def clear(self):\r\n for Var in self.__dict__.items():\r\n Var[1].set(\"\")\r\n\r\n\r\nclass 
ListingRecordsField(BasicField):\r\n def __init__(self, frame, action):\r\n self.recordList = tkinter.StringVar()\r\n self.recordDisplay = tkinter.Text(frame, width=100, height=10)\r\n self.recordDisplay.grid(column=0, row=5, columnspan=8)\r\n ttk.Button(frame, text='ShowRecords', command=action).grid(row=5,\r\n column=8)\r\n\r\n\r\nclass EditRecordField(BasicField):\r\n def __init__(self,frame, showRecordAction, updateAction):\r\n self.id = tkinter.StringVar()\r\n self.productName = tkinter.StringVar()\r\n self.quantity = tkinter.StringVar()\r\n self.unit = tkinter.StringVar()\r\n self.price = tkinter.StringVar()\r\n self.shop = tkinter.StringVar()\r\n self.dateOfPurchase = tkinter.StringVar()\r\n self.category = tkinter.StringVar()\r\n\r\n ttk.Label(frame, text='ID').grid(\r\n column=0, row=6, sticky=(tkinter.W, tkinter.E))\r\n ttk.Entry(frame, width=5, textvariable=self.id).grid(\r\n column=0, row=7, sticky=(tkinter.W, tkinter.E))\r\n\r\n ttk.Label(frame, text='ProductName').grid(\r\n column=1, row=6, sticky=(tkinter.W, tkinter.E))\r\n ttk.Entry(frame, width=10, textvariable=self.productName).grid(\r\n column=1, row=7, sticky=(tkinter.W, tkinter.E))\r\n\r\n ttk.Label(frame, text='Quantity').grid(\r\n column=2, row=6, sticky=(tkinter.W, tkinter.E))\r\n ttk.Entry(frame, width=3, textvariable=self.quantity).grid(\r\n column=2, row=7, sticky=(tkinter.W, tkinter.E))\r\n\r\n ttk.Label(frame, text='Unit').grid(\r\n column=3, row=6, sticky=(tkinter.W, tkinter.E))\r\n ttk.Combobox(frame, width=3, textvariable=self.unit,\r\n values=unitsList).grid(\r\n column=3, row=7, sticky=(tkinter.W, tkinter.E))\r\n\r\n ttk.Label(frame, text='Price').grid(\r\n column=4, row=6, sticky=(tkinter.W, tkinter.E))\r\n ttk.Entry(frame, width=4, textvariable=self.price).grid(\r\n column=4, row=7, sticky=(tkinter.W, tkinter.E))\r\n\r\n ttk.Label(frame, text='Shop').grid(\r\n column=5, row=6, sticky=(tkinter.W, tkinter.E))\r\n ttk.Entry(frame, width=10, textvariable=self.shop).grid(\r\n column=5, row=7, sticky=(tkinter.W, tkinter.E))\r\n\r\n ttk.Label(frame, text='dateOfPurchase').grid(\r\n column=6, row=6, sticky=(tkinter.W, tkinter.E))\r\n ttk.Entry(frame, width=10, textvariable=self.dateOfPurchase).grid(\r\n column=6, row=7, sticky=(tkinter.W, tkinter.E))\r\n\r\n ttk.Label(frame, text='category').grid(\r\n column=7, row=6, sticky=(tkinter.W, tkinter.E))\r\n ttk.Entry(frame, width=10, textvariable=self.category).grid(\r\n column=7, row=7, sticky=(tkinter.W, tkinter.E))\r\n\r\n ttk.Button(frame, text='ShowRecord', command=showRecordAction).grid(\r\n column=8, row=7, sticky=(tkinter.N, tkinter.E))\r\n\r\n ttk.Button(frame, text='UpdateRecord', command=updateAction).grid(\r\n column=8, row=8, sticky=(tkinter.N, tkinter.E))\r\n\r\n","sub_path":"Main/View/Layout.py","file_name":"Layout.py","file_ext":"py","file_size_in_byte":9508,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"230576497","text":"#!.venv/bin/python3\nfrom formatserializer.args.arguments import ConvertArguments\nfrom formatserializer.converters.format_converter import FormatConverter\n\n\ndef main():\n parser = ConvertArguments().setup_args()\n\n ConvertArguments.parsing_config_file(parser)\n\n converter = FormatConverter(\n convert_format=parser.convert_format,\n file_from_covert=parser.file_from_convert,\n file_to_convert=parser.file_to_convert\n )\n\n converter.convert()\n\n\nif __name__ == \"__main__\":\n 
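# Editor's note -- a minimal sketch of the same conversion without the CLI,\n    # using hypothetical attribute values (the kwarg spelling mirrors the call\n    # above): FormatConverter(convert_format='yaml2json',\n    #                         file_from_covert='in.yml', file_to_convert='out.json').convert()\n    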
main()\n","sub_path":"console_tool.py","file_name":"console_tool.py","file_ext":"py","file_size_in_byte":512,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}\n+{"seq_id":"41732998","text":"from django.shortcuts import render\nfrom django.forms import formset_factory\nfrom plots.forms import AddFileForm, ValueForm\n\ndef index(request):\n\tValueFormSet = formset_factory(ValueForm, extra=1)\n\tif request.method == 'POST':\n\t\tvalue_formset = ValueFormSet(request.POST, request.FILES, prefix='values')\n\telse:\n\t\tvalue_formset = ValueFormSet(prefix='values')\n\tcontext={'value_forms':value_formset}\n\treturn render(request, 'plots/index.html', context)","sub_path":"plots/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":450,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}\n+{"seq_id":"382381030","text":"# coding=utf-8\n# from types import MethodType\n\n\nclass Student(object):\n\n __slots__ = ('name', 'age') # use a tuple to define the attribute names that may be bound\n # this only applies to the current class; it has no effect on subclasses\n\ns = Student()\ns.name = 'ruan'\ns.age = 99\n# s.score = 99 this attribute cannot be bound\n\n\n# def set_age(self, age):\n# self.age = age\n#\n# s.set_age = MethodType(set_age, s)\n# s.set_age(25)\n#\n# print(s.age)\n#\n#\n# def set_score(self, score):\n# self.score = score\n#\n# Student.set_score = MethodType(set_score, Student)\n# bind a method to an object","sub_path":"python/slots.py","file_name":"slots.py","file_ext":"py","file_size_in_byte":540,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}\n+{"seq_id":"503512192","text":"#!/anaconda3/bin/python\n# coding: utf-8\n\nfrom lxml import etree\nimport xml.etree.ElementTree as ET\nimport os\nimport numpy as np\nimport pandas as pd\n# from XML_HANDLE import *\n# or\nfrom XML_HANDLE import Xml_logfile\n\n\"\"\" \nFor documentation on the lxml package see: \n- https://docs.python.org/3.4/library/xml.etree.elementtree.html#module-xml.etree.ElementTree\n\n- What I need to do:\n - extract the values of the keys (jobname, filename, directory), stored as strings or variables, from the XML files.\n - recover their exact values by using the jobnames from the xml files to find their corresponding executable files.\n\n- Approach:\n 1. Extract all the jobs from the xml file(s) in the logFullDS directory /ok\n a. this operation returns a list of all the jobs found in a given XML file.\n note: the goal is to do this for every xml file\n 2. Look up the corresponding jobs in the executable files (the logs).\n a. read one by one all the executable (log) files in the logfullDS folder (a for loop in which with open is called)\n b. then, still inside that same for loop, walk the collectionJobFromXML list and, for each element i (which is in fact the job name recovered from the xml file), test whether it is indeed present in one of the log files.\n If so, print the filename of that file\n\n\"\"\"\n# DOC-FTSACprod.xml\n\nclass ParseElementXML():\n \"\"\"This class takes as input the name of the XML file to parse and returns a collection (list format) named collectionJobFromXML of the jobs in an XML file\n Note: the XML file must be in the same folder as the py file \"\"\"\n\n def document(self, fileXML=\"SUPprd.xml\"):\n basePath = os.path.dirname(__file__)\n fullPath = os.path.join(basePath, fileXML)\n # ---------------------------------------------------------------------,\n # try:\n # basePath = os.path.dirname(__file__)\n # fullPath = os.path.join(basePath, fileXML)\n # print(fullPath)\n # return fullPath\n # except OSError:\n # return \"double-check the file path. It must be in the folder\"\n # print(fullPath)\n return fullPath\n\n def getRoot(self, fullPath):\n tree = etree.parse(fullPath)\n root = tree.getroot()\n print(root.tag, root.attrib)\n print(f\"Infos - number of children for {root.tag}:\", len(root))\n print(\"_________-------_____----Header------___----___----___----___ \")\n return root\n\n def removeDuplicates(self, listDoublons): # not used\n '''This method removes duplicates from a list. \n It takes a list of elements as input and returns the same list with every duplicated element removed'''\n liste = []\n for i in listDoublons:\n if i not in liste:\n liste.append(i)\n return liste\n\n \nclass ParseLog():\n\n def changeDir(self):\n path_to_logfullDS = '/Users/ganasene/Downloads/folder/logsfullDS'\n r = os.chdir(path_to_logfullDS)\n return r\n\n def blockEventID(self, string):\n '''here we split on the separator word Event iD\n sep_word=Event \n !!! Some of the lists that get generated will be empty. \n So take that into account when removing occurrences from the bloc_jb list'''\n bloc_jb = string.split('Event ') # Note: take exactly Event plus a space for this to work\n # del bloc_jb[2] # removes the second occurrence, which only holds the datastage config parameters\n \n del bloc_jb[0] # removes the first occurrence, which is empty\n bloc_jb = bloc_jb[:]\n \n return bloc_jb\n\n
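# Editor's worked example of blockEventID (illustrative input, not from the source):\n# ParseLog().blockEventID('Event 1 started Event 2 finished')\n# -> ['1 started ', '2 finished'] : split('Event ') leaves a leading '' chunk, which del bloc_jb[0] removes.\n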
\n\n# ===================MAIN1===================================================\n\np = ParseElementXML()\n\ncollectionJobFromXML = []\n\nfullPath = p.document()\n\nroot = p.getRoot(fullPath)\n\n# to be uncommented.....\nfor job in root:\n # print(job.tag, job.attrib)\n # print(f\"infos - number of children for {job.tag}:\", len(job))\n # print(\"\\t\\t->\"+str(job.attrib.get('Identifier')))\n collectionJobFromXML.append(job.attrib.get('Identifier'))\n \nprint(len(collectionJobFromXML))\ncollectionJobFromXML = list(set(collectionJobFromXML))\ncollectionJobFromXML = p.removeDuplicates(collectionJobFromXML)\ncollectionJobFromXML.remove(None)\nprint(len(collectionJobFromXML))\n# print(collectionJobFromXML)\n\n######## ======================== ======================\n\n# sample result:\n# # Jx_FEUILLET_01_CHG_CPTRENDU job to find\n\n# /Users/ganasene/Downloads/folder/logsfullDS/SUPprdJx_FEUILLET_01_CHG_CPTRENDUlog.txt\nq = ParseLog()\n\npath_to_logfullDS = q.changeDir() # change of directory \ntuple_job_logfile =[]\ncompt =0\nfor jobFromXML in collectionJobFromXML:\n compt+=1\n # print(f\"job {compt}/{len(collectionJobFromXML)} ({jobFromXML})\")\n\n for logfile in os.listdir(path_to_logfullDS):\n with open(logfile, encoding='utf8') as f:\n f = f.read()\n \n if jobFromXML in f:\n print(f\"job {compt}/{len(collectionJobFromXML)} {jobFromXML} -->{logfile}\")\n job_logfile = (jobFromXML, logfile)\n tuple_job_logfile.append(job_logfile)\n # bloc = q.blockEventID(f)\n # blockTextPar = bloc[0]\n # print(blockTextPar) \n else:\n # print(f\"Jx_FEUILLET_01_CHG_CPTRENDU is not in {logfile}\")\n pass\n\n### result\n# print(tuple_job_logfile)\n\n# ===================MAIN2===================================================\n# ### processing 2 \n\n# # set up the following lists in order to create a dataframe\n# jobName = []\n# stagName = []\n# stageType = []\n# recordType = []\n# fileName = []\n# datasetValue =[]\n# #initialization of the file_job tuple\n# tuple_file_Job = []\n\n# #counter\n# num_job = [] # number of jobs for a given XML file\n# num_stage = [] # number of stages for a given XML file\n\n# # DOC-FTSACprod.xml\n# # SUPprd.xml\n\n# # basePath = os.path.dirname(__file__)\n# # fullPath = os.path.join(basePath, \"SUPprd.xml\")\n# # print(fullPath)\n\n# b = Xml_logfile()\n# fullPath = b.document()\n# print(fullPath)\n\n# # Instantiation of the etree module\n# ## parse method\n# tree = etree.parse(fullPath)\n# root = tree.getroot()\n# # print(root.tag, root.attrib)\n# # print(f\"Infos - number of children for {root.tag}:\", len(root))\n# # print(\" \")\n\n\n\n# for job in root:\n# # print(job.tag, job.attrib)\n# # print(f\"infos - number of children for {job.tag}:\", len(job))\n# jobN = job.attrib.get('Identifier')\n# num_job.append(jobN)\n# # print(\"\\t\\t1>\"+str(job.attrib.get('Identifier')))\n# # print('')\n# for record in job:\n# attribute = record.attrib.get('Type')\n# if attribute == 'CustomStage':\n# # print(\"\\t\"+record.tag, record.attrib)\n# # print(f\"\\tinfos - number of children for {record.tag}:\", len(record))\n# # print(\"\\t\\t---->\"+ attribute)\n\n# for PropertyOrcollection in record:\n# 
attribute_Name = PropertyOrcollection.attrib.get('Name')\n# if attribute_Name == 'Name':\n# TextPropertyOrcollection = str(PropertyOrcollection.text)\n\n# # print(f'{jobN} ->{TextPropertyOrcollection}')\n# # print(\"\\t\\t\\t2>\"+TextPropertyOrcollection)\n# jobName.append(jobN)\n# stagName.append(TextPropertyOrcollection)\n\n# elif attribute_Name == 'StageType':\n# TextPropertyOrcollection = str(PropertyOrcollection.text)\n# # print(\"\\t\\t\\t3>\"+TextPropertyOrcollection)\n# # jobName.append(jobN)\n# stageType.append(TextPropertyOrcollection)\n\n# elif attribute == 'CustomOutput':\n# # print(\"\\t\"+record.tag, record.attrib)\n# # print(f\"\\tinfos - number of children for {record.tag}:\", len(record))\n# # print(\"\\t\\t---->\"+ attribute)\n# for PropertyOrcollection in record:\n# if PropertyOrcollection.tag == 'Collection' and PropertyOrcollection.attrib.get(\"Name\") == 'Properties':\n# # print(\"\\t\\t\\t\"+PropertyOrcollection.tag, PropertyOrcollection.attrib)\n# attribute_Name = PropertyOrcollection.attrib.get('Name')\n\n# for subrecord in PropertyOrcollection:\n\n# for prop in subrecord:\n\n# if prop.attrib.get('Name') == 'Name':\n# Textprop = str(prop.text)\n# if Textprop == r\"file\\(20)\":\n# pass\n# # print(\"\\t\\t\\t\\t>\"+Textprop) \n# # elif Textprop == \"dataset\":\n# # print(\"\\t\\t\\t\\t>\"+Textprop)\n# # # pass\n# else:\n# pass\n# else:\n# Textprop = str(prop.text)\n# if r')file' in Textprop:\n# # print(f'{jobN} ->{Textprop}')\n# tup = (jobN, Textprop)\n# tuple_file_Job.append(tup)\n# # print(\"\\t\\t\\t\\t4>\"+Textprop)\n# fileName.append(Textprop)\n# recordType.append('CustomOutput') # add the CustomOutput attribute to the recordType column\n\n\n# else:\n# # fileName.append(\"NaN\")\n# pass\n\n# elif attribute == 'CustomInput':\n# # print(\"\\t\"+record.tag, record.attrib)\n# # print(f\"\\tinfos - number of children for {record.tag}:\", len(record))\n# # print(\"\\t\\t---->\"+ attribute)\n# # recordType.append(attribute)\n\n# for PropertyOrcollection in record:\n# if PropertyOrcollection.tag == 'Collection' and PropertyOrcollection.attrib.get(\"Name\") == 'Properties':\n# # print(\"\\t\\t\\t\"+PropertyOrcollection.tag, PropertyOrcollection.attrib)\n# attribute_Name = PropertyOrcollection.attrib.get('Name')\n\n# for subrecord in PropertyOrcollection:\n\n# for prop in subrecord:\n\n# if prop.attrib.get('Name') == 'Name':\n# Textprop = str(prop.text)\n# if Textprop == \"dataset\":\n# pass\n# # print(\"\\t\\t\\t\\t>\"+Textprop)\n# else:\n# pass\n# else: # i.e. name = value or others \n# Textprop = str(prop.text)\n# if r\".ds\" in Textprop:\n# # print(f'{jobN} ->{Textprop}')\n# # print(\"\\t\\t\\t\\t4>\"+Textprop)\n# datasetValue.append(Textprop)\n# recordType.append('CustomOutput') # add the CustomInput attribute to the recordType column\n \n\n# else:\n# # fileName.append(\"NaN\")\n# pass\n# else:\n# pass\n\n\n# # the lengths are awkward to manage; the trick found is to manipulate the columns as a dataframe\n\n# print(\"|----Information on the elements extracted from this XML file---|\")\n# print(\"1./Number of jobs in this file:\", len(num_job))\n# print(\"2.Number of stages in this file:\", len(stagName))\n# print(\"3./Number of stageType in this file:\", len(stageType))\n# print(\"4./Number of fileName in this file:\", len(fileName))\n# print(\"4./Number of recordType in this file:\", len(recordType))\n# print(\"4./Number of datasetValue in this file:\", len(datasetValue))\n# print('\\n')\n\n# print(\"|---- check columns for table creation 
---|\")\n\n# print(len(jobName))\n# # print(jobName)\n\n# print(len(stagName))\n# # print(stagName)\n\n# print(len(stageType))\n# # print(stageType)\n\n# print(len(fileName))\n# # print(fileName)\n\n# print(len(recordType))\n# # print(recordType)\n\n# print(len(datasetValue))\n# # print(datasetValue)\n\n# # print('\\n')\n# # print(len(tuple_file_Job))\n# # print(tuple_file_Job)\n\n\n# ###=====================main3===================================\n# # ++++++++++++++++++++++Dataframe++++++++++++++++++++++++++++\n# file = []\n# data = {\n# \"jobName\": jobName,\n# \"stagName\": stagName,\n# \"stageType\": stageType,\n# \"recordType\": np.nan,\n# 'datasetValue': np.nan,\n# 'fileName': np.nan,\n\n# }\n\n# df = pd.DataFrame(data)\n# print(df)\n\n# # gestion cle-valeur\n# idx = []\n# tuple_idx_job = []\n# tuple_idx_file = []\n# tuple_idx_logfile = []\n# for key, job in enumerate(df.jobName):\n# for i in range(len(tuple_file_Job)):\n# if job == tuple_file_Job[i][0]:\n# idx_job = (key, job)\n# idx_file = (key, tuple_file_Job[i][1])\n# # print(idx_job)\n# tuple_idx_job.append(idx_job)\n# tuple_idx_file.append(idx_file)\n# idx.append(key)\n# file.append(tuple_file_Job[i][1])\n \n# # else:\n# # pass\n# # # file.append('NaN')\n\n# # for j in range(len(tuple_job_logfile)):\n# # if job == tuple_job_logfile[j][0]:\n# # idx_logfile = (key, logfile)\n# # tuple_idx_logfile.append(idx_logfile)\n\n\n# # print(tuple_idx_logfile)\n\n\n# # affection des data de files-excates à la 4 eme colonne\n# for j in range(len(tuple_idx_job)):\n# # i == index , j== job, et fl = file\n# ix = tuple_idx_job[j][0] # index des valeurs file\n# jb = tuple_idx_job[j][1]\n# fl = tuple_idx_file[j][1] # file extrait du tuple\n# # print(ix) #\n# # print(jb)\n# # print(fl)\n# df.loc[tuple_idx_job[j][0], 'fileName'] = fl\n\n\n\n# print(df)\n# # df.to_csv('/Users/ganasene/Desktop/outputxml.csv')\n\n\n\n","sub_path":"XML_ExeFile.py","file_name":"XML_ExeFile.py","file_ext":"py","file_size_in_byte":14670,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"409015303","text":"import pymysql\nfrom flaskext.mysql import MySQL\n\n\nclass DBResponse:\n \"\"\"Экземпляр данного класса описывает станадартный ответ на запрос к базе данных MySQL\"\"\"\n\n def __init__(self, status, data):\n \"\"\"\n :param status: Результат запрос к базе данных (уcпешен - True, неуспешен - False)\n :param data: Данные, полученные из базы данных\n :rtype: DBResponse\n \"\"\"\n self.status = status\n\n self.data = data\n\n\nclass MySQLHandler:\n \"\"\"Класс для выполнения запрос к базе данных MySQL посредством хранимых процедур\"\"\"\n\n def __init__(self, app, db_host, db_name, db_user, db_password):\n \"\"\"\n :param app: Экземпляра класса Flask\n :param db_host: Название сервера базы данных\n :param db_name: Название базы данных\n :param db_user: Логин пользователя базы данных\n :param db_password: Пароль пользователя базы данных\n \"\"\"\n self._app = app\n self._mysql = MySQL()\n # MySQL configurations\n self._app.config['MYSQL_DATABASE_USER'] = db_user\n self._app.config['MYSQL_DATABASE_PASSWORD'] = db_password\n self._app.config['MYSQL_DATABASE_DB'] = db_name\n self._app.config['MYSQL_DATABASE_HOST'] = db_host\n\n self._mysql.init_app(self._app)\n # self.conn = self._mysql.connect()\n\n def close_connection(self):\n try:\n self.conn.close()\n message = \"OK\"\n return DBResponse(True, message)\n except pymysql.err.OperationalError as e:\n return DBResponse(False, str(e))\n\n def connect(self):\n try:\n 
def call_get_data_proc(self, proc_name: str, *proc_args):\n \"\"\"\n The method connects to the MySQL database and calls a stored procedure. The procedure fetches some data from the MySQL database.\n :param proc_name: Name of the stored procedure\n :param proc_args: Arguments for calling the stored procedure\n :return: Instance of the DBResponse class\n \"\"\"\n # connect to mysql database\n try:\n\n cursor = self.conn.cursor()\n cursor.callproc(proc_name, proc_args)\n data = cursor.fetchall()\n return DBResponse(True, data)\n except Exception as e:\n message = str(e)\n return DBResponse(False, message)\n finally:\n cursor.close()\n\n def call_create_delete_proc(self, proc_name, *proc_args):\n \"\"\"\n The method connects to the MySQL database and calls a stored procedure. The procedure deletes or inserts data.\n :param proc_name: Name of the stored procedure\n :param proc_args: Arguments for calling the stored procedure\n :return: Instance of the DBResponse class\n \"\"\"\n # connect to mysql database\n try:\n\n cursor = self.conn.cursor()\n cursor.callproc(proc_name, proc_args)\n data = cursor.fetchall()\n if len(data) == 0:\n self.conn.commit()\n return DBResponse(True, data)\n else:\n return DBResponse(False, 'Failed to perform the action!')\n except Exception as e:\n message = str(e)\n return DBResponse(False, message)\n finally:\n cursor.close()\n\n def execute_many_insert(self, query: str, data: list):\n \"\"\"\n @param query: Insert query string\n @type query: str\n @param data: Insert list of data (insert several times)\n @type data: list\n @return: Response from database\n @rtype: DBResponse\n \"\"\"\n try:\n\n cursor = self.conn.cursor()\n for elem in data:\n cursor.executemany(query, elem)\n self.conn.commit()\n return DBResponse(True, \"\")\n except Exception as e:\n message = str(e)\n return DBResponse(False, message)\n finally:\n cursor.close()\n\n def execute_insert(self, query: str, data: list) -> DBResponse:\n \"\"\"\n @param query: insert query string\n @type query: str\n @param data: insert data\n @type data: list\n @return: Response from database\n @rtype: DBResponse\n \"\"\"\n try:\n\n cursor = self.conn.cursor()\n cursor.executemany(query, data)\n self.conn.commit()\n lastid = cursor.lastrowid\n return DBResponse(True, {'lastid': lastid})\n except Exception as e:\n message = str(e)\n return DBResponse(False, message)\n finally:\n cursor.close()\n\n def execute_select(self, query: str, data: list = None) -> DBResponse:\n \"\"\"\n @param query: Select query string\n @type query: str\n @param data: optional query parameters\n @type data: list\n @return: Response from database\n @rtype: DBResponse\n \"\"\"\n try:\n\n cursor = self.conn.cursor()\n cursor.execute(query, data)\n result = cursor.fetchall()\n return DBResponse(True, result)\n except Exception as e:\n message = str(e)\n return DBResponse(False, message)\n finally:\n cursor.close()\n\n def execute_update(self, query: str, data: list = None) -> DBResponse:\n \"\"\"\n @param query: update query string\n @type query: str\n @param data: update data params\n @type data: list\n @return: Response from database\n @rtype: DBResponse\n \"\"\"\n try:\n cursor = self.conn.cursor()\n cursor.executemany(query, data)\n self.conn.commit()\n return DBResponse(True, \"\")\n except Exception as e:\n message = str(e)\n return DBResponse(False, message)\n finally:\n cursor.close()\n\n 
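# Editor's sketch (hypothetical table and rows, not from the source) -- the\n # execute_* helpers above rely on pymysql's executemany, which runs the\n # statement once per parameter tuple:\n # handler.execute_insert('INSERT INTO users (name, age) VALUES (%s, %s)',\n # [('alice', 30), ('bob', 25)])\n\n 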
\"\"\"\n @param query: delete query string\n @type query: str\n @param data: delete data params\n @type data:\n :type data: DBResponse\n \"\"\"\n try:\n\n cursor = self.conn.cursor()\n cursor.executemany(query, data)\n self.conn.commit()\n return DBResponse(True, \"\")\n except Exception as e:\n message = str(e)\n return DBResponse(False, message)\n finally:\n cursor.close()\n","sub_path":"server/libs/db_handler.py","file_name":"db_handler.py","file_ext":"py","file_size_in_byte":7211,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"619993866","text":"import asyncio\nfrom aiohttp import web\nfrom aiohttp import ClientSession\n\nfrom keystoneauth1.identity import v3\nfrom keystoneauth1 import session\nfrom keystoneclient.v3 import client\nfrom novaclient import client as nova_client\nfrom glanceclient import client as glance_client\nfrom neutronclient.v2_0 import client as neutron_client\n\nimport uvloop\nimport json\n\n\nAUTH_URL = 'http://10.220.104.33:5000/v3'\nUSERNAME = \"admin\"\nPASSWORD = \"admin\"\nPROJECT_NAME = \"admin\"\nUSER_DOMAIN_ID = \"default\"\nPROJECT_DOMAIN_ID = \"default\"\n\nNOVA_ENDPOINT = 'http://10.220.104.33:8774/v2.1'\nGLANCE_ENDPOINT = 'http://10.220.104.33:9292/v2'\nNEUTRON_ENDPOINT = 'http://10.220.104.33:9696/v2.0'\n\nasync def get_data(request):\n auth_token = get_auth_token()\n res_list = await asyncio.gather(\n list_flavors(auth_token),\n list_servers(auth_token),\n list_images(auth_token),\n list_networks(auth_token),\n list_ports(auth_token),\n list_fips(auth_token),\n )\n response = web.Response(body=\"\\n\".join(\n [str(x) for x in res_list]).encode())\n return response\n\nasync def list_flavors(auth_token):\n\n url = '%s/flavors/detail' % NOVA_ENDPOINT\n headers = {'content-type': 'application/json', 'X-Auth-Token': auth_token}\n async with ClientSession() as session:\n async with session.get(url, headers=headers) as response:\n response = await response.json()\n return [s['name'] for s in response['flavors']]\n\nasync def list_servers(auth_token):\n\n url = '%s/servers/detail' % NOVA_ENDPOINT\n headers = {'content-type': 'application/json', 'X-Auth-Token': auth_token}\n async with ClientSession() as session:\n async with session.get(url, headers=headers) as response:\n response = await response.json()\n return [s['name'] for s in response['servers']]\n\nasync def list_images(auth_token):\n\n url = '%s/images' % GLANCE_ENDPOINT\n headers = {'content-type': 'application/json', 'X-Auth-Token': auth_token}\n async with ClientSession() as session:\n async with session.get(url, headers=headers) as response:\n response = await response.json()\n return [s['name'] for s in response['images']]\n\nasync def list_networks(auth_token):\n\n url = '%s/networks.json' % NEUTRON_ENDPOINT\n headers = {'content-type': 'application/json', 'X-Auth-Token': auth_token}\n async with ClientSession() as session:\n async with session.get(url, headers=headers) as response:\n response = await response.json()\n return [s['name'] for s in response['networks']]\n\nasync def list_ports(auth_token):\n\n url = '%s/ports.json' % NEUTRON_ENDPOINT\n headers = {'content-type': 'application/json', 'X-Auth-Token': auth_token}\n async with ClientSession() as session:\n async with session.get(url, headers=headers) as response:\n response = await response.json()\n return [s['name'] for s in response['ports']]\n\nasync def list_fips(auth_token):\n\n url = '%s/floatingips.json' % NEUTRON_ENDPOINT\n headers = {'content-type': 
'application/json', 'X-Auth-Token': auth_token}\n async with ClientSession() as session:\n async with session.get(url, headers=headers) as response:\n response = await response.json()\n return [s['floating_ip_address'] for s in response['floatingips']]\n\ndef get_auth_token():\n auth = v3.Password(auth_url=AUTH_URL, username=USERNAME,\n password=PASSWORD, project_name=PROJECT_NAME,\n user_domain_id=USER_DOMAIN_ID, project_domain_id=PROJECT_DOMAIN_ID)\n sess = session.Session(auth=auth)\n return sess.get_token()\n\nasyncio.set_event_loop_policy(uvloop.EventLoopPolicy())\napp = web.Application()\napp.router.add_route(\"GET\", \"/\", get_data)\nweb.run_app(app, port=8080)\n","sub_path":"async_server.py","file_name":"async_server.py","file_ext":"py","file_size_in_byte":3819,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"211187946","text":"from django.conf.urls import url\n\nfrom . import views\n\napp_name = 'commerce'\nurlpatterns = [\n\turl(r'^$', views.index, name='index'),\n\turl(r'^registro/$', views.registro, name='registro'),\n\turl(r'^negocios/$', views.negocios, name='negocios'),\n\turl(r'^productos/$', views.productos, name='productos'),\n\turl(r'^registrarse/$', views.registrarse, name='registrarse'),\n\turl(r'^login/$', views.login, name='login'),\n\turl(r'^logout/$', views.logout, name='logout'),\n\turl(r'^(?P<negocio_id>[0-9]+)/productos/$', views.productos, name='productos'),\n\turl(r'^pedido/$', views.pedido, name='pedido'),\n]","sub_path":"hastatucasap/commerce/hastatucasap/commerce/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":591,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"17755283","text":"#!/usr/bin/env python\n# uniqueinorder.py\n\n'''\nunique_in_order('AAAABBBCCDAABBB') == ['A', 'B', 'C', 'D', 'A', 'B']\nunique_in_order('ABBCcAD') == ['A', 'B', 'C', 'c', 'A', 'D']\nunique_in_order([1,2,2,3,3]) == [1,2,3]\n'''\n\ndef unique_in_order(strDuplicates):\n lstFinal = []\n charLast = \"\"\n charCurrent =\"\"\n for charCurrent in strDuplicates:\n print(\"last is\" + charLast)\n print(\"current is\" + charCurrent)\n if charCurrent != charLast:\n lstFinal.append(charCurrent)\n charLast = charCurrent\n else:\n charLast = charCurrent\n return lstFinal\n\n\nprint(unique_in_order('AAAABBBCCDAABBB'))","sub_path":"codewars/UniqueInOrder/uniqueinorder.py","file_name":"uniqueinorder.py","file_ext":"py","file_size_in_byte":643,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"53604913","text":"import pygame\nimport numpy as np\n\nfrom classes import Application\nfrom colors import Colors\n\n\nclass SuperStarfield(Application):\n MIN_SIZE = (300, 200)\n VEL_MAX = 0\n\n DEFAULT_CONFIG = (\"Super Starfield\", Colors.ORANGE)\n WINDOW_PROPERTIES = [\"RESIZABLE\"]\n\n def __init__(self, parent, screen, args):\n self.set_parent(parent)\n\n # set up a random batch of stars for the background\n self.z_range = (50, 2000) # range for Z coordinates of stars\n\n self.action = \"\"\n self.background_color = (0, 0, 0)\n self.resize(screen)\n\n def resize(self, screen):\n w, h = screen.get_size()\n self.nr_stars = int(w * h / 200)\n self.screen = screen\n self.screen_size = np.array(self.screen.get_size())\n self.mid_screen = self.screen_size // 2\n if self.parent:\n self.parent.set_title(\"{} - {} étoiles\".format(self.DEFAULT_CONFIG[0], self.nr_stars))\n self.init()\n\n def init(self):\n self.stars = 
np.random.rand(self.nr_stars, 3) * np.array([self.screen_size[0] - 2, self.screen_size[1] - 2, 1.0]) \\\n + np.array([-self.mid_screen[0], -self.mid_screen[1], 0.0])\n\n # adjust Z coordinates as more stars needed at distance for a balanced view\n self.stars[:, 2] = (self.stars[:, 2] ** 0.5) * (self.z_range[1] - self.z_range[0]) + self.z_range[0]\n self.star_move = np.array([0.0, 0.0, -0.5])\n self.prev_time = pygame.time.get_ticks()\n\n def set_parent(self, parent):\n self.parent = parent\n\n def get_action(self):\n return self.action\n\n def update(self):\n if self.parent:\n if self.parent.keypressed():\n self.touche = self.parent.get_key()\n if self.touche == pygame.K_ESCAPE:\n self.action = \"QUIT\"\n\n # clear screen for a new frame\n self.time = pygame.time.get_ticks()\n # self.screen.fill(self.background_color)\n self.move_stars(self.time, self.prev_time)\n\n def move_stars(self, time, prev_time):\n\n # move stars in X,Y depending on their Z coordinate - the closer the faster / bigger move. Hence divide star_move X & Y by star Z\n self.stars += (time - prev_time) * self.star_move / np.hstack((self.stars[:, 2:3], self.stars[:, 2:3], np.ones((self.nr_stars, 1))))\n self.stars[:, 1][self.stars[:, 1] > self.mid_screen[1] - 2] -= self.screen_size[1] - 2\n\n # move stars using Z coordinate and Z move\n if self.star_move[2] != 0.0:\n self.stars[:, 0:2] *= self.stars[:, 2:3] / (self.stars[:, 2:3] + (time - prev_time) * self.star_move[2])\n\n # if outside of screen, normally replace with a new random star at a random X, Y edge and random Z\n nr_half = self.nr_stars // 2\n # first half: vertical edge\n self.stars[0:nr_half, :][(self.stars[0:nr_half, 2] > self.z_range[1])] = np.hstack((\n np.random.randint(0, 2, (np.shape(self.stars[0:nr_half, :][(self.stars[0:nr_half, 2] > self.z_range[1])])[0], 1)) * (self.screen_size[0] - 2) - self.mid_screen[0],\n np.random.rand(np.shape(self.stars[0:nr_half, :][(self.stars[0:nr_half, 2] > self.z_range[1])])[0], 1) * (self.screen_size[1] - 2) - self.mid_screen[1],\n np.random.rand(np.shape(self.stars[0:nr_half, :][(self.stars[0:nr_half, 2] > self.z_range[1])])[0], 1) * (self.z_range[1] - self.z_range[0]) + self.z_range[0]\n ))\n # second half: horizontal edge\n self.stars[nr_half:, :][(self.stars[nr_half:, 2] > self.z_range[1])] = np.hstack((\n np.random.rand(np.shape(self.stars[nr_half:, :][(self.stars[nr_half:, 2] > self.z_range[1])])[0], 1) * (self.screen_size[0] - 2) - self.mid_screen[0],\n np.random.randint(0, 2, (np.shape(self.stars[nr_half:, :][(self.stars[nr_half:, 2] > self.z_range[1])])[0], 1)) * (self.screen_size[1] - 2) - self.mid_screen[1],\n np.random.rand(np.shape(self.stars[nr_half:, :][(self.stars[nr_half:, 2] > self.z_range[1])])[0], 1) * (self.z_range[1] - self.z_range[0]) + self.z_range[0]\n ))\n # if Z too close OR X, Y out of bounds due to Z move, replace with a new random star at maximum Z\n self.stars[(self.stars[:, 2] < self.z_range[0]) | (abs(self.stars[:, 0] + 1) > self.mid_screen[0] - 1) | (abs(self.stars[:, 1] + 1) > self.mid_screen[1] - 1)] \\\n = np.random.rand(np.shape(self.stars[(self.stars[:, 2] < self.z_range[0]) | (abs(self.stars[:, 0] + 1) > self.mid_screen[0] - 1) | (abs(self.stars[:, 1] + 1) > self.mid_screen[1] - 1)])[0], 3) \\\n * np.array([self.screen_size[0] - 2, self.screen_size[1] - 2, 0]) + np.array([-self.mid_screen[0], -self.mid_screen[1], self.z_range[1]])\n\n def draw(self):\n self.screen.fill(self.background_color)\n while self.screen.get_locked():\n self.screen.unlock()\n rgb_array = 
pygame.surfarray.pixels3d(self.screen)\n        # print(self.time)\n\n        # define color as a function of distance\n        c_shades = np.array([0.6, 0.8, 1.0])  # percentage of maximum R, G, B color used to tilt to Blue\n        # c_shades = np.array([0.0, 0.6, 1.0])  # percentage of maximum R, G, B color used to tilt to Green\n\n        colors = (c_shades * ((1.0 - self.stars[:, 2:3] / (self.z_range[1] - self.z_range[0])) * 200 + 55)).astype(np.uint8)\n        stars_int = (self.stars[:, 0:2]).astype(np.int16)\n\n        rgb_array[(stars_int[:, 0] + self.mid_screen[0]), (stars_int[:, 1] + self.mid_screen[1]), 0:3] = colors\n        # add additional pixels to those which are closest (color is above a threshold)\n\n        rgb_array[(stars_int[:, 0][colors[:, 2] > 130] + self.mid_screen[0] + 1),\n                  (stars_int[:, 1][colors[:, 2] > 130] + self.mid_screen[1]), 0:3] = colors[colors[:, 2] > 130]\n        rgb_array[(stars_int[:, 0][colors[:, 2] > 180] + self.mid_screen[0]),\n                  (stars_int[:, 1][colors[:, 2] > 180] + self.mid_screen[1] + 1), 0:3] = colors[colors[:, 2] > 180]\n\n        rgb_array[(stars_int[:, 0][colors[:, 2] > 220] + self.mid_screen[0] + 1),\n                  (stars_int[:, 1][colors[:, 2] > 220] + self.mid_screen[1] + 1), 0:3] = colors[colors[:, 2] > 220]\n\n        self.prev_time = self.time\n\n\ndef run():\n    pygame.init()\n    running = True\n    screen = pygame.display.set_mode((1800, 800), pygame.RESIZABLE, 32)  # pygame.FULLSCREEN)\n    # my_screen = screen.subsurface(pygame.Rect(100, 50, 600, 300))\n\n    w, h = screen.get_size()\n    window_surf = pygame.Surface((w-100, h-100), 0, 32)\n\n    w, h = window_surf.get_size()\n    my_screen = screen.subsurface(pygame.Rect(50, 50, w, h))\n\n    f = SuperStarfield(None, my_screen, ())\n    while running:\n        pygame.time.Clock().tick(60)\n        f.update()\n        f.draw()\n        pygame.display.update()\n        for event in pygame.event.get():\n            if event.type == pygame.MOUSEBUTTONDOWN:\n                pass\n\n            elif event.type == pygame.MOUSEBUTTONUP:\n                pass\n\n            elif event.type == pygame.KEYUP:\n                if event.key == pygame.K_ESCAPE:\n                    # SuperStarfield has no close() method, so just end the main loop\n                    running = False\n\n            elif event.type == pygame.QUIT:\n                running = False\n\n            elif event.type == pygame.VIDEORESIZE:\n                f.resize(screen)\n\n    pygame.quit()\n\n\nif __name__ == '__main__':\n    print(\"Compilation : Ok\")\n    run()\n    print(\"Fin\")\n","sub_path":"window2/superstarfield.py","file_name":"superstarfield.py","file_ext":"py","file_size_in_byte":7557,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"372338066","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.7 (62211)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: /usr/local/lib/python2.7/site-packages/webapptitude/application.py\n# Compiled at: 2016-08-31 16:32:16\nimport os, webapp2, logging\nfrom webapp2 import exc as exceptions\nfrom handlers import patch_request, patch_response\nfrom decorator import is_dev_server\ntry:\n    from appengine_config import config as site_config\nexcept ImportError:\n    site_config = {}\n\nlogging.getLogger().setLevel(logging.INFO)\n\nclass SaneRouter(webapp2.Router):\n\n    def dispatch(self, request, response):\n        request = patch_request(request)\n        response = patch_response(response)\n        result = super(SaneRouter, self).dispatch(request, response)\n        if isinstance(result, webapp2.Response):\n            response = result\n        return response\n\n\nclass WSGIApplication(webapp2.WSGIApplication, object):\n    debug = False\n    router_class = SaneRouter\n\n    @webapp2.cached_property\n    def 
dev_appserver(self):\n return is_dev_server()\n\n @webapp2.cached_property\n def logger(self):\n logger = logging.getLogger(__name__)\n logger.setLevel(self.debug and logging.DEBUG or logging.INFO)\n logger.addHandler(logging.StreamHandler())\n return logger\n\n @classmethod\n def accept_http_method(cls, *methods):\n \"\"\"Allow the request to handle additional HTTP methods.\"\"\"\n cls.allowed_methods = cls.allowed_methods.union(methods)\n\n def __init__(self, *args, **kwargs):\n insert_static_route = os.environ.get('STATIC_ROUTE', None)\n config = {}\n config.update(site_config)\n config.update(kwargs.pop('config', {}))\n kwargs['config'] = config\n if 'debug' not in kwargs:\n kwargs['debug'] = self.dev_appserver\n self.debug = kwargs.get('debug', False)\n super(WSGIApplication, self).__init__(*args, **kwargs)\n if insert_static_route:\n self.router.add(webapp2.Route(insert_static_route, handler=webapp2.RequestHandler, name='static'))\n return\n\n def route(self, path_expr, *args, **options):\n \"\"\"Construct and attach a new request handler, with route path.\"\"\"\n\n def __handler(handler_cls):\n route = webapp2.Route(path_expr, handler_cls, *args, **options)\n self.router.add(route)\n return handler_cls\n\n if len(args) and issubclass(args[0], webapp2.RequestHandler):\n handler = args[0]\n args = args[1:]\n return __handler(handler)\n else:\n return __handler\n\n def build(self, request, name, args=[], kwargs={}):\n \"\"\"Build a URL based on a named route.\"\"\"\n assert isinstance(request, webapp2.Request)\n return self.router.build(request, name, args, kwargs)\n\n\nWSGIApplication.accept_http_method('PATCH', 'HEAD', 'TRACE')","sub_path":"pycfiles/webapptitude-0.0.10.linux-x86_64.tar/application.py","file_name":"application.py","file_ext":"py","file_size_in_byte":2920,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"414667504","text":"#Matthew 12/02/2020\r\n\r\n#Import\r\nfrom tkinter import*\r\nimport random\r\n\r\n#Class\r\nclass MathQuiz():\r\n def __init__(self, parent):\r\n #Variables\r\n self.score = 0\r\n self.name = \"\"\r\n \r\n \r\n \"\"\"Welcome Frame\"\"\"\r\n self.Welcome = Frame(parent)\r\n self.Welcome.grid(row=0, column=0)\r\n \r\n #title\r\n self.TitleLabel = Label(self.Welcome, text = \"Welcome to Mathquiz\", \r\n bg = \"blue\", fg = \"white\", width = 20, padx = 30, \r\n pady = 30, font = (\"Time\", \"14\", \"bold italic\"))\r\n self.TitleLabel.grid(columnspan = 2)\r\n \r\n #Button\r\n self.NextButton = Button(self.Welcome, text = \"Next\", command = lambda:[self.QuestionGen(), self.show_Quiz()])\r\n self.NextButton.grid(row = 8, column = 1)\r\n \r\n \r\n \"\"\"Quiz Frame\"\"\"\r\n self.Quiz = Frame(parent)\r\n \r\n #Title\r\n self.TitleLabel = Label(self.Quiz, text = \"QUIZ\", \r\n bg = \"blue\", fg = \"white\", width = 20, padx = 30, \r\n pady = 30, font = (\"Time\", \"14\", \"bold\"))\r\n self.TitleLabel.grid(columnspan = 2)\r\n \r\n #Question\r\n self.Question = Label(self.Quiz, fg = \"black\",font = (\"Arial\", \"10\"))\r\n self.Question.grid(row = 7, column = 0)\r\n self.Question.configure(text = \"\")\r\n \r\n #Reply\r\n self.Reply = Label(self.Quiz, fg = \"black\", font = (\"Arial\", \"10\"))\r\n self.Reply.grid(row = 7, column = 1)\r\n self.Reply.configure(text = \"\")\r\n \r\n #Answer Box\r\n self.AnswerBox = Entry(self.Quiz)\r\n self.AnswerBox.grid(row = 8, column = 0)\r\n \r\n #Home button\r\n self.HomeButton = Button(self.Quiz, text = \"Home\", command = self.show_Welcome)\r\n 
self.HomeButton.grid(row = 9, column = 1)\r\n        \r\n        #Check button\r\n        self.CheckButton = Button(self.Quiz, text = \"Check\", command = self.Check)\r\n        self.CheckButton.grid(row = 9, column = 0)\r\n        \r\n        \r\n        \"\"\"CORRECT page\"\"\"\r\n        self.Correct = Frame(parent)\r\n        #Title\r\n        self.TitleLabel = Label(self.Correct, text = \"CORRECT!\",\r\n                                bg = \"green\", fg = \"white\", width = 20, padx = 30, \r\n                                pady = 30, font = (\"Time\", \"14\", \"bold italic\"))\r\n        self.TitleLabel.grid(columnspan = 2)\r\n        \r\n        #Back button\r\n        self.BackButton = Button(self.Correct, text = \"Next Question\", command = lambda:[self.QuestionGen(), self.show_Quiz()])\r\n        self.BackButton.grid(row = 9, column = 0)\r\n        \r\n        \r\n        \"\"\"INCORRECT page\"\"\"\r\n        self.Incorrect = Frame(parent)\r\n        #Title\r\n        self.TitleLabel = Label(self.Incorrect, text = \"INCORRECT!\",\r\n                                bg = \"red\", fg = \"white\", width = 20, padx = 30, \r\n                                pady = 30, font = (\"Time\", \"14\", \"bold italic\"))\r\n        self.TitleLabel.grid(columnspan = 2)\r\n        \r\n        #Total\r\n        #self.AnswerLabel = Label(self.Incorrect)\r\n        \r\n        #Back button\r\n        self.BackButton = Button(self.Incorrect, text = \"Next Question\", command = lambda:[self.QuestionGen(), self.show_Quiz()])\r\n        self.BackButton.grid(row = 9, column = 0)\r\n        \r\n        \r\n        \r\n    #Functions\r\n    def show_Welcome(self):\r\n        self.Quiz.grid_remove()\r\n        self.Welcome.grid()\r\n        \r\n    def show_Quiz(self):\r\n        self.Welcome.grid_remove()\r\n        self.Correct.grid_remove()\r\n        self.Incorrect.grid_remove()\r\n        self.Quiz.grid()\r\n        \r\n    def Check(self):\r\n        print(self.total)\r\n        self.answer = self.AnswerBox.get()\r\n        try:\r\n            self.answer_mod = int(self.answer)\r\n        except ValueError:\r\n            # a non-numeric answer counts as incorrect instead of crashing the callback\r\n            self.answer_mod = None\r\n        if self.answer_mod == self.total:\r\n            self.Quiz.grid_remove()\r\n            self.Correct.grid()\r\n\r\n        else:\r\n            self.Quiz.grid_remove()\r\n            self.Incorrect.grid()\r\n        \r\n    def QuestionGen(self):\r\n        #list\r\n        num_list = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]\r\n        \r\n        #Picks numbers\r\n        self.number_1 = random.choice(num_list)\r\n        self.number_2 = random.choice(num_list)\r\n        self.total = self.number_1 + self.number_2\r\n        self.add = \"{} + {}\".format(self.number_1, self.number_2)\r\n        \r\n        self.Question.configure(text = self.add)\r\n        \r\n        \r\n\r\n#Mainroutine \r\nif __name__ == \"__main__\":\r\n    root = Tk()\r\n    frames = MathQuiz(root)\r\n    root.title(\"Quiz\")\r\n    root.mainloop()","sub_path":"Teacher Class GUI v1.6_Awnser Page.py","file_name":"Teacher Class GUI v1.6_Awnser Page.py","file_ext":"py","file_size_in_byte":4585,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"592774103","text":"import base64\nimport json\nimport os\n\nfrom slack_bolt import App\nfrom slack_sdk.errors import SlackApiError\n\n\ndef main(event, context):\n\t# Using SLACK_BOT_TOKEN environment variable\n\tapp = App(\n\t)\n\tslack_api_id = os.environ['SLACK_API_ID'].replace('-', '_')\n\tenv_prefix = os.environ['ENV_PREFIX']\n\tbase_url = os.environ['{}_SLACK_CONNECTOR_{}_GATEWAY_URL'.format(env_prefix, slack_api_id)]\n\t# Set Slack API base URL to the URL of slack-connector application gateway.\n\tapp.client.base_url = \"{}/\".format(base_url)\n\tprint(\"received message with id: {}\".format(event[\"data\"][\"ID\"]))\n\tprint(\"slack api base URL: {}\".format(app.client.base_url))\n\tprint(\"sending notification to channel: {}\".format(os.environ['NOTIFICATION_SLACK_CHANNEL']))\n\t# Get cloud events data.\n\tmsg = json.loads(base64.b64decode(event[\"data\"][\"Data\"]))\n\tprint(msg)\n\tif len(msg[\"slackCommitersLogins\"]) > 0:\n\t\tslack_users = \"\"\n\t\tfor commiter in 
msg[\"slackCommitersLogins\"]:\n\t\t\tif commiter != \"\":\n\t\t\t\tprint(commiter)\n\t\t\t\tif slack_users != \"\":\n\t\t\t\t\tslack_users = \"{}, <@{}>\".format(slack_users, commiter)\n\t\t\t\telse:\n\t\t\t\t\tslack_users = \"<@{}>\".format(commiter)\n\t\tif slack_users != \"\":\n\t\t\tnotify_msg = \"{} please check what's going on\".format(slack_users)\n\t\telse:\n\t\t\tnotify_msg = \"<!here>, couldn't find commiter slack username, please check this failure or ask commiter for it.\"\n\telse:\n\t\tnotify_msg = \"<!here>, couldn't find commiter slack username, please check this failure or ask commiter for it.\"\n\tprint(notify_msg)\n\ttry:\n\t\t# Deliver message to the channel.\n\t\t# https://slack.dev/python-slack-sdk/api-docs/slack_sdk/web/slack_response.html#slack_sdk.web.slack_response.SlackResponse\n\t\tresult = app.client.chat_postMessage(channel=os.environ['NOTIFICATION_SLACK_CHANNEL'],\n\t\t\t\t\t\t\t\t\t\t\ttext=\"{} prowjob {} execution failed, view logs: {}\".format(msg[\"job_type\"], msg[\"job_name\"], msg[\"url\"]),\n\t\t\t\t\t\t\t\t\t\t\tusername=\"CiForceBot\",\n\t\t\t\t\t\t\t\t\t\t\ticon_emoji=\"https://www.stickpng.com/img/download/580b57fbd9996e24bc43bdfe/image\",\n\t\t\t\t\t\t\t\t\t\t\tlink_names=\"true\",\n\t\t\t\t\t\t\t\t\t\t\tblocks=[\n\t\t\t\t\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\t\t\t\t\t\"type\": \"header\",\n\t\t\t\t\t\t\t\t\t\t\t\t\t\"text\": {\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\"type\": \"plain_text\",\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\"text\": \"Prowjob execution failed\"\n\t\t\t\t\t\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\t\t\t\t\t\"type\": \"section\",\n\t\t\t\t\t\t\t\t\t\t\t\t\t\"text\": {\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\"type\": \"mrkdwn\",\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\"text\": \"*Name:* {}\\n*Type:* {}\\n<{}|*View logs*>\".format(\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tmsg[\"job_name\"], msg[\"job_type\"], msg[\"url\"])\n\t\t\t\t\t\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\t\t\t\t\t\"type\": \"section\",\n\t\t\t\t\t\t\t\t\t\t\t\t\t\"text\": {\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\"type\": \"mrkdwn\",\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\"text\": \"{}\".format(notify_msg)\n\t\t\t\t\t\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\t\t\t\t])\n\t\tassert result.get(\"ok\", False), \"Assert response from slack API is OK failed. This is critical error.\"\n\t\tprint(\"sent notification for message id: {}\".format(event[\"data\"][\"ID\"]))\n\t# https://slack.dev/python-slack-sdk/api-docs/slack_sdk/errors/index.html#slack_sdk.errors.SlackApiError\n\texcept SlackApiError as e:\n\t\t# https://slack.dev/python-slack-sdk/api-docs/slack_sdk/web/slack_response.html#slack_sdk.web.slack_response.SlackResponse\n\t\tassert e.response.get(\"ok\", False) is False,\\\n\t\t\t\"Assert response from slack API is not OK failed. 
This should not be error.\"\n\t\tprint(f\"Got an error: {e.response['error']}\")\n\t\tprint(\"failed sent notification for message id: {}\".format(event[\"data\"][\"ID\"]))\n","sub_path":"development/kyma-slack-connector/notifyCommiterFunction/handler.py","file_name":"handler.py","file_ext":"py","file_size_in_byte":3371,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"223375627","text":"\nfrom PyQt4 import QtCore\nfrom qgis.core import *\n\n\nclass PromaidesDEMLayer(QgsPluginLayer):\n\n LAYER_TYPE = \"Promaides DEM\"\n\n FIELDS = [QgsField('id', QtCore.QVariant.Int),\n QgsField('elevation', QtCore.QVariant.Double),\n QgsField('material_id', QtCore.QVariant.Int),\n QgsField('init_condition', QtCore.QVariant.Double),\n QgsField('bc_enabled', QtCore.QVariant.String),\n QgsField('bc_stationary', QtCore.QVariant.String),\n QgsField('bc_value', QtCore.QVariant.Double),\n QgsField('bc_type', QtCore.QVariant.String)]\n\n def __init__(self):\n QgsPluginLayer.__init__(self, PromaidesDEMLayer.LAYER_TYPE, \"Promaides DEM plugin layer\")\n self.setValid(True)\n\n def createLayer(self):\n return PromaidesDEMLayer()\n\n def showLayerProperties(self, layer):\n pass\n\n def readXml(self, node):\n pass\n\n def writeXml(self, node, doc):\n pass\n\n\nclass PromaidesDEMDataProvider(QgsVectorDataProvider):\n\n def __init__(self, uri):\n QgsVectorDataProvider.__init__(self, uri)\n self._load(header, uri)\n\n def _load(self, header, uri):\n\n data = []\n\n with open(uri, 'r') as prm:\n\n data_section = False\n num_elements = 0\n\n for line in prm:\n\n if line.startswith('#'):\n continue\n\n elif line.startswith('!BEGIN'):\n data_section = True\n continue\n\n elif line.startswith('!END'):\n if num_elements != header['nc'] * header['nr']:\n raise ValueError('File does not provide enough cells')\n data_section = False\n continue\n\n if data_section:\n\n elements = line.split()\n index = int(elements[0])\n elevation = float(elements[1])\n material = int(elements[2])\n init = float(elements[3])\n bc_enabled = elements[4]\n bc_stat = elements[5]\n bc_value = float(elements[6]) if bc_stat else int(elements[6])\n bc_type = elements[7] if bc_enabled == 'true' else ''\n\n data.append([index, elevation, material, init, bc_enabled, bc_stat, bc_value, bc_type])\n num_elements += 1\n\n raster = Raster(layerName, header['xll'], header['yll'],\n header['dc'], header['dr'],\n header['nc'], header['nr'],\n header['angle'], header['no_data'], data)\n\n return raster","sub_path":"layers.py","file_name":"layers.py","file_ext":"py","file_size_in_byte":2722,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"284728999","text":"# -*- coding: utf-8 -*-\n# @Author: Lu Shaohao(Bravo)\n# @Date: 2019-09-12 09:08:45\n# @Last Modified by: Lu Shaohao(Bravo)\n# @Last Modified time: 2019-09-12 09:27:14\n\nfrom mmdet.apis import init_detector, inference_detector, show_result_pyplot\nimport mmcv\n\nconfig = 'configs/cascade_rcnn_x101_64x4d_fpn_1x.py'\ncheckpoint_file = 'work_dirs/cascade_rcnn_x101_64x4d_fpn_1x/latest.pth'\n\nmodel = init_detector(config_file, checkpoint_file, device='cuda:0')\n\ntest_file = open('data/test.txt', 'r')\ntest_imgs = [l.strip() for l in test_file.readlines]\nfor img in test_imgs[0]:\n result = inference_detector(model, img)\n show_result_pyplot(img, result, 
model.CLASSES)\n\n","sub_path":"bin/mmdet/datasets/demo.py","file_name":"demo.py","file_ext":"py","file_size_in_byte":668,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"225095577","text":"import time\nimport uuid\nimport logging\n\nimport adage.nodestate as nodestate\n\nlog = logging.getLogger(__name__)\n\nclass Node(object):\n def __init__(self,name,task,identifier = None, define_time = None, result = None):\n self.identifier = identifier or str(uuid.uuid4())\n self.name = name\n self.task = task\n\n # the timestamps\n self.define_time = define_time or time.time()\n self.submit_time = None\n self.ready_by_time = None\n\n # backend to update state against\n self.backend = None\n\n # relevant state data\n self.resultproxy = None\n self._result = result\n self._state = nodestate.DEFINED\n\n def __repr__(self):\n return '<Node name: {} id: {} state: {}>'.format(self.name,self.identifier,self.state)\n\n def update_state(self,backend = None):\n #if we do not have a result object\n #that means it's not submitted yet\n if not self.resultproxy:\n self._state = nodestate.DEFINED\n return\n\n backend = self.backend or backend\n if not backend:\n raise RuntimeError('no backend to update state against')\n\n #if we have a resultobject\n #but the result is not ready\n #the node is still running\n if not backend.ready(self.resultproxy):\n self._state = nodestate.RUNNING\n return\n\n #if it's ready it's either successful\n #or failed\n if backend.successful(self.resultproxy):\n self._state = nodestate.SUCCESS\n self._result = backend.result(self.resultproxy)\n else:\n self._state = nodestate.FAILED\n\n #it's ready so set time stamp it not already set\n if not self.ready_by_time:\n self.ready_by_time = time.time()\n log.info('node ready %s',self)\n\n @property\n def result(self):\n if self._result is None:\n log.warning(\n 'result requested but it is None proxy: %s, backend: %s',\n self.resultproxy, self.backend\n )\n return self._result\n\n @property\n def state(self):\n return self._state\n\n def ready(self):\n return self.state in [nodestate.SUCCESS, nodestate.FAILED]\n\n def successful(self):\n return self.state == nodestate.SUCCESS\n","sub_path":"adage/node.py","file_name":"node.py","file_ext":"py","file_size_in_byte":2324,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"563662272","text":"# -*- coding: utf-8 -*-\n\n#<remove>\n_show_plots_ = False\n#</remove>\n\nimport quantarhei as qr\n\n \nen = [0.0, 1.0]\nm1 = qr.Molecule(name=\"Mol1\",elenergies=en)\nm2 = qr.Molecule(name=\"Mol2\",elenergies=en)\n\nag = qr.Aggregate(name=\"Homodimer\")\nag.add_Molecule(m1)\nag.add_Molecule(m2)\n\n\nag.set_resonance_coupling(0,1,0.1)\n\nag.build(mult=1)\n\nH = ag.get_Hamiltonian()\n\n#with qr.energy_units(\"1/cm\"):\n# print(H)\n\n#\n# Here we test generation of states with 3 level molecules\n#\n\nen = [0.0, 10100.0] #, 20200.0]\nwith qr.energy_units(\"1/cm\"):\n m1 = qr.Molecule(name=\"Mol1\",elenergies=en)\n m2 = qr.Molecule(name=\"Mol2\",elenergies=en)\n m3 = qr.Molecule(name=\"Mol3\",elenergies=en)\n\nag = qr.Aggregate(name=\"Trimer-3-lev\")\nag.add_Molecule(m1)\nag.add_Molecule(m2)\nag.add_Molecule(m3)\n\nii = 0\nfor sig in ag.elsignatures(mult=4):\n print(ii, sig)\n ii += 1\n \nag.build(mult=2)\n\nH = ag.get_Hamiltonian()\n\nwith qr.energy_units(\"1/cm\"):\n print(H)\n 
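# Added note: with three 2-level molecules and mult=2 there is 1 ground state, 3 singly\n    # excited states and C(3,2) = 3 doubly excited states, so this is expected to print 7\n    # (assuming quantarhei's standard electronic state generation).\n    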
print(H.dim)","sub_path":"examples/demo_002_Molecule_Aggregate.py","file_name":"demo_002_Molecule_Aggregate.py","file_ext":"py","file_size_in_byte":960,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"433455623","text":"# system libraries\nimport os\nimport re\nimport time\n\n# libraries for google app engine\nimport webapp2\nimport jinja2\nfrom google.appengine.ext import db\n\n# libraries used for storing user names and passwords\nimport hashlib\nimport hmac\nimport random\nfrom string import letters\n\nSECRET = 'foo'\n\ntemplate_dir = os.path.join(os.path.dirname(__file__), 'templates')\nJINJA_ENV = jinja2.Environment(loader=jinja2.FileSystemLoader(template_dir),\n autoescape=True)\n\n# functions for validating cookies and encryption\n\n\ndef make_secure_val(unsecure_val):\n return '%s|%s' % (unsecure_val, hmac.new(SECRET, unsecure_val).hexdigest())\n\n\ndef check_secure_val(hash_val):\n val = hash_val.split('|')[0]\n if hash_val == make_secure_val(val):\n return val\n\n\ndef make_salt(length=5):\n return ''.join(random.choice(letters) for x in xrange(length))\n\n\ndef make_pw_hash(name, pw, salt=None):\n if not salt:\n salt = make_salt()\n h = hashlib.sha256(name + pw + salt).hexdigest()\n return '%s,%s' % (salt, h)\n\n\ndef valid_pw(name, password, h):\n salt = h.split(',')[0]\n return h == make_pw_hash(name, password, salt)\n\n\ndef users_key(group='default'):\n return db.Key.from_path('users', group)\n\n\nclass User(db.Model):\n name = db.StringProperty(required=True)\n pw_hash = db.StringProperty(required=True)\n email = db.StringProperty()\n\n @classmethod\n def by_id(cls, uid):\n return cls.get_by_id(uid, parent=users_key())\n\n @classmethod\n def by_name(cls, name):\n u = cls.all().filter('name =', name).get()\n return u\n\n @classmethod\n def register(cls, name, pw, email=None):\n pw_hash = make_pw_hash(name, pw)\n return cls(parent=users_key(), name=name, pw_hash=pw_hash, email=email)\n\n @classmethod\n def login(cls, name, pw):\n u = cls.by_name(name)\n if u and valid_pw(name, pw, u.pw_hash):\n return u\n\n\ndef render_str(template, **params):\n t = JINJA_ENV.get_template(template)\n return t.render(params)\n\n\nclass BlogHandler(webapp2.RequestHandler):\n \"\"\"helper functions to shorten request calls\"\"\"\n\n def write(self, *a, **kw):\n self.response.out.write(*a, **kw)\n\n def render_str(self, template, **params):\n params['user'] = self.user # gets user into Base.html for logout text\n return render_str(template, **params)\n\n def render(self, template, **kw):\n self.write(self.render_str(template, **kw))\n\n def set_secure_cookie(self, name, unsecure_val):\n cookie_val = make_secure_val(unsecure_val)\n self.response.headers.add_header('Set-Cookie', '%s=%s; Path=/'\n % (name, cookie_val))\n\n def read_secure_cookie(self, name):\n cookie_val = self.request.cookies.get(name)\n \"\"\"shorthand for return cookie_val if cookie_val exists and\n check_secure_val exists (i.e. 
doesn't return None)\"\"\"\n        return cookie_val and check_secure_val(cookie_val)\n\n    def login(self, user):\n        self.set_secure_cookie('user_id', str(user.key().id()))\n\n    def logout(self):\n        self.response.headers.add_header('Set-Cookie', 'user_id=; Path=/')\n\n    def initialize(self, *a, **kw):\n        webapp2.RequestHandler.initialize(self, *a, **kw)\n        uid = self.read_secure_cookie('user_id')\n        self.user = uid and User.by_id(int(uid))\n\n\nclass MainPageHandler(BlogHandler):\n\n    def get(self):\n        self.redirect('/signup')\n\n\ndef blog_key(name='default'):\n    return db.Key.from_path('blogs', name)\n\n\nclass Post(db.Model):\n    \"\"\"defines Post Kind in GAE datastore\"\"\"\n    user = db.ReferenceProperty(User, collection_name='posts')\n    author = db.StringProperty(required=True)\n    subject = db.StringProperty(required=True)\n    content = db.TextProperty(required=True)\n    created = db.DateTimeProperty(auto_now_add=True)\n    last_modified = db.DateTimeProperty(auto_now=True)\n    likes = db.IntegerProperty(default=0)\n\n    def render(self):\n        \"\"\"replaces plain text line breaks with proper HTML breaks\"\"\"\n        self._render_text = self.content.replace('\\n', '<br>')\n        return render_str(\"post.html\", post=self)\n\n\nclass Liker(db.Model):\n    post = db.ReferenceProperty(Post, collection_name='likers')\n    likerKey = db.StringProperty()\n\n\nclass Comment(db.Model):\n    post = db.ReferenceProperty(Post, collection_name='comments')\n    commenterKey = db.StringProperty()\n    author = db.StringProperty()\n    content = db.TextProperty()\n    created = db.DateTimeProperty(auto_now_add=True)\n    last_modified = db.DateTimeProperty(auto_now=True)\n\n\nclass BlogFrontHandler(BlogHandler):\n\n    def get(self):\n        \"\"\"get the top 10 most recent blog posts\"\"\"\n        posts = db.GqlQuery(\"SELECT * FROM Post ORDER BY created DESC LIMIT 10\")\n        self.render(\"front.html\", posts=posts)\n\n\nclass NewPostHandler(BlogHandler):\n\n    def get(self):\n        if self.user:\n            self.render(\"post-form.html\")\n        else:\n            self.redirect(\"/login\")\n\n    def post(self):\n        # make sure we have a logged in user by checking self.user\n        if self.user:\n            subject = self.request.get(\"subject\")\n            content = self.request.get(\"content\")\n\n            # if both the subject and content fields have data forward to a\n            # permalink\n            if subject and content:\n                p = Post(\n                    user=self.user,\n                    author=self.user.name,\n                    subject=subject,\n                    content=content)\n                p.put()\n                self.redirect('/blog/%s' % str(p.key().id()))\n            else:\n                error = \"both subject and content are required\"\n                self.render(\"post-form.html\", subject=subject, content=content,\n                            error=error)\n        else:\n            self.redirect(\"/login\")\n\n\nclass PermaLinkHandler(BlogHandler):\n\n    def get(self, postID):\n        post = Post.get_by_id(int(postID))\n\n        if not post:\n            self.error(404)\n            return\n\n        self.render(\"permalink.html\", post=post)\n\n\nclass EditPostHandler(BlogHandler):\n\n    def get(self, postKey):\n        if self.user:\n            q = Post.all()\n            post = q.filter('__key__', db.Key(postKey)).get()\n            self.render(\n                \"edit-post.html\",\n                subject=post.subject,\n                content=post.content)\n        else:\n            self.redirect(\"/login\")\n\n    def post(self, postKey):\n        if self.user:\n            subject = self.request.get(\"subject\")\n            content = self.request.get(\"content\")\n\n            # if both the subject and content fields have data forward to a\n            # permalink\n            if subject and content:\n                q = Post.all()\n                post = q.filter('__key__', db.Key(postKey)).get()\n\n                # only the author of a post can edit it\n                if self.user.key() == post.user.key():\n                    post.subject = subject\n                    post.content = content\n                    post.put()\n                    
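# send the author back to the post's permalink once the edit has been stored\n                    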
self.redirect('/blog/%s' % str(post.key().id()))\n                else:\n                    self.redirect('/blog')\n            else:\n                error = \"both subject and content are required\"\n                self.render(\"post-form.html\", subject=subject, content=content,\n                            error=error)\n        else:\n            self.redirect(\"/login\")\n\n\nclass DeletePostHandler(BlogHandler):\n\n    def get(self, postKey):\n        if self.user:\n            q = Post.all()\n            post = q.filter('__key__', db.Key(postKey)).get()\n            if self.user.key() == post.user.key():\n                db.delete(db.Key(postKey))\n                time.sleep(1)\n            self.redirect('/blog')\n        else:\n            self.redirect('/login')\n\n\nclass LikePostHandler(BlogHandler):\n\n    def get(self, postKey):\n        if self.user:\n            q = Post.all()\n            post = q.filter('__key__', db.Key(postKey)).get()\n\n            # make sure this user hasn't already liked this post\n            already_liked = False\n            for liker in post.likers:\n                if liker.likerKey == str(self.user.key()):\n                    already_liked = True\n\n            if not already_liked and post.user.key() != self.user.key():\n                Liker(post=post, likerKey=str(self.user.key())).put()\n                post.likes += 1\n                post.put()\n                time.sleep(1)\n\n            self.redirect('/blog')\n        else:\n            self.redirect('/login')\n\n\nclass CommentHandler(BlogHandler):\n\n    def get(self, postKey):\n        if self.user:\n            self.render(\"comment-form.html\")\n        else:\n            self.redirect(\"/login\")\n\n    def post(self, postKey):\n        if self.user:\n            comment = self.request.get(\"comment\")\n\n            q = Post.all()\n            post = q.filter('__key__', db.Key(postKey)).get()\n\n            Comment(\n                post=post,\n                author=self.user.name,\n                commenterKey=str(\n                    self.user.key()),\n                content=comment).put()\n            post.put()\n            time.sleep(1)\n\n            self.redirect('/blog')\n        else:\n            self.redirect(\"/login\")\n\n\nclass EditCommentHandler(BlogHandler):\n\n    def get(self, commentKey):\n        if self.user:\n            q = Comment.all()\n            comment = q.filter('__key__', db.Key(commentKey)).get()\n\n            # users can only edit comments they created\n            if self.user.name == comment.author:\n                self.render(\"edit-comment.html\", content=comment.content)\n            else:\n                self.redirect(\"/blog\")\n        else:\n            self.redirect(\"/login\")\n\n    def post(self, commentKey):\n        if self.user:\n            content = self.request.get(\"comment\")\n            q = Comment.all()\n            comment = q.filter('__key__', db.Key(commentKey)).get()\n\n            # user can only edit comments they created\n            if self.user.name == comment.author:\n                comment.content = content\n                comment.put()\n                time.sleep(1)\n            self.redirect('/blog')\n        else:\n            self.redirect(\"/login\")\n\n\nclass DeleteCommentHandler(BlogHandler):\n\n    def get(self, commentKey):\n        if self.user:\n            q = Comment.all()\n            comment = q.filter('__key__', db.Key(commentKey)).get()\n\n            # users can only delete comments they created\n            if self.user.name == comment.author:\n                db.delete(db.Key(commentKey))\n                time.sleep(1)\n\n            self.redirect('/blog')\n        else:\n            self.redirect('/login')\n\n\ndef valid_username(username):\n    USER_RE = re.compile(r\"^[a-zA-Z0-9_-]{3,20}$\")\n    return USER_RE.match(username)\n\n\ndef valid_password(password):\n    PWORD_RE = re.compile(r\"^.{3,20}$\")\n    return PWORD_RE.match(password)\n\n\ndef valid_email(email):\n    EMAIL_RE = re.compile(r\"^[\S]+@[\S]+\.[\S]+$\")\n    return not email or EMAIL_RE.match(email)\n\n\nclass SignupHandler(BlogHandler):\n\n    def get(self):\n        self.render(\"signup-form.html\")\n\n    def post(self):\n        username = self.request.get(\"username\")\n        password = self.request.get(\"password\")\n        verify = self.request.get(\"verify\")\n        email = self.request.get(\"email\")\n\n        params = dict(username=username, email=email)\n\n        # validate the user input against our 
regex\n any_error = False\n\n if not valid_username(username):\n params['error_username'] = \"Invalid username.\"\n any_error = True\n\n if not valid_password(password):\n params['error_password'] = \"Invalid password.\"\n any_error = True\n elif password != verify:\n params['error_verify'] = \"Passwords do not match!\"\n any_error = True\n\n if email and not valid_email(email):\n params['error_email'] = \"Email address is not valid.\"\n any_error = True\n\n # check to see if this user name is already taken\n u = User.by_name(username)\n if u:\n params['error_username'] = \"Username already exists.\"\n any_error = True\n\n if any_error:\n self.render(\"signup-form.html\", **params)\n else:\n u = User.register(username, password, email)\n u.put()\n\n self.login(u)\n self.redirect('/welcome')\n\n\nclass LoginHandler(BlogHandler):\n\n def get(self):\n self.render(\"login-form.html\")\n\n def post(self):\n username = self.request.get(\"username\")\n password = self.request.get(\"password\")\n\n u = User.login(username, password)\n if u:\n self.login(u)\n self.redirect('/welcome')\n else:\n msg = \"Invalid Login\"\n self.render('login-form.html', error=msg)\n\n\nclass LogoutHandler(BlogHandler):\n\n def get(self):\n self.logout()\n self.redirect('/login')\n\n\nclass WelcomeHandler(BlogHandler):\n\n def get(self):\n if self.user:\n self.render(\"welcome.html\", username=self.user.name)\n else:\n self.redirect('/signup')\n\napp = webapp2.WSGIApplication([('/', MainPageHandler),\n ('/signup', SignupHandler),\n ('/welcome', WelcomeHandler),\n ('/blog/?', BlogFrontHandler),\n ('/blog/newpost', NewPostHandler),\n ('/blog/(\\d+)', PermaLinkHandler),\n ('/login', LoginHandler),\n ('/logout', LogoutHandler),\n ('/editpost/([a-zA-Z0-9_-]+)', EditPostHandler),\n ('/deletepost/([a-zA-Z0-9_-]+)', DeletePostHandler),\n ('/likepost/([a-zA-Z0-9_-]+)', LikePostHandler),\n ('/comment/([a-zA-Z0-9_-]+)', CommentHandler),\n ('/editcomment/([a-zA-Z0-9_-]+)', EditCommentHandler),\n ('/deletecomment/([a-zA-Z0-9_-]+)', DeleteCommentHandler)],\n debug=True)\n","sub_path":"blog.py","file_name":"blog.py","file_ext":"py","file_size_in_byte":14294,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"55010481","text":"\"\"\"Multi object tracking using Kalman Filter.\n\nThis is a modification of:\n\nSORT: A Simple, Online and Realtime Tracker\nCopyright (C) 2016 Alex Bewley alex@dynamicdetection.com.\n\nThis program is free software: you can redistribute it and/or modify\nit under the terms of the GNU General Public License as published by\nthe Free Software Foundation, either version 3 of the License, or\n(at your option) any later version.\n\nThis program is distributed in the hope that it will be useful,\nbut WITHOUT ANY WARRANTY; without even the implied warranty of\nMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\nGNU General Public License for more details.\n\nYou should have received a copy of the GNU General Public License\nalong with this program. 
If not, see <http://www.gnu.org/licenses/>.\n\"\"\"\nfrom __future__ import print_function\n\nfrom numba import jit\nimport os.path\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.patches as patches\nfrom skimage import io\nfrom sklearn.utils.linear_assignment_ import linear_assignment\nimport glob\nimport time\nimport argparse\nfrom filterpy.kalman import KalmanFilter\n\nfrom collections import defaultdict, deque\nfrom functools import partial\nimport warnings\n\n\n\n@jit\ndef squared_diff(a, b):\n return (a - b) ** (2)\n\n\n@jit\ndef euclidean(bb_test_, bb_gt_):\n \"\"\"\n Computes similarity using euclidean distance between\n two bboxes in the form [x, y, z, s, r, yaw]\n using 1/ (1 + euclidean_dist)\n\n \"\"\"\n x1, y1, z1, s1, r1, yaw1 = get_bbox(bb_test_)\n x2, y2, z2, s2, r2, yaw2 = get_bbox(bb_gt_)\n\n # o = (np.sum(squared_diff(i,j) for (i,j) in [(x1, x2), (y1, y2), (yaw1, yaw2)]))\n # this is not jit compatible. resort to using for loop:\n\n output = 0.\n for (i, j) in [(x1, x2), (y1, y2), (z1, z2), (yaw1, yaw2), (s1, s2), (r1, r2)]:\n output += squared_diff(i, j)\n output = 1./(1. + (output ** (1 / 2.)))\n # print('distance {}'.format(o))\n return(output)\n\n\n@jit\ndef distance(bb_test_, bb_gt_):\n # hard coded selection of method to compute similarity\n method = 'euclidean'\n if method == 'iou':\n # iou is currently NOT defined for bboxes in different orientations\n o = iou(bb_test_, bb_gt_)\n elif method == 'euclidean':\n o = euclidean(bb_test_, bb_gt_)\n return o\n\n\n\n@jit\ndef get_bbox(bbox):\n \"\"\"Drop score from bbox (if any, last index)\n [x,y,h,w, yaw[,score]] --> [x1,y1, x2, y2\"\"\"\n # warnings.warn(str(len(bbox)))\n if len(bbox) > 6:\n x, y, z, h, w, yaw, score = bbox\n else:\n x, y, z, h, w, yaw = bbox\n return [x, y, z, h, w, yaw]\n\n\ndef convert_bbox_to_z(bbox):\n \"\"\"\n Takes a bounding box in the form [x,y,w,h,yaw] and returns z in the form\n [x,y,s,r,yaw] where x,y is the centre of the box and s is the scale/area and r is\n the aspect ratio\n\n\n [x,y,z, w,h,yaw] -> [x,y,z,s,r,yaw]\n\n \"\"\"\n w = bbox[3]\n h = bbox[4]\n x = bbox[0]\n y = bbox[1]\n z = bbox[2]\n s = w * h # scale is just area\n r = w / float(h)\n # return np.array([x, y, s, r]).reshape((4, 1))\n yaw = bbox[5]\n return np.array([x, y, z, s, r, yaw]).reshape((6, 1))\n\n\ndef convert_x_to_bbox(x, score=None):\n \"\"\"\n Takes a bounding box in the centre form [x,y, z, s,r, yaw] and returns it in the form\n [x, y, z, w, h, yaw] where x, y, z is the center\n \"\"\"\n w = np.sqrt(x[3] * x[4])\n h = x[3] / w\n yaw = x[5]\n if(score is None):\n return np.array([x[0], x[1], x[2], w, h, yaw]).reshape((1, 6))\n else:\n return np.array([x[0], x[1], x[2], w, h, yaw, score]).reshape((1, 7))\n\n\nclass KalmanBoxTracker(object):\n \"\"\"\n This class represents the internel state of individual tracked objects observed as bbox.\n \"\"\"\n count = 0\n\n def __init__(self, bbox, attrs):\n \"\"\"\n Initialises a tracker using initial bounding box.\n \"\"\"\n # define constant velocity model\n # originalx : [u, v, s, r, |dot{u}, \\dot{v}, \\dot{s}]\n # adding \\yaw, \\dot{\\yaw}\n # new x: [u, v, z, s, r, \\yaw, |dot{u}, \\dot{v}, \\dot{s}, \\dot{\\yaw}]\n # assume r constant\n # dim_x : length of x vector\n # dim_z: numer of sensors measurements [x, y, z, s, r, yaw]\n self.kf = KalmanFilter(dim_x=10, dim_z=6)\n self.kf.F = np.array([[1, 0, 0, 0, 0, 0, 1, 0, 0, 0],\n [0, 1, 0, 0, 0, 0, 0, 1, 0, 0],\n [0, 0, 1, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 1, 0, 0, 0, 0, 1, 0],\n [0, 0, 0, 0, 1, 
0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 1],\n [0, 0, 0, 0, 0, 0, 1, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 1, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 1, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 1]])\n\n # dim H: (dim_z, dim_x)\n self.kf.H = np.array([[1, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 1, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 1, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 1, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 1, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 1, 0, 0, 0, 0]])\n\n self.kf.R[3:, 3:] *= 10.\n self.kf.P[6:, 6:] *= 1000. # give high uncertainty to the unobservable initial velocities\n self.kf.P *= 10.\n self.kf.Q[-1, -1] *= 0.01\n self.kf.Q[6:, 6:] *= 0.01\n\n self.kf.x[:6] = convert_bbox_to_z(bbox)\n self.time_since_update = 0\n self.id = KalmanBoxTracker.count\n KalmanBoxTracker.count += 1\n self.history = []\n self.hits = 0\n self.hit_streak = 0\n self.age = 0\n\n self.unused_box_attrs = attrs\n\n def update(self, bbox, attrs):\n \"\"\"\n Updates the state vector with observed bbox.\n \"\"\"\n self.time_since_update = 0\n self.history = []\n self.hits += 1\n self.hit_streak += 1\n self.kf.update(convert_bbox_to_z(bbox))\n\n self.unused_box_attrs = attrs\n\n def predict(self):\n \"\"\"\n Advances the state vector and returns the predicted bounding box estimate.\n \"\"\"\n if((self.kf.x[8] + self.kf.x[3]) <= 0):\n self.kf.x[8] *= 0.0\n self.kf.predict()\n self.age += 1\n if(self.time_since_update > 0):\n self.hit_streak = 0\n self.time_since_update += 1\n self.history.append(convert_x_to_bbox(self.kf.x))\n return self.history[-1]\n\n def get_state(self):\n \"\"\"\n Returns the current bounding box estimate.\n \"\"\"\n return convert_x_to_bbox(self.kf.x)\n\n\n# @jit\ndef associate_detections_to_trackers(\n detections, trackers, distance_threshold=0.3):\n \"\"\"\n Assigns detections to tracked object (both represented as bounding boxes)\n\n Returns 3 lists of matches, unmatched_detections and unmatched_trackers\n \"\"\"\n if(len(trackers) == 0):\n return np.empty((0, 2), dtype=int), np.arange(\n len(detections)), np.empty((0, 6), dtype=int)\n distance_matrix = np.zeros(\n (len(detections), len(trackers)), dtype=np.float32)\n\n for d, det in enumerate(detections):\n for t, trk in enumerate(trackers):\n distance_matrix[d, t] = distance(det, trk)\n print('distance of new det:{} to tracker {} = {}'.format(\n d, t, distance_matrix[d, t]))\n\n # warnings.warn(str(distance_matrix))\n # warnings.warn('tracking')\n\n matched_indices = linear_assignment(-distance_matrix)\n\n unmatched_detections = []\n for d, det in enumerate(detections):\n if(d not in matched_indices[:, 0]):\n unmatched_detections.append(d)\n unmatched_trackers = []\n for t, trk in enumerate(trackers):\n if(t not in matched_indices[:, 1]):\n unmatched_trackers.append(t)\n\n # filter out matched with low distance\n matches = []\n for m in matched_indices:\n if(distance_matrix[m[0], m[1]] < distance_threshold):\n unmatched_detections.append(m[0])\n unmatched_trackers.append(m[1])\n else:\n matches.append(m.reshape(1, 2))\n if(len(matches) == 0):\n matches = np.empty((0, 2), dtype=int)\n else:\n matches = np.concatenate(matches, axis=0)\n\n return matches, np.array(\n unmatched_detections), np.array(unmatched_trackers)\n\n\nclass Sort(object):\n def __init__(self, max_age=1, min_hits=3, distance_threshold=.3):\n \"\"\"\n Sets key parameters for SORT\n \"\"\"\n self.max_age = max_age\n self.min_hits = min_hits\n self.trackers = []\n self.frame_count = 0\n self.distance_threshold = distance_threshold\n\n def update(self, detections):\n \"\"\"\n Params:\n Args:\n 
dets (:obj:`numpy.array`) : an array of detected bounding\n boxes. Each row correspond to a detection of the form\n `[tx, ty, tz, w, l, rz, h, rx, ry]`. The order of attributes\n (columns) should be strictly followed.\n\n Requires: this method must be called once for each frame even with empty detections.\n Returns the a similar array, where the last column is the object ID.\n\n NOTE: The number of objects returned may differ from the number of detections provided.\n \"\"\"\n self.frame_count += 1\n # get predicted locations from existing trackers.\n trks = np.zeros((len(self.trackers), 7))\n\n to_del = []\n ret = []\n ret_attrs = []\n # print('trks', trks)\n for t, trk in enumerate(trks):\n pos = self.trackers[t].predict()[0]\n trk[:] = [pos[0], pos[1], pos[2], pos[3], pos[4], pos[5], 0]\n if(np.any(np.isnan(pos))):\n to_del.append(t)\n # warnings.warn('popping trackers {}'.format(t))\n\n trks = np.ma.compress_rows(np.ma.masked_invalid(trks))\n for t in reversed(to_del):\n self.trackers.pop(t)\n # warnings.warn('popping trackers {}'.format(t))\n\n # separate unused attributes\n dets, attrs = self.split_detections(detections)\n # print('attrs',attrs.shape)\n\n # print(dets.shape, trks.shape)\n # warnings.warn('before computing trackers')\n matched, unmatched_dets, unmatched_trks = associate_detections_to_trackers(\n dets, trks, distance_threshold=self.distance_threshold)\n # print(matched, unmatched_dets, unmatched_trks)\n\n # update matched trackers with assigned detections\n for t, trk in enumerate(self.trackers):\n if(t not in unmatched_trks):\n d = matched[np.where(matched[:, 1] == t)[0], 0]\n trk.update(dets[d, :][0], attrs[d, :][0])\n # print('printing attrs')\n # print(attrs[d, :][0])\n # print('*')\n # print(trk.unused_box_attrs)\n\n # create and initialise new trackers for unmatched detections\n # print(dets.shape)\n for i in unmatched_dets:\n trk = KalmanBoxTracker(dets[i, :6], attrs[i, :])\n self.trackers.append(trk)\n i = len(self.trackers)\n for trk in reversed(self.trackers):\n d = trk.get_state()[0]\n d_attr = trk.unused_box_attrs\n # print('printing d_attr')\n # print(d_attr)\n if((trk.time_since_update < 1) and (trk.hit_streak >= self.min_hits or self.frame_count <= self.min_hits)):\n # +1 as MOT benchmark requires positive\n ret.append(np.concatenate((d, [trk.id + 1])).reshape(1, -1))\n ret_attrs.append(d_attr)\n i -= 1\n # remove dead tracklet\n if(trk.time_since_update > self.max_age):\n self.trackers.pop(i)\n if(len(ret) > 0):\n # return np.concatenate(ret)\n # last column is track id\n tracked_detections = np.concatenate(ret)[:,:-1]\n tracked_attrs = np.array(ret_attrs)\n # print('output shape')\n # print(tracked_detections.shape, tracked_attrs.shape)\n # print(np.concatenate((tracked_detections,tracked_attrs),axis=1).shape)\n return np.concatenate((tracked_detections, tracked_attrs),axis=1), np.concatenate(ret)[:,-1]\n return np.empty((0, 6)), np.empty((0, 1))\n\n def split_detections(self, detections):\n \"\"\"Splits detections to two numpy arrays.\n\n Args:\n detections (:obj:`numpy.array`) : an array of detected bounding\n boxes. Each row correspond to a detection of the form\n `[tx, ty, tz, w, l, rz, h, rx, ry]`. The order of attributes\n (columns) should be strictly followed.\n\n Note:\n The order of columns corresponding to bounding box attributes\n is important because same order is followed while extracting\n columns in kalman filter update method. 
Reason for altering\n            (mangling) the column order is to make it easier to slice the\n            numpy array of detections into a subset that is used by kalman\n            filter `[tx, ty, tz, w, l, rz]`, and one that is not used by\n            Kalman Filter `[h, rx, ry]`. The second slice plays no role in\n            the Kalman Filter update; it is merely passed through to enable\n            publishing the tracked obstacle and writing xml.\n        \"\"\"\n        if detections.size > 0:\n            dets = detections[:, :6]\n            dets = np.insert(dets, 6, 0., axis=1)\n            attrs = detections[:, 6:]\n            return dets, attrs\n        else:\n            return [], []\n\n\nif __name__ == '__main__':\n    pass\n","sub_path":"utils/didiros/src/ped_tracker/scripts/multi_object_tracker.py","file_name":"multi_object_tracker.py","file_ext":"py","file_size_in_byte":13789,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"35885427","text":"def maxProfit(prices):\n    \"\"\"\n    :type prices: List[int]\n    :rtype: int\n    \"\"\"\n    if len(prices) <= 1: return 0\n    transactions = 2\n    dp = [[0 for _ in range(len(prices)+1)] for _ in range(transactions+1)]\n    for k in range(transactions):\n        tmpMax = dp[k][1] - prices[0]\n        for i in range(1, len(prices)+1):\n            dp[k+1][i] = max(dp[k+1][i-1], prices[i-1] + tmpMax)\n            tmpMax = max(tmpMax, dp[k][i]-prices[i-1])\n    return dp[transactions][len(prices)]\n\nnums = [ch for ch in input().split(' ')]\nflag = True\nfor ch in nums:\n    if not ch.isdigit():\n        flag = False\n        break\nif flag:\n    nums = [int(ch) for ch in nums]\n    print(maxProfit(nums))\nelse:\n    print(0)","sub_path":"python/小米笔试题1.py","file_name":"小米笔试题1.py","file_ext":"py","file_size_in_byte":708,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"335228212","text":"import cv2\r\nimport numpy as np\r\nimport matplotlib.pyplot as plot\r\nimport time\r\nimport math\r\n\r\n# Global variables\r\n#\r\n# secondsperframe & framespersecond\r\n\r\nfps = 25\r\nspf = 1 / fps\r\n\r\n# defines the base attributes\r\n\r\nbase_speed = 20  # pixel/second\r\nbase_health = 200\r\nbase_fov = 30\r\nbase_met_speed = 0.04\r\nbase_met_fov = 0.01\r\nbase_variance = 0.2  # mutation rate\r\nbase_foodamount = 30\r\nbase_foodgenerationrate = 10\r\n\r\n# counters for the living critters and the food\r\n\r\ncritters_alive = 0\r\nfood_alive = 0\r\n\r\n# map size definition\r\n\r\nsize_x = 1000\r\nsize_y = 1000\r\n\r\n# creates the array for the critter data (1000 rows, 7 columns)\r\narray_critters = np.zeros([1000, 7])\r\n\r\n# array for the food data\r\narray_food = np.zeros([1000, 4])\r\n\r\n# initial conditions\r\nfood_initial = 200\r\ncritters_initial = 200\r\n\r\n\r\n# Create function for distance between two points\r\ndef math_calc_dist(p1, p2):\r\n    return math.sqrt(math.pow((p2[0] - p1[0]), 2) +\r\n                     math.pow((p2[1] - p1[1]), 2))\r\n\r\n\r\n# Create function for normalisation of a vector\r\ndef normalize(a, b):\r\n    length = math.sqrt(math.pow(a, 2) + math.pow(b, 2))\r\n    return [a / length, b / length]\r\n\r\n\r\n# create the critters\r\n\r\nfor f in range(critters_initial):\r\n    # health rounded to two decimal places\r\n    array_critters[critters_alive, 0] = np.around(base_health + base_variance * base_health * np.random.uniform(-1, 1),\r\n                                                  2)\r\n\r\n    # speed rounded to two decimal places\r\n    array_critters[critters_alive, 1] = np.around(base_speed + base_variance * base_speed * np.random.uniform(-1, 1), 2)\r\n\r\n    # fov rounded to one decimal place\r\n    array_critters[critters_alive, 2] = 
np.around(base_fov + base_variance * base_fov * np.random.uniform(-1, 1), 1)\r\n\r\n    # Coordinates 3 = x 4 = y\r\n    array_critters[critters_alive, 3] = np.random.uniform() * size_x\r\n    array_critters[critters_alive, 4] = np.random.uniform() * size_y\r\n\r\n    # Directions\r\n    rnddirection = np.random.uniform(0, 2 * math.pi)\r\n    array_critters[critters_alive, 5] = math.sin(rnddirection)\r\n    array_critters[critters_alive, 6] = math.cos(rnddirection)\r\n\r\n    critters_alive += 1\r\n\r\n# create food and scatter it randomly across the map\r\n\r\nfor f in range(food_initial):\r\n    # coordinates\r\n    array_food[food_alive, 0] = np.random.uniform() * size_x\r\n    array_food[food_alive, 1] = np.random.uniform() * size_y\r\n    array_food[food_alive, 2] = base_foodamount\r\n    food_alive += 1\r\n\r\n# start the loop\r\n\r\nwhile True:\r\n\r\n    for critter in range(critters_alive):\r\n\r\n        critter_pos = np.array([array_critters[critter, 3], array_critters[critter, 4]])\r\n\r\n        # Find the Food\r\n\r\n        for food in range(food_alive):\r\n\r\n            food_pos = [array_food[food, 0], array_food[food, 1]]\r\n\r\n            # if distance to food is smaller than fov set new normalized walking direction:\r\n            if math_calc_dist(critter_pos, food_pos) < array_critters[critter, 2]:\r\n                direction = normalize(food_pos[0] - critter_pos[0], food_pos[1] - critter_pos[1])\r\n\r\n                array_critters[critter, 5] = direction[0]\r\n                array_critters[critter, 6] = direction[1]\r\n\r\n                # if the food is closer than the walking speed it gets eaten\r\n                if math_calc_dist(critter_pos, food_pos) < array_critters[critter, 1]:\r\n                    array_critters[critter, 0] += array_food[food, 2]\r\n                # stop scanning once a food item inside the fov has been handled\r\n                break\r\n\r\n        # Critter loses health through metabolism:\r\n        array_critters[critter, 0] += -(1 + base_met_speed * array_critters[critter, 1] + base_met_fov * array_critters[\r\n            critter, 2])\r\n\r\n        # If health at or below zero, critter dies:\r\n        if array_critters[critter, 0] <= 0:\r\n            # move the last living critter into the freed row\r\n            array_critters[critter, :] = array_critters[critters_alive - 1, :]\r\n            critters_alive -= 1\r\n\r\n        # if health exceeds 300 a new critter is created:\r\n        if array_critters[critter, 0] >= 300:\r\n            array_critters[critter, 0] = 300 - base_health\r\n\r\n            # health rounded to two decimal places\r\n            array_critters[critters_alive, 0] = np.around(\r\n                base_health + base_variance * base_health * np.random.uniform(-1, 1),\r\n                2)\r\n\r\n            # speed rounded to two decimal places\r\n            array_critters[critters_alive, 1] = np.around(\r\n                array_critters[critter, 1] + base_variance * array_critters[critter, 1] * np.random.uniform(-1, 1), 2)\r\n\r\n            # fov rounded to one decimal place\r\n            array_critters[critters_alive, 2] = np.around(\r\n                array_critters[critter, 2] + base_variance * array_critters[critter, 2] * np.random.uniform(-1, 1), 1)\r\n\r\n            # Coordinates 3 = x 4 = y\r\n            array_critters[critters_alive, 3] = array_critters[critter, 3]\r\n            array_critters[critters_alive, 4] = array_critters[critter, 4]\r\n\r\n            # Directions\r\n            rnddirection = np.random.uniform(0, 2 * math.pi)\r\n            array_critters[critters_alive, 5] = math.sin(rnddirection)\r\n            array_critters[critters_alive, 6] = math.cos(rnddirection)\r\n\r\n            critters_alive += 1\r\n\r\n        # Movement: test if the critter would run off the map, then move it:\r\n        movement_vector = np.array([array_critters[critter, 5], array_critters[critter, 6]])\r\n        movement_vector *= spf * array_critters[critter, 1]\r\n        testingpos = critter_pos + movement_vector\r\n\r\n\r\n        # critter changes direction instead of running out of map\r\n        if testingpos[0] <= 0 or testingpos[0] >= size_x:\r\n            movement_vector[0] -= 2 * 
movement_vector[0]\r\n            # flip the stored direction as well so the bounce persists\r\n            array_critters[critter, 5] = -array_critters[critter, 5]\r\n\r\n        if testingpos[1] <= 0 or testingpos[1] >= size_y:\r\n            movement_vector[1] -= 2 * movement_vector[1]\r\n            array_critters[critter, 6] = -array_critters[critter, 6]\r\n\r\n        critter_pos += np.around(movement_vector, 2)\r\n        array_critters[critter, 3:5] = critter_pos\r\n\r\n    # draw\r\n\r\n    # TODO\r\n\r\n    # FPS CONTROL\r\n\r\n    time.sleep(spf)\r\n\r\n    # Exit function (note: cv2.waitKey only registers key presses while an OpenCV window has focus):\r\n\r\n    if cv2.waitKey(1) & 0xFF == ord('q'):\r\n        break\r\n\r\n    # Pause function:\r\n\r\n    if cv2.waitKey(1) & 0xFF == ord('p'):\r\n        time.sleep(0.3)\r\n        while True:\r\n            time.sleep(0.3)\r\n            if cv2.waitKey(1) & 0xFF == ord('p'):\r\n                break\r\n\r\n# plot the distribution of speed, fov\r\n\r\n# plot.hist(array_critters[:, 0], 40, [100, 300])\r\nplot.hist(array_critters[:, 1], 40, [1, 30], edgecolor=[0, 0, 0])\r\nplot.hist(array_critters[:, 2], 40, [1, 30], edgecolor=[0, 0, 0])\r\n\r\nplot.show()\r\n","sub_path":"Evolution.py","file_name":"Evolution.py","file_ext":"py","file_size_in_byte":6516,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"380734327","text":"import csv\r\nfrom datetime import datetime\r\nimport json\r\n\r\nwith open('ChicagoData.csv') as csvfile:\r\n    spamreader = csv.reader(csvfile, delimiter=',') \r\n    mydict = {}\r\n    i = 0\r\n    for row in spamreader: \r\n        date = row[3].split()[0]\r\n        i += 1\r\n        try:\r\n            date = datetime.strptime(date, '%d/%m/%Y') \r\n        except ValueError:\r\n            print('Wrong Format')\r\n            continue \r\n        date = 
date.replace(day=1).strftime('%d/%m/%Y')\r\n        beat = row[11]\r\n        \r\n        key = (date, beat)\r\n        if key in mydict:\r\n            mydict[key] += 1\r\n        else:\r\n            mydict[key] = 1  \r\n        if i >= 10000:\r\n            print('Reached 10000')\r\n            i = 0\r\n    print(mydict)\r\n    # for key, value in mydict.items():\r\n    #     date, beat = key\r\n    #     print (date, end=', ')\r\n    #     print (beat, end=', ')   \r\n    #     print(value)\r\n    with open('filtered_data.csv', 'w', newline='') as output:\r\n        spamwriter = csv.writer(output, delimiter=',', quotechar='|', quoting=csv.QUOTE_MINIMAL)\r\n        spamwriter.writerow(['beat', 'date', 'crimes'])\r\n        for key, value in mydict.items():\r\n            date, beat = key\r\n            spamwriter.writerow([beat, date, value])\r\n    \r\n\r\n\r\n    # with open('result.json', 'w') as file:\r\n    #     json.dump(mydict, file)","sub_path":"data/filter.py","file_name":"filter.py","file_ext":"py","file_size_in_byte":1354,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"39242307","text":"import sys\nimport os\nimport json\nfrom _thread import *\nfrom socket import *\nfrom datetime import datetime,date,timedelta\nfrom threading import Timer\n\ntempid = {}\ndef loginserver(username, password, clientSocket):\n    sentence = {\n        'messagetype':'credentials',\n        'username': username,\n        'password': password\n    }\n    clientSocket.send(bytes(json.dumps(sentence),encoding='utf-8'))\n    while 1:\n        receivedSentence = clientSocket.recv(1024)\n        received = receivedSentence.decode('utf-8')\n        #print('>>>>>>>>>>>>>>>>>>>>>>')\n        #print(received)\n        if received == 'Y':\n            print('> Welcome to the BlueTrace Simulator')\n            break\n        elif received == 'N':\n            print('> Invalid Password. Please try again')\n            newpassword = input('>password: ')\n            sentence['password'] = newpassword\n            clientSocket.send(bytes(json.dumps(sentence),encoding='utf-8'))\n        elif received == 'NB':\n            print('> Invalid Password. Your account has been blocked. 
Please try again later')\n            exit()\n        elif received == 'B':\n            print('> Your account is blocked due to multiple login failures. Please try again later')\n            exit()\n\ndef download(username, clientSocket):\n    global tempid\n    sentence = {\n        'messagetype':'downloadtempid',\n        'username': username\n    }\n    clientSocket.send(bytes(json.dumps(sentence),encoding='utf-8'))\n    receivedSentence = clientSocket.recv(1024)\n    tempidline = receivedSentence.decode('utf-8')\n    pairs = tempidline.split(',')\n    tempid['tempid'] = pairs[0]\n    tempid['createtime'] = pairs[1].strip()\n    tempid['expiredtime'] = pairs[2].strip()\n    print(f'> TempID: \\n{pairs[0]}')\n\ndef upload(username, clientSocket):\n    if not os.path.exists('z5223796_contactlog.txt'):\n        print('contact log is empty')\n        return\n    if os.path.getsize('z5223796_contactlog.txt') == 0:\n        print('contact log is empty')\n        return\n    if tempid == {}:\n        print('tempid is empty, try Download_tempID first')\n        return\n    log = {}\n    with open('z5223796_contactlog.txt') as file:\n        for line in file.readlines():\n            time = {}\n            pairs = line.split(' ')\n            starttime = pairs[1].strip() + ' ' + pairs[2].strip()\n            expiredtime = pairs[3].strip() + ' ' + pairs[4].strip()\n            time['createtime'] = starttime\n            time['expiredtime'] = expiredtime\n            print(f'{pairs[0]},\\n{starttime},\\n{expiredtime};\\n')\n            log[pairs[0]] = time\n    sentence = {\n        'messagetype':'uploadcontactlog',\n        'username': username,\n        'log': log\n    }\n    clientSocket.send(bytes(json.dumps(sentence),encoding='utf-8'))\n\ndef logout(username, clientSocket):\n    sentence = {\n        'messagetype':'logout',\n        'username': username\n    }\n    clientSocket.send(bytes(json.dumps(sentence),encoding='utf-8'))\n    #print(tempid)\n    print('You have logged out. Goodbye.')\n    clientSocket.close()\n    exit()\n\ndef sendbeacon(udpip,udpport):\n    serverName = udpip\n    serverPort = int(udpport)\n    clientSocket = socket(AF_INET, SOCK_DGRAM)\n    if tempid == {}:\n        print('You do not have a tempid, will send all 0s and invalid time')\n        message = '1,00000000000000000000,01/01/1970 00:00:00,01/01/1970 00:00:00'\n        print(f'00000000000000000000,\\n01/01/1970 00:00:00,\\n01/01/1970 00:00:00.\\n')\n        clientSocket.sendto(bytes(message,encoding='utf-8'),(serverName, serverPort))\n        clientSocket.close()\n        return\n    createtime = tempid['createtime']\n    expiredtime = tempid['expiredtime']\n    tid = tempid['tempid']\n    message = f'1,{tid},{createtime},{expiredtime}'\n    print(f'{tid},\\n{createtime},\\n{expiredtime}.\\n')\n    clientSocket.sendto(bytes(message,encoding='utf-8'),(serverName, serverPort))\n    clientSocket.close()\n    return\n\ndef udpserver(udpport):\n    hostname = gethostname()\n    udpclientip = gethostbyname(hostname)\n    serverSocket = socket(AF_INET, SOCK_DGRAM)\n    #print(f'working on {udpclientip}')\n    serverSocket.bind((udpclientip, udpport))\n    start_new_thread(revbeacon,(serverSocket, ))\n\n\ndef revbeacon(serverSocket): \n    while 1:\n        message, clientAddress = serverSocket.recvfrom(2048)\n        beaconmessage = message.decode('utf-8')\n        pair = beaconmessage.split(',')\n        tid = pair[1]\n        ctime = pair[2]\n        etime = pair[3]\n        timenow = datetime.now()\n        strtimenow = timenow.strftime('%d/%m/%Y %H:%M:%S')\n        ctimeint = datetime.strptime(ctime,'%d/%m/%Y %H:%M:%S')\n        etimeint = datetime.strptime(etime,'%d/%m/%Y %H:%M:%S')\n        logstr = tid + ' ' + ctime + ' ' + etime + '\\n'\n        print(f' received beacon:\\n{tid},\\n{ctime},\\n{etime}.\\nCurrent time is:\\n{strtimenow}.\\n')\n        if timenow >= ctimeint and timenow <= etimeint:\n            print('The beacon is valid.')\n            if not os.path.exists('z5223796_contactlog.txt'):\n                log = open('z5223796_contactlog.txt','w')\n            else:\n                log = open('z5223796_contactlog.txt','a')\n            log.write(logstr)\n            log.close()\n            t = 
Timer(180,dellog,(logstr,))\n t.start()\n else:\n print('The beacon is invalid.')\n \ndef dellog(string):\n oldlog = open('z5223796_contactlog.txt','r')\n lines = oldlog.readlines()\n oldlog.close()\n newlog = open('z5223796_contactlog.txt','w')\n for line in lines:\n if line.strip('\\n') != string.strip('\\n'):\n newlog.write(line)\n newlog.close()\n\ndef connectwithserver(host, port, udpport):\n serverName = host\n serverPort = port\n # connection with tcp server\n clientSocket = socket(AF_INET, SOCK_STREAM)\n clientSocket.connect((serverName, serverPort))\n username = input('> username: ')\n password = input('> password: ')\n loginserver(username, password, clientSocket)\n \n # print('login succful')\n #open udp server\n udpserver(udpport)\n f = open('z5223796_contactlog.txt','w')\n f.close()\n #start_new_thread(udpserver, udpport)\n while 1:\n command = input('> ')\n beaconlist = command.split()\n if command == 'Download_tempID':\n download(username, clientSocket)\n continue\n elif command == 'Upload_contact_log':\n upload(username, clientSocket)\n continue\n elif command == 'logout':\n logout(username, clientSocket)\n elif beaconlist[0] == 'Beacon':\n if len(beaconlist) != 3:\n print(\"require ip and port. Try again\")\n continue\n udpip = beaconlist[1]\n udpport = beaconlist[2]\n sendbeacon(udpip,udpport)\n continue\n else:\n print('Error. Invalid command')\n continue\n clientSocket.close()\n\nif __name__ == '__main__':\n if len(sys.argv) != 4:\n print('required host prot UDP port')\n exit()\n host = sys.argv[1]\n serverport = int (sys.argv[2])\n udpport = int (sys.argv[3])\n connectwithserver(host, serverport,udpport)","sub_path":"ass1/final/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":7126,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"649359803","text":"# encoding: utf-8\n# [models.word2vec – Deep learning with word2vec](https://radimrehurek.com/gensim/models/word2vec.html)\n\nfrom gensim.models import Word2Vec\nfrom sklearn.decomposition import PCA # decomposition 分解\nfrom matplotlib import pyplot as plt\n\n\nplt.rcParams['font.sans-serif'] = ['SimHei'] # 用来正常显示中文标签\nplt.rcParams['axes.unicode_minus'] = False # 用来正常显示负号\n\n\nmodel = Word2Vec.load('wiki.zh.text.model')\n\n# X = model[model.wv.vocab]\n\n# pca = PCA(n_components=2)\n# result = pca.fit_transform(X)\n# plt.scatter(result[:, 0], result[:, 1])\n# plt.show()\n\n\nword = [\"爸爸\", \"妈妈\"]\n# 寻找出最相似的多个词\nwords = [wp[0] for wp in model.most_similar(word, topn=20)]\n# print(words)\n'''\n['老公', '奶奶', '爷爷', '阿姨', '老婆', '老爸', '母亲', '保姆', '大叔', '哥哥', '外婆', '爸妈', '姊姊', '妈咪', '婆婆', '太太', '妹妹', '小宝宝', '小兔', '女儿']\n'''\n\n# 提取出词对应的词向量\nwords_in_vector = [model[word] for word in words]\n# print(words_in_vector)\n\n# print(model['爸爸'])\n''' size长度为400的词向量\n[ 1.22842872e+00 9.84687269e-01 9.24556017e-01 -2.57590771e-01\n ...\n 1.67887378e+00 -2.94714928e+00 -1.82157099e+00 -4.83914346e-01]\n'''\n\n# 训练 PCA 模型进行降维\npca = PCA(n_components=2) # 只保留2个维度\npca.fit(words_in_vector)\n\nX = pca.transform(words_in_vector)\n\n# 绘制图形\nxs = X[:, 0]\nys = X[:, 1]\n\nplt.figure(figsize=(10, 6))\nplt.scatter(xs, ys, marker='o')\n\n# 遍历所有的词添加点注释\nfor i, w in enumerate(words):\n plt.annotate(\n w,\n xy=(xs[i], ys[i]), xytext=(6, 6),\n textcoords='offset points', ha='left', va='top',\n **dict(fontsize=10)\n 
)\nplt.show()\n\n\n","sub_path":"06_pca_visual.py","file_name":"06_pca_visual.py","file_ext":"py","file_size_in_byte":1755,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"620784559","text":"import pandas as pd\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\n#%matplotlib inline\r\n\r\nfrom pandas.plotting import register_matplotlib_converters\r\nregister_matplotlib_converters()\r\n\r\ndf=pd.read_csv('C:\\\\Users\\\\HP\\\\OneDrive\\\\Desktop\\\\proc\\\\^BSESN.csv')\r\ndf.index=df['Date']\r\n\r\nff = pd.DataFrame(df, columns=['Close'])\r\nff=ff.reset_index()\r\nff['Date']=pd.to_datetime(ff.Date,format='%Y-%m-%d')\r\nff.isna().values.any()\r\nff.dropna(inplace=True)\r\nimport matplotlib.dates as mdates\r\n\r\nyears = mdates.YearLocator() # Get every year\r\nyearsFmt = mdates.DateFormatter('%Y') # Set year format\r\n\r\n# Create subplots to plot graph and control axes\r\nfig, ax = plt.subplots()\r\nax.plot(ff['Date'], ff['Close'])\r\n\r\n# Format the ticks\r\nax.xaxis.set_major_locator(years)\r\nax.xaxis.set_major_formatter(yearsFmt)\r\n\r\nlo=str(ff.Date.dt.year[0])\r\n\r\nhi=str(ff.Date.dt.year[len(ff.Date)-1])\r\n\r\n\r\n# Set figure title\r\nplt.title('Close Number Of Users History '+lo+'-'+hi, fontsize=16)\r\n# Set x label\r\nplt.xlabel('Date', fontsize=14)\r\n# Set y label\r\nplt.ylabel('Closing Number Of Users ', fontsize=14)\r\n\r\n# Rotate and align the x labels\r\nfig.autofmt_xdate()\r\n\r\n# Show plot\r\nplt.show()\r\n\r\n\r\n# Import package for splitting data set\r\nfrom sklearn.model_selection import train_test_split\r\n#from sklearn.svm import SVR\r\n\r\ntrain, test = train_test_split(ff, test_size=0.20)\r\n\r\n# Reshape index column to 2D array for .fit() method\r\nX_train = np.array(train.index).reshape(-1, 1)\r\ny_train = train['Close']\r\n\r\n\r\n\r\nX_test = np.array(test.index).reshape(-1, 1)\r\ny_test = test['Close']\r\n\r\ny_train=y_train.astype('int')\r\ny_test=y_test.astype('int')\r\n\r\n\r\n\r\nfrom sklearn.linear_model import LogisticRegression\r\nclf = LogisticRegression(solver='lbfgs', multi_class='ovr')\r\n\r\n# Train Decision Tree Classifer\r\nclf.fit(X_train,y_train)\r\n\r\n\r\ny_pred = clf.predict(X_test)\r\nff['Prediction'] = clf.predict(np.array(ff.index).reshape(-1, 1))\r\nrandints = np.random.randint(2550, size=25)\r\n\r\n# Select row numbers == random numbers\r\ndf_sample = ff[ff.index.isin(randints)]\r\n\r\n# Create subplots to plot graph and control axes\r\nfig, ax = plt.subplots()\r\ndf_sample.plot(x='Date', y=['Close', 'Prediction'], kind='bar', ax=ax)\r\n\r\n# Set figure title\r\nplt.title('Logistic ->Comparison Predicted vs Actual Number Of Users in Sample data selection ', fontsize=16)\r\n\r\n# \r\n\r\n# Set x label\r\nplt.xlabel('Date', fontsize=14)\r\n\r\n# Set y label\r\nplt.ylabel('Number Of Users', fontsize=14)\r\n\r\n# Show plot\r\nplt.show()\r\n\r\n\r\n\r\n\r\nfrom sklearn.metrics import explained_variance_score\r\nprint('Accuracy by LogisticRegression Model -> ',explained_variance_score(y_test, y_pred))\r\n","sub_path":"new/LogisticModel.py","file_name":"LogisticModel.py","file_ext":"py","file_size_in_byte":2609,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"164663081","text":"import firebase_admin\r\nfrom firebase_admin import credentials\r\nfrom firebase_admin import db\r\nfrom firebase_admin import firestore\r\n\r\n# Fetch the service account key JSON file contents\r\ncred = 
credentials.Certificate('G:/ServiceAccountKey.json')\r\napp = firebase_admin.initialize_app(cred)\r\ndb = firestore.client()\r\n# NOTE: getQuote() is not defined or imported in this file; it is assumed to exist elsewhere in the project.\r\nresponse = getQuote()\r\nquote = response.body['quote']\r\nauthor = response.body['author']\r\n\r\ndoc_ref = db.collection(u'sampleData').document(u'inspiration')\r\n# Fixed: DocumentReference has no .self() method; .set() writes the document.\r\ndoc_ref.set({\r\n    u'quote' : quote,\r\n    u'author' : author,\r\n    })\r\nprint(quote+ " and "+author+" successfully written to the database")\r\n","sub_path":"Cab Safety/firebaseTest.py","file_name":"firebaseTest.py","file_ext":"py","file_size_in_byte":608,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"904527","text":"import requests\r\nfrom fake_useragent import UserAgent\r\nfrom bs4 import BeautifulSoup\r\nfrom xlsxwriter import Workbook\r\nimport xlrd\r\nimport re\r\nimport json\r\nfrom openpyxl import load_workbook\r\nimport time\r\n\r\n\r\nfile_name = 'gambling_1-10_1000.xlsx'\r\n\r\nworkbook = xlrd.open_workbook(file_name)\r\nworksheet = workbook.sheet_by_index(0)\r\nrows = worksheet.nrows\r\n\r\nlinks = []\r\nfor i in range (0,rows - 1):\r\n    links.append(worksheet.cell_value(1 + i,2))\r\n\r\n\r\nua = UserAgent()\r\nheader = {'user-agent': ua.chrome}\r\ncompany_links = []\r\n\r\nfor i in range(0, len(links)):\r\n\r\n    while True:\r\n        ua = UserAgent()\r\n        header = {'user-agent' : ua.chrome }\r\n        page = requests.get(\r\n            links[i],\r\n            headers=header)\r\n        soup = BeautifulSoup(page.content, 'lxml')\r\n        # print(page.status_code)\r\n        a = soup.find_all('code', {"id" : "stream-footer-embed-id-content" })\r\n\r\n\r\n        # print(a.__len__())\r\n\r\n        # b= a.__len__()\r\n        if len(a) == 0:\r\n            if len(soup.find_all('code')) > 0 :\r\n                print(soup.find_all('code'))\r\n            pass\r\n            # print('!!!!!!!')\r\n            # print(page.cookies)\r\n            # f = open('out.html', 'w')\r\n            # f.write(soup.prettify())\r\n            # f.close()\r\n        else:\r\n            break\r\n        time.sleep(1)\r\n\r\n\r\n\r\n    a = soup.find_all('code', {"id" : "stream-footer-embed-id-content" })\r\n\r\n    m = re.search("<!\-\-.+\-\->", str(a))\r\n\r\n\r\n    if m:\r\n        parsedStr = m.group(0)[4:-3]\r\n        parsedJson = json.loads(parsedStr)\r\n        jsonKeys = parsedJson.keys()\r\n        if 'website' in jsonKeys:\r\n            company_links.append(parsedJson['website'])\r\n            print('progress=',len(company_links),'/',len(links))\r\n        else:\r\n            company_links.append('No site')\r\n    else:\r\n        company_links.append('No site')\r\n\r\n    wb = load_workbook(filename=file_name)\r\n    sheet = wb.active\r\n    for i in range(0,len(company_links)):\r\n        sheet.cell(row=2 + i, column=4).value = company_links[i]\r\n    wb.save(file_name)\r\n\r\n\r\n\r\n\r\n","sub_path":"CompanyLink(16.01).py","file_name":"CompanyLink(16.01).py","file_ext":"py","file_size_in_byte":2080,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"648197779","text":"from invoke import task\n\n@task\ndef start(c):\n    c.run("dfx identity new id_alice || true")\n    print("\\033[0;32;40m start \\033[0m")\n\n@task(start)\ndef build(c):\n    c.run("dfx canister --no-wallet create --all")\n    c.run("dfx build --all")\n    print("\\033[0;32;40m build completed\\033[0m")\n\n@task(build)\ndef install(c):\n    c.run("dfx canister --no-wallet install Bridge")\n    c.run("dfx canister --no-wallet install ERC20Handler")\n    print("\\033[0;32;40m install completed\\033[0m")\n\n\n@task(build)\ndef upgrade(c):\n    c.run("dfx canister --no-wallet install Bridge -m reinstall --all")\n    c.run("dfx canister --no-wallet install ERC20Handler -m reinstall --all")\n    
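# Editorial note: \"-m reinstall\" redeploys the canisters from scratch and wipes any state they hold (unlike \"-m upgrade\", which preserves stable memory), so this task is only safe against a local development replica.\n    # A guard one might add here (hypothetical, not part of the original):\n    #   if input(\"reinstall wipes canister state, continue? [y/N] \") != \"y\":\n    #       return\n    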
print(\"\\033[0;32;40m upgrade completed\\033[0m\")\n\n@task(upgrade, default=True)\ndef deposit(c):\n print(\"\\033[0;32;40m set resoure ...\\033[0m\")\n canister_id = c.run(\"dfx canister id ERC20Handler\").stdout.replace(\"\\n\", \"\")\n principal_id = c.run(\"dfx identity get-principal\").stdout.replace(\"\\n\", \"\")\n recipient_id = c.run(\"dfx --identity id_alice identity get-principal\").stdout.replace(\"\\n\", \"\")\n print(\"\\033[0;32;40m ERC20Handler \\\"\" + canister_id + \"\\\" \\033[0m\")\n print(\"\\033[0;32;40m depositer_id \\\"\" + principal_id + \"\\\" \\033[0m\")\n print(\"\\033[0;32;40m recipient_id \\\"\" + recipient_id + \"\\\" \\033[0m\")\n rid = c.run(\"dfx canister call ERC20Handler setResource '(1,\\\"WICP Name\\\",\\\"WICP\\\",8,1000000,principal \\\"\" + principal_id + \"\\\")'\").stdout\n resource_id = rid.replace(\"\\n\", \"\").replace(\" \", \"\").replace(\n \"(\\\"\", \"\").replace(\n \"\\\")\", \"\")\n print(\"\\033[0;32;40m resource_id \\\"\" + resource_id + \"\\\" \\033[0m\")\n tokenAddr = resource_id.split(\"_\")[1]\n print(\"\\033[0;32;40m get token address \\\"\" + tokenAddr + \"\\\" \\033[0m\")\n print(\"\\033[0;32;40m token approve start ....\\033[0m\")\n assert \"true\" in c.run(\"dfx canister call \" + tokenAddr + \" approve '(principal \\\"\" + canister_id + \"\\\",1000000:nat)'\").stdout\n assert \"true\" in c.run(\"dfx canister call \" + tokenAddr + \" approve '(principal \\\"\" + principal_id + \"\\\",1000000:nat)'\").stdout\n assert \"true\" in c.run(\"dfx canister call \" + tokenAddr + \" approve '(principal \\\"\" + recipient_id + \"\\\",1000000:nat)'\").stdout\n \n print(\"\\033[0;32;40m deposit start ....\\033[0m\")\n\n depositer_banlance = c.run(\"dfx canister call \" + tokenAddr + \" balanceOf '(principal \\\"\" + principal_id + \"\\\")'\").stdout\n print(\"\\033[0;32;40m depositer init banlance \\\"\" + depositer_banlance + \"\\\" \\033[0m\")\n\n contract_banlance = c.run(\"dfx canister call \" + tokenAddr + \" balanceOf '(principal \\\"\" + canister_id + \"\\\")'\").stdout\n print(\"\\033[0;32;40m contract init banlance \\\"\" + contract_banlance + \"\\\" \\033[0m\")\n\n\n record_r = c.run(\"dfx canister call Bridge deposit '(\\\"\" + resource_id + \"\\\",1,principal \\\"\" + principal_id + \"\\\",principal \\\"\" + recipient_id + \"\\\",1000,10)'\").stdout\n deposit_id = record_r.replace(\"opt\", \"\").replace(\"\\n\", \"\").replace(\" \", \"\").replace(\n \"(\\\"\", \"\").replace(\n \"\\\")\", \"\")\n print(\"\\033[0;32;40m deposit_id \\\"\" + deposit_id + \"\\\" \\033[0m\")\n deposit_nonces = deposit_id.split(\"_\")\n chain_id = deposit_nonces[0]\n chain_nonce = deposit_nonces[1]\n deposit_record = c.run(\"dfx canister call Bridge getDepositRecord '(\\\"\" + resource_id + \"\\\",\" + chain_id + \",\" + chain_nonce + \")'\").stdout\n print(\"\\033[0;32;40m deposit_record \\\"\" + deposit_record + \"\\\" \\033[0m\")\n\n depositer_banlance = c.run(\"dfx canister call \" + tokenAddr + \" balanceOf '(principal \\\"\" + principal_id + \"\\\")'\").stdout\n print(\"\\033[0;32;40m depositer deposit banlance \\\"\" + depositer_banlance + \"\\\" \\033[0m\")\n\n contract_banlance = c.run(\"dfx canister call \" + tokenAddr + \" balanceOf '(principal \\\"\" + canister_id + \"\\\")'\").stdout\n print(\"\\033[0;32;40m contract deposit banlance \\\"\" + contract_banlance + \"\\\" 
\\033[0m\")\n","sub_path":"tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":3827,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"103642428","text":"#Author: Pedro Ramos Krauze Diehl\n#Code refactoration and class creation: Daniel Moraes\nimport numpy as np\n\n\nclass Algebra:\n def lu_decomposition(self, A):\n if A.shape[0] != A.shape[1]:\n raise\n\n dimension = A.shape[0]\n\n L = np.zeros(shape=(dimension, dimension))\n U = np.zeros(shape=(dimension, dimension))\n\n for i in range(dimension):\n L[i, i] = 1.0\n\n for i in range(dimension):\n for j in range(dimension):\n sum = 0.0\n\n if j >= i:\n for k in range(i + 1):\n sum += L[i, k] * U[k, j]\n\n U[i, j] = A[i, j] - sum\n else:\n for k in range(j + 1):\n sum += L[i, k] * U[k, j]\n\n L[i, j] = (A[i, j] - sum) / U[j, j]\n\n return L, U\n\n def solve_reverse(self, L, b):\n dimension = L.shape[0]\n\n y = np.zeros(shape=(dimension, 1))\n\n for i in range(dimension):\n sum = 0\n\n for j in range(dimension):\n if j < i:\n sum += L[i][j] * y[j]\n\n y[i] = b[i] - sum\n\n return y\n\n def solve_normal(self, U, y):\n dimension = U.shape[0]\n\n x = np.zeros(shape=(dimension, 1))\n\n for i in range(dimension - 1, -1, -1): # range([start], stop[, step])\n sum = 0\n\n for j in range(0, dimension):\n if not(j == i):\n sum += U[i][j] * x[j]\n\n x[i] = (y[i] - sum) / U[i][i]\n\n return x\n\n def solve_system(self, A, b):\n l, u = self.lu_decomposition(A)\n y = self.solve_reverse(l, b)\n x = self.solve_normal(u, y)\n\n return x\n","sub_path":"Trabalho 6 (Daniel M.)/algebra.py","file_name":"algebra.py","file_ext":"py","file_size_in_byte":1747,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"490375363","text":"from django.db import models\nfrom django.urls import reverse\nfrom users.models import UserProfile\nfrom equipment.models import Camera, Film, Lens\n\n\nclass Tag(models.Model):\n name = models.CharField(max_length=20, null=True, blank=True)\n\n def __str__(self):\n return self.name\n\n\nclass Photo(models.Model):\n\n class Meta:\n ordering = ['-posted_on']\n\n author = models.ForeignKey(\n UserProfile,\n on_delete=models.CASCADE,\n related_name='photos',\n null=True\n )\n\n image = models.ImageField(upload_to='photos', blank=False, null=False)\n\n title = models.CharField(max_length=50, blank=True)\n caption = models.CharField(max_length=200, blank=True)\n posted_on = models.DateTimeField(auto_now=True)\n\n tags = models.ManyToManyField(\n Tag, related_name='photos', blank=True\n )\n\n # Equipment\n\n camera = models.ForeignKey(\n Camera, related_name='photos',\n on_delete=models.PROTECT,\n null=True, blank=True\n )\n film = models.ForeignKey(\n Film, related_name='photos',\n on_delete=models.PROTECT,\n null=True, blank=True\n )\n lens = models.ForeignKey(\n Lens, related_name='photos',\n on_delete=models.PROTECT,\n null=True, blank=True\n )\n\n # Capture data\n\n APERTURES = (\n (14, '1.4'),\n (20, '2'),\n (28, '2.8'),\n (40, '4'),\n (56, '5.6'),\n (80, '8'),\n (110, '11'),\n (160, '16'),\n (220, '22'),\n )\n SHUTTER_SPEEDS = (\n (1, '1'),\n (2, '1/2'),\n (4, '1/4'),\n (8, '1/8'),\n (15, '1/15'),\n (30, '1/30'),\n (60, '1/60'),\n (125, '1/125'),\n (250, '1/250'),\n (500, '1/500'),\n (1000, '1/1000'),\n (2000, '1/2000'),\n (4000, '1/4000'),\n )\n EXPOSURES = (\n (100, '100'),\n (200, '200'),\n (400, '400'),\n (800, '800'),\n (1600, '1600'),\n )\n aperture = models.PositiveIntegerField(\n choices=APERTURES,\n null=True, 
blank=True\n )\n shutter_speed = models.PositiveIntegerField(\n choices=SHUTTER_SPEEDS,\n null=True, blank=True\n )\n exposure = models.PositiveIntegerField(\n choices=EXPOSURES,\n null=True, blank=True\n )\n\n def __str__(self):\n return self.title + ', taken by ' + self.author.user.username\n\n def get_absolute_url(self):\n return reverse('photos:photo_detail', kwargs={'pk': self.id})\n\n\nclass Comment(models.Model):\n author = models.ForeignKey(\n UserProfile,\n on_delete=models.CASCADE,\n related_name='comments',\n null=True\n )\n photo = models.ForeignKey(\n Photo,\n on_delete=models.CASCADE,\n related_name='comments'\n )\n\n text = models.CharField(max_length=200)\n posted_on = models.DateTimeField(auto_now=True)\n\n def __str__(self):\n return self.author.user.username + ' said \\\"' + self.text\n\n def get_absolute_url(self):\n return reverse('photos:photo_detail', kwargs={'pk': self.photo.id})\n","sub_path":"35px/photos/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":3108,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"50597390","text":"import random\nimport numpy as np\n#Calculo de probabilidades\ndef probabilidades(ph,ben,n):\n total=0\n for a in range(0,N):\n temp=ph[a]*ben[a]\n total += temp\n probabilidades=[((ph[b]*ben[b])/total) for b in range(0,N)]\n total=0\n com=np.cumsum(probabilidades)\n total=0\n return com\n#################################################\nc=15\nants=15\nn_list=np.array([1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20])\npesos=np.array([3,5,1,5,2,1,4,5,5,2,3,4,5,1,2,4,2,1,3,4])\nminimo=np.amin(pesos)#peso minimo de los ladrillos\nN=len(n_list)\ncosto=np.array([2,5,3,10,2,8,4,1,1,6,4,10,10,3,7,8,4,8,8,9])\nferormona=0.1*np.ones(N)\nbeneficio=(1/10)*costo#factor de seleccion relacionado con el costo\ntrayectorias=[]\n#numero de iteraciones de todas las hormigas\nfor z in range(0,10):# 5 iteraciones totales que hacen las hormigas para encontrar la solucion\n compuertas=probabilidades(ferormona,beneficio,N)#llama la funcion para las probabilidades de cada bloque\n trayectorias.clear()#vacia la matriz de trayectorias\n ganancias=np.empty(0,int)#lista de ganancia correspondiente a cada una de las hormigas\n for k in range(0,ants):#ciclo para que todas las hormigas hagan una trayectoria\n cdisponible=c#capacidad disponible inicial que tendra la mochila\n camino_ant=np.empty(0,int)#lista de bloques escogidos por cada hormiga\n ganancia=0# la ganancia obtenida en el recorrido por cada hormiga\n while cdisponible>minimo:\n select=random.random()#Numero aleatorio entre 0 y 1 para seleccionar el bloque\n #Seleccion del ladrillo\n for i in range(0,N):\n if select<=compuertas[i]:#seleccion de la compuerta\n bloque=i+1\n if bloque not in camino_ant:#revisa si el bloque seleccionado de manera aleatoria no se ha escogido\n if((cdisponible-pesos[i])<=0):#verificar si con el bloque seleccionado se sobre pase la capacidad de la mochila\n break\n else:\n camino_ant=np.append(camino_ant,bloque)\n cdisponible -=pesos[i]\n ganancia += costo[i]\n break\n else:\n select=random.random()#volver a escribir numero aleatorio\n i=0#reinicia la seleccion de bloques\n trayectorias.append(camino_ant)#añadir el camino de cada hormiga a la matriz de trayectorias\n ganancias=np.append(ganancias,ganancia)#llena la lista con la ganancia de cada hormiga\n solution=np.amax(ganancias)#escoge el mayor de las ganancias\n solution_way=trayectorias[list(ganancias).index(solution)]\n for block in 
n_list:#actualizar la ferormona de cada bloque\r\n        if block in solution_way:#refurza si el bloque fue seleccionado durante la trayectoria\r\n            ferormona[block-1] = 0.9*ferormona[block-1]+beneficio[block-1]\r\n        else: #evapora si no fue sleccionado\r\n            ferormona[block-1] = 0.9*ferormona[block-1]\r\n#archivo=open('Solucion_1.txt','a')\r\n#archivo.write(str(solution))\r\n#archivo.write('\\n')\r\n#archivo.close()","sub_path":"Mochila por colonia de hormigas_v5.py","file_name":"Mochila por colonia de hormigas_v5.py","file_ext":"py","file_size_in_byte":3192,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"411245731","text":"from django.conf.urls.defaults import patterns, include, url\nfrom django.contrib.auth.views import login, logout\n\nfrom django.views.generic.simple import direct_to_template\nfrom django.contrib.auth.views import logout_then_login\nfrom django.conf import settings\n\n\n#from cloudbay import register\n# Uncomment the next two lines to enable the admin:\nfrom django.contrib import admin\nadmin.autodiscover()\n\nurlpatterns = patterns('',\n    # Examples:\n    # url(r'^$', 'cloudbay.views.home', name='home'),\n    # url(r'^cloudbay/', include('cloudbay.foo.urls')),\n\n    # Uncomment the admin/doc line below to enable admin documentation:\n    # url(r'^admin/doc/', include('django.contrib.admindocs.urls')),\n\n    # Uncomment the next line to enable the admin:\n\turl(r'^admin/', include(admin.site.urls)),\n\turl(r'^$','register.views.index' ),\n\turl(r'^home/','register.views.index' ),\n\turl(r'^condor/','condor.views.ViewMachines' ),\n\turl(r'^accounts/register/','register.views.register' ),\n\turl(r'^provider_register/','provider.views.ProviderRegister'),\t\n\turl(r'^resource_register/','resources.views.ResourceRegister'),\t\n\turl(r'^bidding_details/','bidding.views.ResourceBidRegister'),\n\turl(r'^bidding/','bidding.views.BiddingIndex'),\t\n\turl(r'^machine/(?P<machineID>[^/]+)/','bidding.views.getCondorStatusByMachineName', name = "detailsMachine"),\t\n\turl(r'^virtual_currency/','transactions.views.BuyVirtualCurrency'),\n\turl(r'^results/','bidding.views.Bidding', name = "online_machines"),\n\turl( r'^site_media/(?P<path>.*)$', 'django.views.static.serve', { 'document_root': settings.MEDIA_ROOT } ),\t\n\turl(r'^login/$',login,{'redirect_field_name' : 'templates/home.html'}),\n\turl(r'^logout/$',logout),\n)\n","sub_path":"urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1684,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"383592705","text":"import peewee\nfrom unittest import TestCase\nimport database_config\ndatabase_config.database_path = 'database/test_arts.db'\n\nimport db_manager\nfrom model_artist import Artist\n\nclass DatabaseTests(TestCase):\n\n    def setUp(self):\n        database_config.database_path = 'database/test_arts.db'\n        Artist.delete().execute()\n\n    def test_add_artist(self): \n        new_artist = Artist(name='abdala', email='abdala@gmail.com')\n        new_artist.save()\n        self.assertEqual(new_artist.id, db_manager.check_if_artis_exist(new_artist.name))\n\n    def test_add_artist_duplicate_name(self):\n        new_artist = Artist(name='abdala', email='mtn@gmail.com')\n        new_artist.save()\n        with self.assertRaises(peewee.IntegrityError):\n            # Fixed: the Artist model has no 'author' field; 'email' was intended.\n            same_artist = Artist(name='abdala', email='abdala@yahoo.com')\n            same_artist.save()\n    \n    def test_add_artist_duplicate_email(self):\n        new_artist = Artist(name='abdala', email='mtn@gmail.com')\n        new_artist.save()\n        with 
self.assertRaises(peewee.IntegrityError):\n            # Fixed: 'author' is not a field of Artist; 'email' was intended.\n            same_artist = Artist(name='jama', email='mtn@gmail.com')\n            same_artist.save()\n\n    def test_case_sensitivity_constraint(self):\n        new_artist = Artist(name='abdala', email='mtn@gmail.com')\n        new_artist.save()\n        with self.assertRaises(peewee.IntegrityError):\n            same_artist = Artist(name='AbDaLA', email='mtn@gmail.com')\n            same_artist.save()\n","sub_path":"test_artist.py","file_name":"test_artist.py","file_ext":"py","file_size_in_byte":1451,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"102892411","text":"import sys\n\n# Read stdin and compare to pre-calculated result of test data.\n\ndata = []\nchunk = sys.stdin.readline()\nwhile chunk:\n\tdata.append(chunk)\n\tchunk = sys.stdin.readline()\n\n# Have to sort data for assert because multi-threading doesn't preserve order\nsorted_data = ''.join(sorted(data))\n\n# Not the prettiest testing structure, but does the job as described in the problem description. \n# unittest suite would be created for production code.\nassert sorted_data == ',\\narches,utah\\nbadlands,north dakota\\ncomma park,oregon\\neverglades,miami\\ntes\\\\\\\\nting,new\\\\\\\\nline\\ntes\\\\\\\\ting,t\\\\\\\\tab\\nyosemite,california\\n', sorted_data\n","sub_path":"ht/unit_test.py","file_name":"unit_test.py","file_ext":"py","file_size_in_byte":632,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"396788601","text":"#!/usr/bin/env python\n#-*- coding:utf8 -*-\n\nsocketio_config = {\n    "async_mode": "gevent_uwsgi",\n    "message_queue": "redis://socketio-redis:6379/1",\n    "engineio_logger": True,\n    "socketio_ping_interval": 25,\n    "socketio_ping_timeout": 60,\n}\n\ndemo_namespace = "/"\n","sub_path":"src/socketio-demo/etc/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":272,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"397203911","text":"from floatInput import *\n\nnumbers = []\n\ndef getInputs():\n\tstupidBool = True\n\tx = 0\n\twhile stupidBool:\n\t\tnumbers.append(floatInput(\"Enter a number, Press enter to stop entering numbers.: \", empty = True))\n\t\tif numbers[x] == \"\":\n\t\t\tstupidBool = False\n\t\t\tnumbers.pop(x)\n\t\tx += 1\n\ndef count():\n\treturnValue = 0\n\tfor number in range(len(numbers)):\n\t\treturnValue += numbers[number]\n\treturnValue = returnValue/len(numbers)\n\treturn(returnValue)\n\ngetInputs()\nprint(count())\ninput(\"Press enter to exit. 
: \")\t","sub_path":"Math Stuff/Medelvärde.py","file_name":"Medelvärde.py","file_ext":"py","file_size_in_byte":498,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"511180860","text":"\"\"\"Inference script for extracting segment prototypes with SegSort.\"\"\"\n\nfrom __future__ import print_function\n\nimport argparse\nimport math\nimport os\n\nimport network.segsort.common_utils as common_utils\nimport network.segsort.eval_utils as eval_utils\nimport numpy as np\nimport tensorflow as tf\n\nfrom seg_models.models.pspnet import pspnet_resnet101 as model\nfrom seg_models.image_reader import SegSortImageReader\nfrom tqdm import tqdm\n\n\nIMG_MEAN = np.array((122.675, 116.669, 104.008), dtype=np.float32)\n\n\ndef get_arguments():\n \"\"\"Parses all the arguments provided from the CLI.\n \n Returns:\n A list of parsed arguments.\n \"\"\"\n parser = argparse.ArgumentParser(\n description='Extracting Prototypes for Semantic Segmentation')\n parser.add_argument('--data_dir', type=str, default='',\n help='/path/to/dataset.')\n parser.add_argument('--data_list', type=str, default='',\n help='/path/to/datalist/file.')\n parser.add_argument('--input_size', type=str, default='512,512',\n help='Comma-separated string with H and W of image.')\n parser.add_argument('--strides', type=str, default='512,512',\n help='Comma-separated string with strides of H and W.')\n parser.add_argument('--num_classes', type=int, default=21,\n help='Number of classes to predict.')\n parser.add_argument('--ignore_label', type=int, default=255,\n help='Index of label to ignore.')\n parser.add_argument('--restore_from', type=str, default='',\n help='Where restore model parameters from.')\n parser.add_argument('--save_dir', type=str, default='',\n help='/path/to/save/predictions.')\n parser.add_argument('--colormap', type=str, default='',\n help='/path/to/colormap/file.')\n # SegSort parameters.\n parser.add_argument('--embedding_dim', type=int, default=32,\n help='Dimension of the feature embeddings.')\n parser.add_argument('--num_clusters', type=int, default=5,\n help='Number of kmeans clusters along each axis.')\n parser.add_argument('--kmeans_iterations', type=int, default=10,\n help='Number of kmeans iterations.')\n\n return parser.parse_args()\n\n\ndef load(saver, sess, ckpt_path):\n \"\"\"Loads the trained weights.\n \n Args:\n saver: TensorFlow saver object.\n sess: TensorFlow session.\n ckpt_path: path to checkpoint file with parameters.\n \"\"\" \n saver.restore(sess, ckpt_path)\n print('Restored model parameters from {}'.format(ckpt_path))\n\n\ndef parse_commastr(str_comma):\n \"\"\"Reads comma-sperated string.\"\"\"\n if '' == str_comma:\n return None\n else:\n a, b = map(int, str_comma.split(','))\n\n return [a,b]\n\ndef main():\n \"\"\"Creates the model and start the inference process.\"\"\"\n args = get_arguments()\n \n # Parse image processing arguments.\n input_size = parse_commastr(args.input_size)\n strides = parse_commastr(args.strides)\n assert(input_size is not None and strides is not None)\n h, w = input_size\n innet_size = (int(math.ceil(h/8)), int(math.ceil(w/8)))\n\n\n # Create queue coordinator.\n coord = tf.train.Coordinator()\n\n # Load the data reader.\n with tf.name_scope('create_inputs'):\n reader = SegSortImageReader(\n args.data_dir,\n args.data_list,\n None,\n False, # No random scale\n False, # No random mirror\n False, # No random crop, center crop instead\n args.ignore_label,\n IMG_MEAN)\n\n image = reader.image\n label = 
reader.label\n image_list = reader.image_list\n image_batch = tf.expand_dims(image, dim=0)\n label_batch = tf.expand_dims(label, dim=0)\n\n # Create input tensor to the Network.\n crop_image_batch = tf.placeholder(\n name='crop_image_batch',\n shape=[1,input_size[0],input_size[1],3],\n dtype=tf.float32)\n\n # Create network and output prediction.\n outputs = model(crop_image_batch,\n args.embedding_dim,\n False,\n True)\n\n # Grab variable names which should be restored from checkpoints.\n restore_var = [\n v for v in tf.global_variables() if 'crop_image_batch' not in v.name]\n \n # Output predictions.\n output = outputs[0]\n output = tf.image.resize_bilinear(\n output,\n [input_size[0], input_size[1]])\n\n # Input full-sized embedding.\n label_input = tf.placeholder(\n tf.int32, shape=[1, None, None, 1])\n embedding_input = tf.placeholder(\n tf.float32, shape=[1, None, None, args.embedding_dim]) #only 1 embedding (aka 1 image)\n embedding = common_utils.normalize_embedding(embedding_input)\n loc_feature = tf.placeholder(\n tf.float32, shape=[1, None, None, 2])\n\n # Combine embedding with location features and kmeans.\n shape = tf.shape(embedding)\n cluster_labels = common_utils.initialize_cluster_labels(\n [args.num_clusters, args.num_clusters],\n [shape[1], shape[2]])\n embedding = tf.reshape(embedding, [-1, args.embedding_dim])\n labels = tf.reshape(label_input, [-1])\n cluster_labels = tf.reshape(cluster_labels, [-1])\n location_features = tf.reshape(loc_feature, [-1, 2])\n\n # Extract prototype features and labels from embeddings.\n (prototype_features,\n prototype_labels,\n _) = eval_utils.extract_trained_prototypes(\n embedding, location_features, cluster_labels,\n args.num_clusters * args.num_clusters,\n args.kmeans_iterations, labels,\n 1, args.ignore_label,\n 'semantic')\n\n # Set up tf session and initialize variables. 
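\n  # Editorial note: allow_growth (set just below) makes TensorFlow allocate\n  # GPU memory on demand instead of reserving the whole device up front,\n  # which helps here because the sliding-window loop later builds full-image\n  # embedding buffers per input in addition to the crop-sized graph.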
\n config = tf.ConfigProto()\n config.gpu_options.allow_growth = True\n sess = tf.Session(config=config)\n init = tf.global_variables_initializer()\n \n sess.run(init)\n sess.run(tf.local_variables_initializer())\n \n # Load weights.\n loader = tf.train.Saver(var_list=restore_var)\n if args.restore_from is not None:\n load(loader, sess, args.restore_from)\n \n # Start queue threads.\n threads = tf.train.start_queue_runners(coord=coord, sess=sess)\n\n # Create directory for saving prototypes.\n save_dir = os.path.join(args.save_dir, 'prototypes')\n if not os.path.isdir(save_dir):\n os.makedirs(save_dir)\n \n # Iterate over testing steps.\n with open(args.data_list, 'r') as listf:\n num_steps = len(listf.read().split('\\n'))-1\n\n\n pbar = tqdm(range(num_steps))\n for step in pbar:\n image_batch_np, label_batch_np = sess.run(\n [image_batch, label_batch])\n\n img_size = image_batch_np.shape\n padded_img_size = list(img_size) # deep copy of img_size\n\n if input_size[0] > padded_img_size[1]:\n padded_img_size[1] = input_size[0]\n if input_size[1] > padded_img_size[2]:\n padded_img_size[2] = input_size[1]\n padded_img_batch = np.zeros(padded_img_size,\n dtype=np.float32)\n img_h, img_w = img_size[1:3]\n padded_img_batch[:, :img_h, :img_w, :] = image_batch_np\n\n stride_h, stride_w = strides\n npatches_h = math.ceil(1.0*(padded_img_size[1]-input_size[0])/stride_h) + 1\n npatches_w = math.ceil(1.0*(padded_img_size[2]-input_size[1])/stride_w) + 1\n\n # Create the ending index of each patch.\n patch_indh = np.linspace(\n input_size[0], padded_img_size[1], npatches_h, dtype=np.int32)\n patch_indw = np.linspace(\n input_size[1], padded_img_size[2], npatches_w, dtype=np.int32)\n \n # Create embedding holder.\n padded_img_size[-1] = args.embedding_dim\n embedding_all_np = np.zeros(padded_img_size,\n dtype=np.float32)\n for indh in patch_indh:\n for indw in patch_indw:\n sh, eh = indh-input_size[0], indh # start & end ind of H\n sw, ew = indw-input_size[1], indw # start & end ind of W\n cropimg_batch = padded_img_batch[:, sh:eh, sw:ew, :]\n\n embedding_np = sess.run(output, feed_dict={\n crop_image_batch: cropimg_batch})\n embedding_all_np[:, sh:eh, sw:ew, :] += embedding_np\n\n embedding_all_np = embedding_all_np[:, :img_h, :img_w, :]\n loc_feature_np = common_utils.generate_location_features_np([padded_img_size[1], padded_img_size[2]])\n feed_dict = {label_input: label_batch_np,\n embedding_input: embedding_all_np,\n loc_feature: loc_feature_np}\n\n (batch_prototype_features_np,\n batch_prototype_labels_np) = sess.run(\n [prototype_features, prototype_labels],\n feed_dict=feed_dict)\n\n if step == 0:\n prototype_features_np = batch_prototype_features_np\n prototype_labels_np = batch_prototype_labels_np\n else:\n prototype_features_np = np.concatenate(\n [prototype_features_np, batch_prototype_features_np], axis=0)\n prototype_labels_np = np.concatenate(\n [prototype_labels_np,\n batch_prototype_labels_np], axis=0)\n\n\n print ('Total number of prototypes extracted: ',\n len(prototype_labels_np))\n np.save(\n tf.gfile.Open('%s/%s.npy' % (save_dir, 'prototype_features'),\n mode='w'), prototype_features_np)\n np.save(\n tf.gfile.Open('%s/%s.npy' % (save_dir, 'prototype_labels'),\n mode='w'), prototype_labels_np)\n\n\n coord.request_stop()\n coord.join(threads)\n \nif __name__ == '__main__':\n 
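# Editorial note: main() builds the PSPNet embedding graph once, slides\n  # input_size windows with the given strides over each padded image,\n  # accumulates the embeddings, clusters them per image with k-means via\n  # extract_trained_prototypes, and saves prototype_features.npy and\n  # prototype_labels.npy under <save_dir>/prototypes.\n  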
main()\n","sub_path":"pyscripts/inference/extract_prototypes.py","file_name":"extract_prototypes.py","file_ext":"py","file_size_in_byte":9198,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"433750232","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# made with python 2\n# pylint: disable=C1001\n# pylint: disable=C0111\n# pylint: disable=C0412\n# pylint: disable=C0301\n# pylint: disable=R0904\n\"\"\"Methods for the CommandHandler\"\"\"\nimport subprocess\nimport random\nimport os\nimport nltk\nimport logging\nfrom datetime import datetime\nfrom os import listdir\nfrom time import gmtime\nfrom os.path import isfile, join\nfrom telegram_tweet import TweetFromTelegram\nfrom special_actions import SpecialActions\nfrom almacenamiento import Almacenamiento, User, UserGroup\n\n\nclass BotActions(object):\n \"\"\"Makes actions with the bot\"\"\"\n dict_pole = {}\n dict_porro = {}\n dict_pi = {}\n data = None\n stickers = ['CAADBAADJQADuE-EEuya2udZTudYAg', 'CAADBAADLAADuE - EElvaPQABlkaHMAI', 'CAADBAADQAADuE-EEs7AEGXnB5sOAg']\n logging.basicConfig(filename=\"botActions.log\", level=logging.DEBUG)\n\n # CAADBAADJQADuE-EEuya2udZTudYAg reverted\n # CAADBAADLAADuE - EElvaPQABlkaHMAI\n # CAADBAADQAADuE-EEs7AEGXnB5sOAg\n\n @staticmethod\n def start(bot, update):\n \"\"\"Initialize the bot\"\"\"\n chat_id = update.message.chat.id\n user_id = update.message.from_user.id\n BotActions.add_user(user_id, chat_id)\n BotActions.incrementa_mensajes(user_id, chat_id)\n bot.send_message(text='Hola, mundo!', chat_id=chat_id)\n\n @staticmethod\n def hola(bot, update):\n \"\"\"Reply with a cordial salute\"\"\"\n chat_id = update.message.chat.id\n user_id = update.message.from_user.id\n BotActions.add_user(user_id, chat_id)\n BotActions.incrementa_mensajes(user_id, chat_id)\n bot.send_message(chat_id=chat_id, text='Hola, {}!'.format(update.message.from_user.first_name))\n\n @staticmethod\n def macho(bot, update):\n \"\"\"Reply if you are altered\"\"\"\n chat_id = update.message.chat.id\n user_id = update.message.from_user.id\n BotActions.add_user(user_id, chat_id)\n BotActions.incrementa_mensajes(user_id, chat_id)\n bot.send_audio(chat_id=chat_id, audio=open('/home/pi/Documentos/pytel_stuff/macho.mp3', 'rb'))\n\n @staticmethod\n def send_memes(bot, update):\n # WORKING\n \"\"\"Reply with a random meme\"\"\"\n chat_id = update.message.chat.id\n user_id = update.message.from_user.id\n BotActions.add_user(user_id, chat_id)\n BotActions.incrementa_nudes(user_id, chat_id)\n file_name = BotActions.random_file_name('/home/pi/Documentos/pytel_stuff/Memes')\n bot.send_photo(chat_id=chat_id, photo=open(file_name, 'rb'))\n\n @staticmethod\n def random_file_name(path):\n \"\"\"Search a random file inside a path\"\"\"\n onlyfiles = [f for f in listdir(path) if isfile(join(path, f)) and f != '.DS_Store']\n lines = len(onlyfiles)\n random_file = int(round(random.random() * lines, 0))\n return path + \"/\" + onlyfiles[random_file]\n\n @staticmethod\n def ping(bot, update):\n # WORKING\n \"\"\"Reply with a pong.\"\"\"\n chat_id = update.message.chat.id\n user_id = update.message.from_user.id\n BotActions.add_user(user_id, chat_id)\n bot.send_message(chat_id=update.message.chat.id, text=\"Pong!\")\n BotActions.incrementa_ping(user_id, chat_id)\n\n @staticmethod\n def id_user(bot, update):\n # WORKING\n chat_id = update.message.chat.id\n user_id = update.message.from_user.id\n BotActions.add_user(user_id, chat_id)\n BotActions.incrementa_mensajes(user_id, chat_id)\n 
bot.send_message(chat_id=chat_id, text='`' + str(update.message.from_user.id) +\n '`', reply_to_message_id=update.message.message_id,\n parse_mode='Markdown')\n\n @staticmethod\n def id_chat(bot, update):\n # WORKING\n chat_id = update.message.chat.id\n user_id = update.message.from_user.id\n BotActions.add_user(user_id, chat_id)\n BotActions.incrementa_mensajes(user_id, chat_id)\n bot.send_message(chat_id=chat_id, text='`' + str(chat_id) + '`',\n reply_to_message_id=update.message.message_id, parse_mode='Markdown')\n\n @staticmethod\n def help(bot, update):\n chat_id = update.message.chat.id\n user_id = update.message.from_user.id\n BotActions.add_user(user_id, chat_id)\n BotActions.incrementa_mensajes(user_id, chat_id)\n help_text = BotActions.help_commands()\n bot.send_message(chat_id=user_id, text=help_text)\n\n @staticmethod\n def animals(bot, update):\n # WORKING\n \"\"\"Reply with a random animal image\"\"\"\n chat_id = update.message.chat.id\n user_id = update.message.from_user.id\n BotActions.add_user(user_id, chat_id)\n BotActions.incrementa_animales(user_id, chat_id)\n file_name = BotActions.random_file_name('/home/pi/Documentos/pytel_stuff/Animals')\n bot.send_photo(chat_id=chat_id, photo=open(file_name, 'rb'))\n\n @staticmethod\n def help_commands():\n help_text = u\"/start Inicializa el bot\\n\"\n help_text += u\"/ping Comprueba si el bot está encendido\\n\"\n help_text += u\"/hola Te saluda cordialmente\\n\"\n help_text += u\"/macho Te manda un audio para que te vayas a la mierda\\n\"\n help_text += u\"/nudes Te manda un meme aleatorio de un repertorio de memes\\n\"\n help_text += u\"/animals Te manda un animal aleatorio de un repertorio de animalitos\\n\"\n help_text += u\"/id Manda el ID del usuario que ha ejecutado el comando\\n\"\n help_text += u\"/id_c Manda el ID del chat en el que se ha ejecutado el comando\\n\"\n help_text += u\"/search Manda un meme con el texto que le introduzcas\\n\"\n help_text += u\"/sad Manda un meme de sad reacts only\\n\"\n help_text += u\"/tweet @pytwe_bot manda un tweet con el texto tras el comando, ahora con soporte de utf-8\\n\"\n help_text += u\"/pole Le da la pole a aquella persona que consiga mandar el primer mensaje del día\\n\"\n help_text += u\"/porro Le da la hora porro al primero en usar el comando en la hora porro ;)\\n\"\n help_text += u\"/pi Le da la horacio pi al primero en usar el comando en la horacio pi :O\\n\"\n help_text += u\"/set_tw_acc Agrega a la base de datos un usuario de twitter con el formato @Twitter_User\\n\"\n help_text += u\"/info Te manda toda la información acerca de tu cuenta\\n\"\n help_text += u\"/twitter_acc Te manda por privado la cuenta que tienes puesta de twitter actualmente\\n\"\n help_text += u\"/comunist Te manda el mejor meme comunista actual\\n\"\n help_text += u\"/current_status Te manda la información actual de la raspberry pi\"\n help_text += u\"Además interactúa con: :), :(, botijos...\\n\"\n return help_text\n\n @staticmethod\n def tweet(bot, update):\n chat_id = update.message.chat.id\n user_id = update.message.from_user.id\n BotActions.add_user(user_id, chat_id)\n BotActions.incrementa_mensajes(user_id, chat_id)\n list_id = BotActions.read_ids_from_file(\"ids.txt\")\n if update.message.from_user.id in list_id:\n to_twitter = TweetFromTelegram()\n text_to_tweet = update.message.text[7:]\n text_to_tweet = text_to_tweet.encode('utf-8')\n link = to_twitter.new_tweet(text_to_tweet)\n if link == \"error\":\n bot.send_message(chat_id=update.message.chat.id,\n text=\"Intenta no poner carácteres 
especiales :)\",\n reply_to_message_id=update.message.message_id)\n else:\n mensaje = \"Ya he publicado tu tweet: \" + link\n BotActions.tweet_to_log(link, update.message.from_user.first_name)\n bot.send_message(chat_id=update.message.chat.id, text=mensaje,\n reply_to_message_id=update.message.message_id)\n else:\n bot.send_message(chat_id=update.message.chat.id,\n text=\"Creo que no se te permite enviar tweets... :s\",\n reply_to_message_id=update.message.message_id)\n\n @staticmethod\n def tweet_to_log(link, user_name):\n opened_file = open(\"tweets.log\", \"a\")\n hour = str(gmtime().tm_hour + 2)\n minute = str(gmtime().tm_min)\n secs = str(gmtime().tm_sec)\n month = str(gmtime().tm_mon)\n day = str(gmtime().tm_mday)\n year = str(gmtime().tm_year)\n log_string = hour + \":\" + minute + \":\" + secs + \" at \" + day + \"/\" + month + \"/\" + year + \": \"\n log_string += user_name + \", \" + link + \"\\n\"\n opened_file.write(log_string)\n\n @staticmethod\n def read_ids_from_file(file_name):\n opened_file = open(file_name, 'r')\n ids = []\n has_next = True\n while has_next:\n line = opened_file.readline()\n if not line:\n has_next = False\n else:\n ids.append(int(line))\n return ids\n\n @staticmethod\n def search(bot, update):\n chat_id = update.message.chat.id\n user_id = update.message.from_user.id\n BotActions.add_user(user_id, chat_id)\n BotActions.incrementa_mensajes(user_id, chat_id)\n # si en el grupo hay más de un bot hay que arreglar la mención de /search@PyTel_bot\n text = update.message.text[8:]\n text = text.encode('utf-8')\n SpecialActions.create_image_search(\"meme_template_search.png\", text)\n bot.send_photo(chat_id=chat_id,\n photo=open(\"generated_meme_search.png\", 'rb'),\n reply_to_message_id=update.message.message_id)\n os.remove(\"generated_meme_search.png\")\n\n @staticmethod\n def sad_reacts(bot, update):\n chat_id = update.message.chat.id\n user_id = update.message.from_user.id\n BotActions.add_user(user_id, chat_id)\n BotActions.incrementa_mensajes(user_id, chat_id)\n video = open(\"/home/pi/Documentos/pytel_stuff/sad_reactions_only.mp4\", 'rb')\n bot.send_video(chat_id=chat_id,\n reply_to_message_id=update.message.message_id,\n video=video, caption=\"sad reacts only\")\n\n @staticmethod\n def pole(bot, update):\n # Working\n current_time = update.message.date\n chat_id = update.message.chat.id\n user_id = update.message.from_user.id\n BotActions.add_user(user_id, chat_id)\n BotActions.incrementa_mensajes(user_id, chat_id)\n if chat_id != user_id:\n if current_time.hour == 0 and (0 <= current_time.minute < 15):\n if update.message.chat.id not in BotActions.dict_pole:\n BotActions.dict_pole[update.message.chat.id] = update.message.from_user.id\n BotActions.incrementa_pole(user_id, chat_id)\n pole_text = u\"Muy bien crack has hecho la pole\"\n to_twitter = TweetFromTelegram()\n text_to_tweet = u\"¡La pole se la ha llevado \"\n text_to_tweet += BotActions.get_twitter_acc(update.message.from_user.id)\n text_to_tweet += u\" desde el grupo \"\n text_to_tweet += update.message.chat.title + \"!\"\n text_to_tweet = text_to_tweet.encode('utf-8')\n to_twitter.new_tweet(text_to_tweet)\n else:\n pole_text = u\"nice try, máquina\"\n else:\n pole_text = u\"No estás en horario de pole... 
:S\"\n else:\n pole_text = u\"Esta macro solo funciona en grupos\"\n bot.send_message(chat_id=update.message.chat.id,\n reply_to_message_id=update.message.message_id,\n text=pole_text)\n\n @staticmethod\n def happy(bot, update):\n chat_id = update.message.chat.id\n user_id = update.message.from_user.id\n BotActions.add_user(user_id, chat_id)\n BotActions.incrementa_mensajes(user_id, chat_id)\n bot.send_message(chat_id=update.message.chat.id,\n text=\"cállate ya macho\",\n reply_to_message_id=update.message.message_id)\n\n @staticmethod\n def not_happy(bot, update):\n chat_id = update.message.chat.id\n user_id = update.message.from_user.id\n BotActions.add_user(user_id, chat_id)\n BotActions.incrementa_mensajes(user_id, chat_id)\n bot.send_message(chat_id=update.message.chat.id,\n text=\"alegra esa cara de comepollas que tienes\",\n reply_to_message_id=update.message.message_id)\n\n @staticmethod\n def botijo_react(bot, update):\n chat_id = update.message.chat.id\n user_id = update.message.from_user.id\n BotActions.add_user(user_id, chat_id)\n BotActions.incrementa_mensajes(user_id, chat_id)\n bot.send_message(chat_id=update.message.chat.id,\n text=\"like! ;)\",\n reply_to_message_id=update.message.message_id)\n\n @staticmethod\n def hora_porro(bot, update):\n chat_id = update.message.chat.id\n user_id = update.message.from_user.id\n BotActions.add_user(user_id, chat_id)\n BotActions.incrementa_mensajes(user_id, chat_id)\n current_time = update.message.date\n if chat_id != user_id:\n if current_time.hour == 4 and current_time.minute == 20:\n if update.message.chat.id not in BotActions.dict_porro:\n BotActions.dict_porro[update.message.chat.id] = update.message.from_user.id\n BotActions.incrementa_porro(user_id, chat_id)\n porro_text = u\"Vaya fiera, te has llevado la hora porro bro\"\n to_twitter = TweetFromTelegram()\n text_to_tweet = u\"¡La hora porro se la lleva \"\n text_to_tweet += BotActions.get_twitter_acc(update.message.from_user.id)\n text_to_tweet += u\" desde el grupo \"\n text_to_tweet += update.message.chat.title + \"!\"\n text_to_tweet = text_to_tweet.encode('utf-8')\n to_twitter.new_tweet(text_to_tweet)\n else:\n porro_text = u\"Ya se han llevado la hora porro ;)\"\n else:\n porro_text = u\"No estás en el horario necesario... 
>_<\"\n else:\n porro_text = u\"Esta macro solo funciona en grupos\"\n bot.send_message(chat_id=update.message.chat.id,\n reply_to_message_id=update.message.message_id,\n text=porro_text)\n\n @staticmethod\n def horacio_pi(bot, update):\n current_time = update.message.date\n chat_id = update.message.chat.id\n user_id = update.message.from_user.id\n BotActions.add_user(user_id, chat_id)\n BotActions.incrementa_mensajes(user_id, chat_id)\n if chat_id != user_id:\n if current_time.hour == 3 and current_time.minute == 14:\n if update.message.chat.id not in BotActions.dict_pi:\n BotActions.dict_pi[update.message.chat.id] = update.message.from_user.id\n BotActions.incrementa_pi(user_id, chat_id)\n pi_text = u\"Te acabas de llevar la horacio pi :O\"\n to_twitter = TweetFromTelegram()\n text_to_tweet = u\"¡La hora pi se la lleva \"\n text_to_tweet += BotActions.get_twitter_acc(update.message.from_user.id)\n text_to_tweet += u\" desde el grupo \"\n text_to_tweet += update.message.chat.title + \"!\"\n text_to_tweet = text_to_tweet.encode('utf-8')\n to_twitter.new_tweet(text_to_tweet)\n else:\n pi_text = u\"Fuiste demasiado lento para la horacio pi :/\"\n else:\n pi_text = u\"Que te jodan, no estás en horario pi\"\n else:\n pi_text = u\"Esa macro solo funciona en grupos :(\"\n bot.send_message(chat_id=update.message.chat.id,\n reply_to_message_id=update.message.message_id,\n text=pi_text)\n\n @staticmethod\n def comunist_meme(bot, update):\n chat_id = update.message.chat.id\n user_id = update.message.from_user.id\n BotActions.add_user(user_id, chat_id)\n BotActions.incrementa_mensajes(user_id, chat_id)\n video = open(\"/home/pi/Documentos/pytel_stuff/comunist_meme.mp4\", 'rb')\n bot.send_video(chat_id=chat_id,\n reply_to_message_id=update.message.message_id,\n video=video, caption=\"communism will prevail!\")\n\n @staticmethod\n def add_user(user_id, chat_id):\n # WORKING\n \"\"\"Add a new user into the Data Base. 
It also creates the communication between this class and the Data Base\"\"\"\n if BotActions.data is None:\n BotActions.data = Almacenamiento(\"/home/pi/Documentos/pytel_stuff/data.db\")\n user = User(user_id)\n if BotActions.data.obtener_usuario(user) is None:\n BotActions.data.insertar_usuario(user)\n if chat_id != user_id:\n user = UserGroup(user_id, chat_id)\n if BotActions.data.obtener_usuario_del_grupo(user) is None:\n BotActions.data.insertar_usuario_del_grupo(user)\n current_time = datetime.now()\n if not BotActions.dict_pole and ((current_time.hour == 0 and current_time.minute >= 15)\n or current_time.hour > 0):\n BotActions.dict_pole = {}\n if not BotActions.dict_pi and ((current_time.hour == 3 and current_time.minute >= 14)\n or current_time.hour > 3):\n BotActions.dict_pi = {}\n if not BotActions.dict_porro and ((current_time.hour == 4 and current_time.minute >= 20)\n or current_time.hour > 4):\n BotActions.dict_porro = {}\n\n @staticmethod\n def mensajes_callback(bot, update):\n user_id = update.message.from_user.id\n chat_id = update.message.chat.id\n BotActions.add_user(user_id, chat_id)\n BotActions.incrementa_mensajes(user_id, chat_id)\n\n @staticmethod\n def incrementa_mensajes(user_id, chat_id):\n # WORKING\n if chat_id != user_id:\n user = UserGroup(user_id, chat_id)\n BotActions.data.aumentar_message_number(user)\n\n @staticmethod\n def incrementa_nudes(user_id, chat_id):\n user = User(user_id)\n BotActions.data.aumentar_nude_number(user)\n BotActions.incrementa_mensajes(user_id, chat_id)\n\n @staticmethod\n def incrementa_ping(user_id, chat_id):\n # Work\n user = User(user_id)\n BotActions.data.aumentar_ping_number(user)\n BotActions.incrementa_mensajes(user_id, chat_id)\n\n @staticmethod\n def incrementa_porro(user_id, chat_id):\n user = UserGroup(user_id, chat_id)\n BotActions.data.aumentar_porro_number(user)\n\n @staticmethod\n def incrementa_pole(user_id, chat_id):\n user = UserGroup(user_id, chat_id)\n BotActions.data.aumentar_pole_number(user)\n\n @staticmethod\n def incrementa_pi(user_id, chat_id):\n user = UserGroup(user_id, chat_id)\n BotActions.data.aumentar_pi_number(user)\n\n @staticmethod\n def incrementa_animales(user_id, chat_id):\n user = User(user_id)\n BotActions.data.aumentar_animal_number(user)\n BotActions.incrementa_mensajes(user_id, chat_id)\n\n @staticmethod\n def add_twitter_account(bot, update):\n chat_id = update.message.chat.id\n user_id = update.message.from_user.id\n BotActions.add_user(user_id, chat_id)\n BotActions.incrementa_mensajes(user_id, chat_id)\n if chat_id != user_id:\n text = u\"Este comando solo se puede usar en un chat privado\"\n else:\n twitter_acc = update.message.text[12:]\n if not twitter_acc:\n text = u'No es un formato válido para una cuenta de twitter :('\n elif twitter_acc[0] != '@':\n text = u'No es un formato válido para una cuenta de twitter :('\n else:\n user = BotActions.get_user(user_id)\n user.twitter_user = twitter_acc\n BotActions.data.modificar_usuario(user)\n text = u'Se ha añadido la cuenta de twitter ' + twitter_acc\n bot.send_message(chat_id=chat_id,\n text=text)\n\n @staticmethod\n def get_twitter_acc(user_id):\n \"\"\"Return the twitter account from de Data Base\"\"\"\n user = BotActions.get_user(user_id)\n return user.twitter_user\n\n @staticmethod\n def get_user_group(user_id, chat_id):\n # WORK\n \"\"\"Return the User Group from the Data Base\"\"\"\n user = UserGroup(user_id, chat_id)\n user = BotActions.data.obtener_usuario_del_grupo(user)\n return user\n\n @staticmethod\n def 
get_user(user_id):\n # WORK\n \"\"\"Return the user from the Data Base\"\"\"\n user = User(user_id)\n user = BotActions.data.obtener_usuario(user)\n return user\n\n @staticmethod\n def get_messages(user_id, chat_id):\n # WORK\n \"\"\"Return a text with all the number of messages that sent that user\"\"\"\n user = BotActions.get_user_group(user_id, chat_id)\n mensajes = user.message_number\n message_text = \"Has enviado \" + str(mensajes) + \" mensajes!\"\n return message_text\n\n @staticmethod\n def get_pole(user_id, chat_id):\n # WORK\n \"\"\"Return a text with all the number of poles that made that user\"\"\"\n user = BotActions.get_user_group(user_id, chat_id)\n poles = user.pole_number\n pole_text = \"Has hecho \" + str(poles) + \" poles!\"\n return pole_text\n\n @staticmethod\n def get_porro(user_id, chat_id):\n # WORK\n \"\"\"Return a text with all the number of porros that made that user\"\"\"\n user = BotActions.get_user_group(user_id, chat_id)\n porros = user.porro_number\n porro_text = \"Has hecho \" + str(porros) + \" horas porro!\"\n return porro_text\n\n @staticmethod\n def get_pi(user_id, chat_id):\n # WORK\n \"\"\"Return a text with all the number of pis that made that user\"\"\"\n user = BotActions.get_user_group(user_id, chat_id)\n pi_number = user.pi_number\n pi_text = \"Has hecho \" + str(pi_number) + \" horas pi!\"\n return pi_text\n\n @staticmethod\n def info_user_group(bot, update):\n # WORKING\n \"\"\"Send a message with all the info from the user group\"\"\"\n user_id = update.message.from_user.id\n chat_id = update.message.chat.id\n if chat_id != user_id:\n BotActions.add_user(user_id, chat_id)\n BotActions.incrementa_mensajes(user_id, chat_id)\n user_name = update.message.from_user.first_name + \"\\n\"\n info_text_group = BotActions.info_text(user_id, chat_id)\n info_text_personal = BotActions.info_text_personal(user_id)\n message_text = u\"Estas son las estadísticas grupales de \" + user_name + info_text_group\n message_text += u\"Estas son las estadísticas personales de \" + user_name + info_text_personal\n message_text = message_text.encode('utf-8')\n else:\n message_text = u\"Este comando solo se puede usar en un grupo :(\"\n bot.send_message(chat_id=chat_id, text=message_text)\n\n @staticmethod\n def info_text(user_id, chat_id):\n # WORKING\n info_text_group = BotActions.get_messages(user_id, chat_id) + \"\\n\"\n info_text_group += BotActions.get_pole(user_id, chat_id) + \"\\n\"\n info_text_group += BotActions.get_porro(user_id, chat_id) + \"\\n\"\n info_text_group += BotActions.get_pi(user_id, chat_id) + \"\\n\"\n return info_text_group\n\n @staticmethod\n def info_text_personal(user_id):\n # WORKING\n info_text_personal = BotActions.get_nudes(user_id) + \"\\n\"\n info_text_personal += BotActions.get_pings(user_id) + \"\\n\"\n info_text_personal += BotActions.get_animals(user_id) + \"\\n\"\n info_text_personal += BotActions.get_all_messages(user_id) + \"\\n\"\n return info_text_personal\n\n @staticmethod\n def get_nudes(user_id):\n # WORKING\n user = BotActions.get_user(user_id)\n nude_number = user.nude_number\n nudes_text = \"Has usado \" + str(nude_number) + \" el comando /nudes!\"\n return nudes_text\n\n @staticmethod\n def get_pings(user_id):\n # WORKING\n user = BotActions.get_user(user_id)\n ping_number = user.ping_number\n ping_text = \"Has usado \" + str(ping_number) + \" el comando /ping!\"\n return ping_text\n\n @staticmethod\n def get_animals(user_id):\n # WORKING\n user = BotActions.get_user(user_id)\n animal_number = user.animal_number\n 
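# Editorial note: each get_* helper reads the counter that its matching\n        # incrementa_* method accumulates through Almacenamiento (backed by\n        # data.db, presumably SQLite), so the reply reflects the persisted\n        # total rather than any in-memory state.\n        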
animal_text = \"Has usado \" + str(animal_number) + \" el comando /animals!\"\n return animal_text\n\n @staticmethod\n def get_all_messages(user_id):\n user = BotActions.get_user(user_id)\n total_messages = BotActions.data.calcular_total_mensajes(user)\n mensaje_total = \"En total has enviado \" + str(total_messages) + \" mensajes en todos los grupos!\"\n return mensaje_total\n\n @staticmethod\n def send_twitter_acc(bot, update):\n chat_id = update.message.chat_id\n user_id = update.message.from_user.id\n BotActions.add_user(user_id, chat_id)\n BotActions.incrementa_mensajes(user_id, chat_id)\n user = BotActions.get_user(user_id)\n twitter_account = user.twitter_user\n if not twitter_account:\n text = u\"No hay ninguna cuenta asociada actualmente :(\"\n else:\n text = u\"Ésta es la cuenta que tienes asociada actualmente: \" + twitter_account\n bot.send_message(chat_id=user_id, text=text)\n\n @staticmethod\n def easy_command(bot, update):\n chat_id = update.message.chat.id\n user_id = update.message.from_user.id\n BotActions.add_user(user_id, chat_id)\n BotActions.incrementa_mensajes(user_id, chat_id)\n bot.send_message(chat_id=chat_id,\n text=\"que es facil\",\n reply_to_message_id=update.message.message_id)\n\n @staticmethod\n def insulto_method(bot, update):\n chat_id = update.message.chat.id\n user_id = update.message.from_user.id\n BotActions.add_user(user_id, chat_id)\n BotActions.incrementa_mensajes(user_id, chat_id)\n name = update.message.text[10:]\n insulto = BotActions.get_random_insult(\"insultos.txt\")\n bot.send_message(chat_id=chat_id,\n text=name + \" eres un \" + insulto)\n\n @staticmethod\n def get_random_insult(file_name):\n insults = BotActions.read_lines(file_name)\n lines = len(insults)\n random_pos = int(round(random.random() * lines, 0))\n return insults[random_pos][0:-1]\n\n @staticmethod\n def read_lines(file_name):\n list_ret = []\n opened_file = open(file_name, 'rb')\n has_next = True\n while has_next:\n line = opened_file.readline().lower().decode('utf-8')\n if not line:\n has_next = False\n else:\n list_ret.append(line)\n return list_ret\n\n @staticmethod\n def graciasReact(bot, update):\n chat_id = update.message.chat.id\n user_id = update.message.from_user.id\n BotActions.add_user(user_id, chat_id)\n BotActions.incrementa_mensajes(user_id, chat_id)\n bot.send_message(chat_id=chat_id, text='de nada supollita', reply_to_message_id=update.message.message_id)\n\n @staticmethod\n def when_te_pasa(bot, update):\n chat_id = update.message.chat.id\n user_id = update.message.from_user.id\n BotActions.add_user(user_id, chat_id)\n BotActions.incrementa_mensajes(user_id, chat_id)\n bot.send_message(chat_id=chat_id,\n text=\"si xD\",\n reply_to_message_id=update.message.message_id)\n\n @staticmethod\n def current_status(bot, update):\n chat_id = update.message.chat.id\n user_id = update.message.from_user.id\n BotActions.add_user(user_id, chat_id)\n BotActions.incrementa_mensajes(user_id, chat_id)\n bot.send_message(chat_id=chat_id,\n text=BotActions.status_message())\n\n @staticmethod\n def status_message():\n uptime_command = subprocess.check_output([\"uptime\"])\n tokenizer = nltk.tokenize.RegexpTokenizer(r'[0-9:]+')\n tokenized_uptime = tokenizer.tokenize(uptime_command)\n actual_uptime = tokenized_uptime[1]\n\n current_temp = subprocess.check_output([\"/opt/vc/bin/vcgencmd\", \"measure_temp\"])\n current_mem = subprocess.check_output([\"free\", \"-h\"])\n current_mem = current_mem.splitlines()\n tokenizer = nltk.tokenize.RegexpTokenizer(r'[M0-9]+')\n tokenized_mem = 
    @staticmethod\n    def status_message():\n        # check_output returns bytes; decode before handing the text to the tokenizers\n        uptime_command = subprocess.check_output([\"uptime\"]).decode()\n        tokenizer = nltk.tokenize.RegexpTokenizer(r'[0-9:]+')\n        tokenized_uptime = tokenizer.tokenize(uptime_command)\n        actual_uptime = tokenized_uptime[1]\n\n        current_temp = subprocess.check_output([\"/opt/vc/bin/vcgencmd\", \"measure_temp\"]).decode().strip()\n        current_mem = subprocess.check_output([\"free\", \"-h\"]).decode()\n        current_mem = current_mem.splitlines()\n        tokenizer = nltk.tokenize.RegexpTokenizer(r'[M0-9]+')\n        tokenized_mem = tokenizer.tokenize(current_mem[1])\n        cont = 0\n        used_mem = None\n        free_mem = None\n        for items in tokenized_mem:\n            if cont == 2:\n                used_mem = items\n            elif cont == 3:\n                free_mem = items\n            cont += 1\n        message = u\"Current RPI 3 status: \\n\" + \"Used Memory: \" + str(used_mem)\n        message += u\"\\nFree Memory: \" + str(free_mem) + \"\\n\" + str(current_temp) + \"\\n\"\n        message += u\"Uptime: \" + str(actual_uptime) + \"\\n\"\n        return message\n\n    @staticmethod\n    def thicc(bot, update):\n        chat_id = update.message.chat.id\n        user_id = update.message.from_user.id\n        BotActions.add_user(user_id, chat_id)\n        BotActions.incrementa_mensajes(user_id, chat_id)\n        # media files must be opened in binary mode for Telegram uploads\n        bot.send_photo(chat_id=chat_id,\n                       photo=open('/home/pi/Documentos/pytel_stuff/192.png', 'rb'),\n                       reply_to_message_id=update.message.message_id)\n\n    @staticmethod\n    def spain(bot, update):\n        chat_id = update.message.chat.id\n        user_id = update.message.from_user.id\n        BotActions.add_user(user_id, chat_id)\n        BotActions.incrementa_mensajes(user_id, chat_id)\n        bot.send_photo(chat_id=chat_id,\n                       photo=open('/home/pi/Documentos/pytel_stuff/spainreact.jpg', 'rb'),\n                       reply_to_message_id=update.message.message_id)\n\n    @staticmethod\n    def cocaine(bot, update):\n        chat_id = update.message.chat.id\n        user_id = update.message.from_user.id\n        BotActions.add_user(user_id, chat_id)\n        BotActions.incrementa_mensajes(user_id, chat_id)\n        bot.send_video(chat_id=chat_id,\n                       video=open('/home/pi/Documentos/pytel_stuff/cocaine.mp4', 'rb'),\n                       reply_to_message_id=update.message.message_id)\n\n    @staticmethod\n    def sad(bot, update):\n        chat_id = update.message.chat.id\n        user_id = update.message.from_user.id\n        BotActions.add_user(user_id, chat_id)\n        BotActions.incrementa_mensajes(user_id, chat_id)\n        bot.send_message(chat_id=chat_id,\n                         text=\"sad reacts only\",\n                         reply_to_message_id=update.message.message_id)\n\n    @staticmethod\n    def reverte(bot, update):\n        chat_id = update.message.chat.id\n        user_id = update.message.from_user.id\n        BotActions.add_user(user_id, chat_id)\n        BotActions.incrementa_mensajes(user_id, chat_id)\n        # randrange picks a valid sticker index; the old round()-based expression\n        # relied on negative indexing to stay in bounds\n        rnd = random.randrange(len(BotActions.stickers))\n        bot.sendSticker(chat_id=chat_id, sticker=BotActions.stickers[rnd],\n                        reply_to_message_id=update.message.message_id)\n\n    @staticmethod\n    def reverted(bot, update):\n        chat_id = update.message.chat.id\n        user_id = update.message.from_user.id\n        BotActions.add_user(user_id, chat_id)\n        BotActions.incrementa_mensajes(user_id, chat_id)\n        bot.send_photo(chat_id=chat_id, photo=open('/home/pi/Documentos/pytel_stuff/reverted.png', 'rb'))\n\n    @staticmethod\n    def xd_react(bot, update):\n        chat_id = update.message.chat.id\n        user_id = update.message.from_user.id\n        BotActions.add_user(user_id, chat_id)\n        BotActions.incrementa_mensajes(user_id, chat_id)\n        bot.send_message(chat_id=chat_id, text=\"XD lol\")\n\n    #\n    # TODO\n    # @staticmethod\n    # def habeces(bot, update):\n    #     pass\n    # @staticmethod\n    # def gracias(bot, update):\n    #     pass\n","sub_path":"bot_actions.py","file_name":"bot_actions.py","file_ext":"py","file_size_in_byte":32643,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"289103369","text":"import os\nimport sys\nimport numpy as np\nimport pandas as pd\nimport geopandas as gpd\nimport argparse\nimport torch\nimport tqdm\nimport segmentation_models_pytorch as smp\nfrom torch.utils.data import DataLoader, Dataset\nimport cv2\nfrom shapely.wkt import loads as wkt_loads\nimport shapely.wkt\nimport rasterio\nimport shapely\nfrom rasterio import features\nimport shapely.geometry\nimport 
shapely.affinity\nfrom scipy import ndimage\nimport shutil\nimport gc\n\nsys.path.append('.')\nimport solaris as sol\nfrom albumentations.pytorch.transforms import ToTensor\nfrom albumentations import (\n Compose,\n Normalize\n)\n\n\ndef mask2box(mask):\n y1, y2, x1, x2 = np.where(mask == 1)[0].min(), np.where(mask == 1)[0].max(), np.where(mask == 1)[1].min(), \\\n np.where(mask == 1)[1].max()\n return y1, y2, x1, x2\n\n\ndef mask2box_xminyminxmaxymax(mask):\n y1, y2, x1, x2 = np.where(mask == 1)[0].min(), np.where(mask == 1)[0].max(), np.where(mask == 1)[1].min(), \\\n np.where(mask == 1)[1].max()\n return x1, y1, x2, y2\n\n\ndef colormask2boxes(mask):\n \"\"\"\n Args:\n mask: [height,width], mask values, integers 0-255, 0=background\n Returns:\n list of bboxes (bbox is a list of 4 numbers, [xmin, ymin, xmax, ymax])\n \"\"\"\n boxes = []\n if mask.sum() > 0:\n # for i in range(1,len(np.unique(mask))):\n for i in [x for x in np.unique(mask) if x not in [0]]:\n x1y1x2y2 = mask2box_xminyminxmaxymax(mask == i)\n boxes.append([x1y1x2y2[0], x1y1x2y2[1], x1y1x2y2[2], x1y1x2y2[3]])\n return boxes\n\n\nsigmoid = lambda x: 1 / (1 + np.exp(-x))\n\n\n# def denormalize(x_batch):\n# #x_batch of shape batch_size,channels,height,width\n# x_batch2=x_batch.numpy().copy()\n# mean=[0.485, 0.456, 0.406]\n# std=[0.229, 0.224, 0.225]\n# for i in range(3):\n# x_batch2[:,i,...] = x_batch2[:,i,...]*std[i] + mean[i]\n# return (np.round(x_batch2*255)).astype('uint8')\n\ndef multimask2mask3d(multimask):\n num_buildings = len(np.unique(multimask))\n if num_buildings > 1:\n mask3d = np.zeros((multimask.shape[0], multimask.shape[1], num_buildings - 1))\n for i in range(1, num_buildings):\n mask3d[..., i - 1][multimask[..., 0] == i] = 1\n else:\n mask3d = np.zeros((multimask.shape[0], multimask.shape[1], 1))\n return (mask3d)\n\n\ndef multimask2mask3d_v2(multimask):\n num_buildings = len(np.unique(multimask))\n if multimask.sum() > 0:\n mask3d = np.zeros((multimask.shape[0], multimask.shape[1], num_buildings - 1))\n # for i in range(1,num_buildings):\n for i in [x for x in np.unique(multimask) if x not in [0]]:\n mask3d[..., i - 1][multimask[..., 0] == i] = 1\n else:\n mask3d = np.zeros((multimask.shape[0], multimask.shape[1], 1))\n return (mask3d)\n\n\ndef multimask2mask3d_v3(multimask):\n num_buildings = len(np.unique(multimask))\n if multimask.sum() > 0:\n mask3d = np.zeros((multimask.shape[0], multimask.shape[1], num_buildings - 1))\n # for i in range(1,num_buildings):\n counter = 0\n for i in [x for x in np.unique(multimask) if x not in [0]]:\n mask3d[..., counter][multimask == i] = 1\n counter += 1\n else:\n mask3d = np.zeros((multimask.shape[0], multimask.shape[1], 1))\n return (mask3d.astype('uint8'))\n\n\ndef mask2buildings(mask):\n maskC = mask.copy()\n maskC_output = np.zeros_like(maskC) # .astype('int32')\n contours, hierarchy = cv2.findContours(maskC, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n for i in range(len(contours)):\n cnt = contours[i]\n maskC_output += (cv2.drawContours(maskC, [cnt], -1, 1, cv2.FILLED) > 127.5).astype('uint8')\n uns = np.unique(maskC_output).copy()\n for ii in range(len(uns)):\n maskC_output[maskC_output == uns[ii]] = ii\n\n return maskC_output\n\n\ndef masks2masknum_v2(masks):\n outmask = np.zeros(masks.shape[1:])\n add = masks.shape[0]\n for m in range(len(masks)):\n outmask += masks[m] * (m + 1 + add)\n un_masks = np.unique(outmask)\n for mm in range(len(un_masks)):\n outmask[outmask == un_masks[mm]] = mm\n return outmask # .astype('uint8')\n\n\ndef masks2masknum(masks):\n outmask 
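# --- Editor's sketch (alternative formulation, not used by the pipeline) ---
# The multimask2mask3d_* helpers above all split an integer-labelled mask into
# one binary channel per building; scipy.ndimage.label plus a comparison per
# label id does the same thing compactly.
import numpy as np
from scipy import ndimage

def split_instances(binary_mask):
    labels, n = ndimage.label(binary_mask)
    if n == 0:
        return np.zeros(binary_mask.shape + (1,), dtype='uint8')
    return np.stack([(labels == i) for i in range(1, n + 1)], axis=-1).astype('uint8')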
= np.zeros(masks.shape[1:])\n for m in range(len(masks)):\n outmask += masks[m] * (m + 1)\n return outmask\n\n\ndef mask_to_polygon(mask):\n all_polygons = []\n lens=[]\n for shape, value in features.shapes(mask.astype(np.int16), mask=(mask == 1), transform=rasterio.Affine(1.0, 0, 0, 0, 1.0, 0)):\n# print(value)\n# print(len(shape['coordinates'][0]))\n all_polygons.append(shapely.geometry.shape(shape))\n lens.append(len(shape['coordinates'][0]))\n# print(np.argmax(lens))\n all_polygons = shapely.geometry.Polygon(all_polygons[np.argmax(lens)])\n if not all_polygons.is_valid:\n all_polygons = all_polygons.buffer(0)\n # Sometimes buffer() converts a simple Multipolygon to just a Polygon,\n # need to keep it a Multi throughout\n# if all_polygons.type == 'Polygon':\n# all_polygons = shapely.geometry.MultiPolygon([all_polygons])\n return all_polygons\n\ndef _convert_coordinates_to_raster(coords, img_size, xymax):\n x_max, y_max = xymax\n height, width = img_size\n W1 = 1.0 * width * width / (width + 1)\n H1 = 1.0 * height * height / (height + 1)\n xf = W1 / x_max\n yf = H1 / y_max\n coords[:, 1] *= yf\n coords[:, 0] *= xf\n coords_int = np.round(coords).astype(np.int32)\n return coords_int\n\n\ndef _plot_mask_from_contours(raster_img_size, contours, class_value=1):\n img_mask = np.zeros(raster_img_size, np.int8)\n if contours is None:\n return img_mask\n perim_list, interior_list = contours\n# print(interior_list)\n cv2.fillPoly(img_mask, perim_list, class_value)\n# img_mask[np.array(list(proposalcsv.PolygonWKT_Pix.values[-1].exterior.coords)).astype(int)]=0\n cv2.fillPoly(img_mask, interior_list, 0)\n return img_mask\n\ndef _get_and_convert_contours(onepolygon, raster_img_size, xymax):\n perim_list = []\n interior_list = []\n# if onepolygon is None:\n# return None\n# for k in range(len(onepolygon)):\n poly = onepolygon\n# for ppp in poly.interiors:\n# print(ppp)\n perim = np.array(list(poly.exterior.coords))\n perim_c = _convert_coordinates_to_raster(perim, raster_img_size, xymax)\n perim_list.append(perim_c)\n for pi in poly.interiors:\n interior = np.array(list(pi.coords))\n interior_c = _convert_coordinates_to_raster(interior, raster_img_size, xymax)\n interior_list.append(interior_c)\n return perim_list, interior_list\n\ndef polygon2mask(polygon, width, height):\n xymax = (900,900)\n\n mask = np.zeros(( width, height))\n\n# for i, p in enumerate(polygons):\n i=0\n polygon_list = wkt_loads(str(polygon))\n# if polygon_list.length == 0:\n# continue\n contours = _get_and_convert_contours(polygon_list, (width, height), xymax)\n mask = _plot_mask_from_contours((width, height), contours, 1)\n return mask\n\n\ndef read_jpg(tile_id, data_folder='test_path'):\n image_name = tile_id\n image_path = data_folder + image_name\n img = cv2.imread(image_path)\n return img\n\n\ndef jpg_to_tensor(img, transforms, preprocessing=None):\n augmented = transforms(image=img)\n img = augmented['image']\n if preprocessing is not None:\n preprocessed = preprocessing(image=img, mask=np.zeros_like(img).astype('uint8'))\n img = preprocessed['image']\n return img\n\n\ndef patch_left_right_fixed(im1, mask1, im2, mask2):\n r = 0.5\n mid = max(2, int(im1.shape[0] * r))\n img_new = np.zeros_like(im1)\n img_new[:, :mid, :] = im1[:, -mid:, :]\n img_new[:, mid:, :] = im2[:, :-mid, :]\n mask_new = np.zeros_like(mask1)\n mask_new[:, :mid] = mask1[:, -mid:]\n mask_new[:, mid:] = mask2[:, :-mid]\n\n return img_new, mask_new\n\n\ndef patch_top_down_fixed(im1, mask1, im2, mask2):\n r = 0.5\n mid = max(2, int(im1.shape[0] * r))\n 
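# --- Editor's sketch: toy round-trip check for mask_to_polygon above ---
# The identity Affine transform in mask_to_polygon keeps coordinates in pixel
# space, so a filled rectangle should come back with matching bounds.
import numpy as np

toy = np.zeros((8, 8), dtype='uint8')
toy[2:5, 2:6] = 1
poly = mask_to_polygon(toy)
print(poly.bounds)  # expected: roughly (2.0, 2.0, 6.0, 5.0)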
img_new = np.zeros_like(im1)\n img_new[:mid, :, :] = im1[-mid:, :, :]\n img_new[mid:, :, :] = im2[:-mid, :, :]\n mask_new = np.zeros_like(mask1)\n mask_new[:mid, :] = mask1[-mid:, :]\n mask_new[mid:, :] = mask2[:-mid, :]\n\n return img_new, mask_new\n\n\nclass BuildingsDatasetInferenceCombined(Dataset):\n def __init__(self, img_ids: np.array = None, combImages=None,\n transforms=None,\n preprocessing=None):\n self.combImages = combImages\n self.img_ids = img_ids\n self.transforms = transforms\n self.preprocessing = preprocessing\n\n def __getitem__(self, idx):\n img = self.combImages[idx]\n\n augmented = self.transforms(image=img, mask=np.zeros_like(img).astype('uint8'))\n img = augmented['image']\n mask = augmented['mask']\n if self.preprocessing:\n preprocessed = self.preprocessing(image=img, mask=np.zeros_like(img).astype('uint8'))\n img = preprocessed['image']\n mask = preprocessed['mask']\n\n return img, mask\n\n def __len__(self):\n return len(self.img_ids)\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='SpaceNet 6 Baseline Algorithm')\n parser.add_argument('--testdata',\n help='BaseDir')\n parser.add_argument('--outputfile',\n help='Output directory')\n\n\n args = parser.parse_args(sys.argv[1:])\n\n print ('torch', torch.__version__)\n print ('gpd', gpd.__version__)\n print (\"solaris\", sol.__version__)\n\n test_data_path = args.testdata\n output_file = args.outputfile\n spacenet_out_dir = os.path.join(os.path.curdir, 'data/')\n\n spacenet_test_sar_path = os.path.join(test_data_path , 'SAR-Intensity/')\n\n print ('Base dir :', spacenet_test_sar_path)\n print ('Output dir :', spacenet_out_dir)\n\n #\n # Copy orientation to output as well...\n orientation_file = os.path.join('./', 'SAR_orientations.txt')\n if os.path.exists(orientation_file):\n print('SAR_orientations.txt exists')\n else:\n print ('FATAL SAR_orientations.txt missing')\n exit(1)\n\n import datagen\n from nasiosdataprocess import createdir, write_test_images\n\n #\n # 0. Nasios pipeline data prep\n test_save_path = os.path.join(spacenet_out_dir, 'test_sar_productscale_orient/')\n\n spacenet_test_sar = os.listdir(spacenet_test_sar_path)\n spacenet_test_sar = np.sort(spacenet_test_sar)\n orient_df = pd.read_csv(orientation_file, header=None, sep=\" \")\n orient_df.columns = ['date', 'orient']\n testtifsdates = [('_').join(x.split('-')[1][10:].split('_')[:2]) for x in spacenet_test_sar]\n mines = np.load('productminesAllBoth.npy')\n maxes = np.load('productmaxesAllBoth.npy')\n\n if not os.path.exists(test_save_path):\n createdir(test_save_path)\n write_test_images(orient_df, test_save_path, spacenet_test_sar_path, testtifsdates, mines, maxes)\n tmp = os.listdir(test_save_path)\n print('nasios test images created', len(tmp))\n else:\n tmp = os.listdir(test_save_path)\n print('nasios test images exist', len(tmp) )\n shutil.rmtree(test_save_path)\n createdir(test_save_path)\n write_test_images(orient_df, test_save_path, spacenet_test_sar_path, testtifsdates, mines, maxes)\n tmp = os.listdir(test_save_path)\n print('nasios test images created', len(tmp))\n\n #\n # 2. 
Test on experiments\n from experiments import infer_one, create_model_optimizer\n from experiments import experiments as exps_vog\n from nasios import experiments1 as exp_nas1\n from nasios import experiments2 as exp_nas2\n exp_nas = exp_nas1 + exp_nas2\n\n from nasios import BuildingsDatasetBorders, get_preprocessing, get_validation_augmentation\n\n test_ids = [x[:-4] for x in os.listdir(test_save_path)]\n test_tiles = ['_'.join(x.split('_')[-4:-1]) for x in test_ids]\n test_tiles_nums = [int(x.split('_')[-1]) for x in test_ids]\n sortorder = np.argsort(test_tiles_nums)\n test_ids = list((np.array(test_ids)[sortorder]))\n\n\n test_ids2 = []\n for untile in np.unique(test_tiles):\n test_images_part = [x for x in test_ids if untile in x]\n test_ids2.extend(test_images_part)\n test_ids = test_ids2[:]\n test_ids_jpg = [x + '.jpg' for x in test_ids]\n\n test_ids_vog = ['_'.join(f.split('_')[-4:]) for f in test_ids]\n pream = '_'.join(test_ids[0].split('_')[:-4]) + '_'\n print('pream', pream)\n\n test_tiles_nums_nums = []\n for untile in np.unique(test_tiles):\n num = 0\n test_images_part = [x for x in test_ids if untile in x]\n test_tiles_nums = np.array([int(x.split('_')[-1]) for x in test_images_part])\n test_tiles_nums2 = ((test_tiles_nums - test_tiles_nums[0]) / 2).astype('int')\n test_tiles_nums_nums.extend(list(test_tiles_nums2))\n\n #pd.DataFrame(test_ids).to_csv(\"test_ids.csv\")\n #pd.DataFrame(test_tiles_nums).to_csv(\"test_tiles_nums.csv\")\n #pd.DataFrame(test_tiles_nums_nums).to_csv(\"test_tiles_nums_nums.csv\")\n\n #\n # Accumulate all preds here\n final_preds = np.zeros((len(test_ids), 900, 900), dtype='float32')\n final_w = 0\n final_preds_borders = np.zeros((len(test_ids), 900, 900), dtype='float32')\n final_w_borders = 0\n\n\n test_df = pd.DataFrame({'ImageId': test_ids_vog, 'FullImageId':test_ids_jpg})\n test_df['date'] = test_df.apply(lambda row: row.ImageId.split(\"_\")[0] + \"_\" + row.ImageId.split(\"_\")[1], axis=1)\n test_df['tile'] = test_df.apply(lambda row: row.ImageId.split(\"_\")[-1], axis=1)\n\n orient_df = pd.read_csv(spacenet_out_dir + '/SAR_orientations.txt', header=None, sep=\" \")\n orient_df.columns = ['date', 'orient']\n test_df = pd.merge(test_df, orient_df, on='date')\n\n # Create pairs\n date_id = np.sort(np.unique(test_df.date.values))\n len(date_id)\n df_grp = test_df.groupby('date')\n pairs_D = {}\n pairs_L = {}\n for dat in tqdm.tqdm(date_id):\n df = df_grp.get_group(dat)\n\n for im1, orient1, tile_id1 in zip(df.FullImageId, df.orient, df.tile):\n my_ud = []\n my_lf = []\n for im2, orient2, tile_id2 in zip(df.FullImageId, df.orient, df.tile):\n\n if orient1 == 1:\n if (int(tile_id1) % 2 == 1) and (int(tile_id1) == int(tile_id2) - 1):\n my_ud.append(im2)\n if True and (int(tile_id1) == int(tile_id2) - 2):\n # if (int(tile_id1)%2 == 1) and (int(tile_id1) == int(tile_id2)-2):\n my_lf.append(im2)\n elif orient1 == 0:\n if (int(tile_id1) % 2 == 1) and (int(tile_id1) == int(tile_id2) - 1):\n my_ud.append(im2)\n if True and (int(tile_id1) == int(tile_id2) - 2):\n # if (int(tile_id1)%2 == 1) and (int(tile_id1) == int(tile_id2)-2):\n my_lf.append(im2)\n\n pairs_D[im1] = my_ud\n pairs_L[im1] = my_lf\n\n # Calculate pair stats\n ico = 0\n for key in pairs_D.keys():\n if len(pairs_D[key]) > 0:\n ico = ico + 1\n print('Pairs found ', ico / len(pairs_D.keys()), ico, len(pairs_D.keys()))\n ico = 0\n for key in pairs_L.keys():\n if len(pairs_L[key]) > 0:\n ico = ico + 1\n print('Pairs found ',ico / len(pairs_L.keys()), ico, len(pairs_L.keys()))\n #\n # Create map 
from tile_id to position\n    DD = dict(zip(test_ids, range(len(test_ids))))\n\n    #\n    # Create new images from left-right half/half\n    ico = 0\n    for key in pairs_L.keys():\n        if len(pairs_L[key]) > 0:\n            ico += 1\n    print('keys LR', ico)\n\n    if ico > 0:\n        combImages = np.zeros((ico, 900, 900, 3), dtype='uint8')\n    else:\n        combImages = np.zeros((1, 900, 900, 3), dtype='uint8')\n\n    counter = 0\n    orients = []\n    testid1testid2 = []\n    for k in tqdm.tqdm(pairs_L.keys()):\n        right = pairs_L[k]\n        if len(right) > 0:\n            key_L = k\n            key_R = right[0]\n            date_id = key_L.split('_')[0] + '_' + key_L.split('_')[1]\n\n            orient_L = orient_df.orient.loc[orient_df.date == ('_').join(key_L.split('_')[-4:][:2])].values[0]\n            orient_R = orient_df.orient.loc[orient_df.date == ('_').join(key_R.split('_')[-4:][:2])].values[0]\n            sar_L = read_jpg(key_L, data_folder=test_save_path)\n            sar_R = read_jpg(key_R, data_folder=test_save_path)\n\n            assert(orient_L==orient_R)\n            if orient_L == 0:\n                sar_final, _ = patch_left_right_fixed(sar_L, sar_L, sar_R, sar_R)\n            else:\n                sar_final, _ = patch_left_right_fixed(sar_R, sar_R, sar_L, sar_L)\n            combImages[counter] = sar_final\n            orients.append(orient_L)\n            testid1testid2.append(key_L[:-4] + '--' + key_R[:-4])\n            counter += 1\n\n    #\n    # Blend weights: the stitched-image inference contributes combW (30%) on each\n    # affected half; the original prediction keeps origW (70%)\n    combW = 0.3\n    origW = 1 - combW\n\n    #\n    # Accumulate all secondary preds here\n    if len(testid1testid2) > 0:\n        print ('we have ', len(testid1testid2), 'cases')\n        final_preds_2 = np.zeros((len(testid1testid2), 900, 900), dtype='float32')\n        final_w_2 = 0\n        final_preds_borders_2 = np.zeros((len(testid1testid2), 900, 900), dtype='float32')\n        final_w_borders_2 = 0\n\n    # ########################################\n    # ### nasios\n    # ########################################\n    print(\"-------------- nas inference ---------------- \")\n\n    firsttime = True\n    total_w = 0\n\n    for exp in exp_nas:\n        out_dir = os.path.join(spacenet_out_dir, exp['exp_id'])\n        for i in range(len(exp['sizes'])):\n            sz = exp['sizes'][i]\n            w = exp['weights'][i]\n\n            model_file = out_dir + '/' + str(exp['exp_id']) + '_' + str(exp['sizes'][i]) + '_' + str(\n                exp['mode'][i]) + '.pth'\n            flag = os.path.exists(model_file)\n            # the original `print(...), flag` built a throwaway tuple and never showed\n            # the flag; pass it to print instead\n            print(exp['exp_id'] + ' ' + str(sz) + ' ' + str(w), '-> ', model_file, flag)\n\n            if flag and w > 0:\n                ACTIVATION = 'sigmoid'  # None#'sigmoid'\n                ENCODER = exp['encoder']\n                ENCODER_WEIGHTS = 'imagenet'\n                DEVICE = 'cuda'\n\n                preprocessing_fn = smp.encoders.get_preprocessing_fn(ENCODER, ENCODER_WEIGHTS)\n\n                model = smp.Unet(\n                    encoder_name=ENCODER,\n                    encoder_weights=ENCODER_WEIGHTS,\n                    classes=2,\n                    activation=ACTIVATION,\n                    decoder_attention_type='scse'\n                )\n\n                model.cuda()\n                num_workers = 4\n                bs = 3\n                test_dataset = BuildingsDatasetBorders(datatype='test', data_folder=test_save_path, img_ids=test_ids,\n                                                       transforms=get_validation_augmentation(sz),\n                                                       preprocessing=get_preprocessing(preprocessing_fn))\n                test_loader = DataLoader(test_dataset, batch_size=bs, shuffle=False, num_workers=num_workers)\n\n                model.load_state_dict(torch.load(model_file))\n                model.eval()\n\n                test_preds = np.zeros((len(test_ids), 900, 900), dtype='float32')\n                test_preds_borders = np.zeros((len(test_ids), 900, 900), dtype='float32')\n                for i, (x_batch, y_batch) in enumerate(tqdm.tqdm(test_loader)):\n                    preds = model(x_batch.cuda()).detach().cpu()\n                    mask = preds[:, 0, ...]\n                    borders = preds[:, 1, ...]\n                    for j in range(len(preds)):\n                        test_preds[i * bs + j, ...] = w * cv2.resize((mask[j, ...]).numpy().astype('float32'),\n                                                                     (900, 900))\n                        test_preds_borders[i * bs + j, ...] 
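# --- Editor's sketch of the weighted-ensemble bookkeeping used above/below ---
# Every model adds w * prediction into a running sum and w into a weight total,
# so one division at the end (final_preds / final_w) yields the weighted mean.
import numpy as np

acc, total_w = np.zeros((2, 2), dtype='float32'), 0.0
for w, pred in [(0.5, np.ones((2, 2))), (1.0, np.zeros((2, 2)))]:  # toy predictions
    acc += w * pred
    total_w += w
ensemble = acc / total_w  # 0.5 / 1.5 = 0.333... everywhere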
= w * cv2.resize(\n (borders[j, ...]).numpy().astype('float32'), (900, 900))\n\n final_w = final_w + w\n final_w_borders = final_w_borders + w\n final_preds += test_preds\n final_preds_borders += test_preds_borders\n\n if len(testid1testid2) > 0:\n test_dataset_2 = BuildingsDatasetInferenceCombined(combImages=combImages,\n img_ids=testid1testid2,\n transforms=get_validation_augmentation(sz),\n preprocessing=get_preprocessing(preprocessing_fn))\n test_loader_2 = DataLoader(test_dataset_2, batch_size=bs, shuffle=False, num_workers=num_workers)\n test_preds_2 = np.zeros((len(testid1testid2), 900, 900), dtype='float32')\n test_preds_borders_2 = np.zeros((len(testid1testid2), 900, 900), dtype='float32')\n for i, (x_batch, y_batch) in enumerate(tqdm.tqdm(test_loader_2)):\n preds = model(x_batch.cuda()).detach().cpu()\n mask = preds[:, 0, ...]\n borders = preds[:, 1, ...]\n for j in range(len(preds)):\n test_preds_2[i * bs + j, ...] = w * cv2.resize((mask[j, ...]).numpy().astype('float32'), (900, 900))\n test_preds_borders_2[i * bs + j, ...] = w * cv2.resize((borders[j, ...]).numpy().astype('float32'), (900, 900))\n\n final_w_2 = final_w_2 + w\n final_w_borders_2 = final_w_borders_2 + w\n final_preds_2 += test_preds_2\n final_preds_borders_2 += test_preds_borders_2\n\n del test_preds_2\n del test_preds_borders_2\n del test_dataset_2\n del test_loader_2\n gc.collect()\n else:\n if w == 0:\n print('skipping model: due to weight')\n else:\n print('skipping model: file not found')\n\n # merge preds\n if len(testid1testid2) > 0:\n print('new cases applied', len(testid1testid2), len(final_preds_2), len(orients))\n for i in tqdm.tqdm(range(len(final_preds_2))):\n num1 = DD[testid1testid2[i].split('--')[0]]\n num2 = DD[testid1testid2[i].split('--')[1]]\n\n if orients[i] == 1:\n final_preds[num1, :, :450] = origW * final_preds[num1, :, :450] + combW * final_preds_2[i, :, 450:]\n final_preds_borders[num1, :, :450] = origW * final_preds_borders[num1, :, :450] + combW * final_preds_borders_2[\n i, :, 450:]\n\n final_preds[num2, :, 450:] = origW * final_preds[num2, :, 450:] + combW * final_preds_2[i, :, :450]\n final_preds_borders[num2, :, 450:] = origW * final_preds_borders[num2, :, 450:] + combW * final_preds_borders_2[\n i, :, :450]\n\n else:\n final_preds[num1, :, 450:] = origW * final_preds[num1, :, 450:] + combW * final_preds_2[i, :, :450]\n final_preds_borders[num1, :, 450:] = origW * final_preds_borders[num1, :, 450:] + combW * final_preds_borders_2[\n i, :, :450]\n\n final_preds[num2, :, :450] = origW * final_preds[num2, :, :450] + combW * final_preds_2[i, :, 450:]\n final_preds_borders[num2, :, :450] = origW * final_preds_borders[num2, :, :450] + combW * final_preds_borders_2[\n i, :, 450:]\n del final_preds_2\n del final_preds_borders_2\n gc.collect()\n ########################################\n ### voglis\n ########################################\n print(\"-------------- vog inference ---------------- \")\n\n\n for exp in exps_vog:\n print (exp['exp_id'], exp['sar']['train']['mean'], exp['sar']['train']['std'])\n\n transform_infer = Compose(\n [Normalize(mean=exp['sar']['train']['mean'],\n std=exp['sar']['train']['std']),\n ToTensor()\n ])\n\n inference_dataset = datagen.SpaceNetSAR2RGBSteroidsInference(test_df, transformers=transform_infer, sar_base_path=spacenet_test_sar_path,\n orientation=True, return_orientation=True,\n return_labels=True, sar_preampl=pream,\n scale_max=True, lee=0)\n inference_loader = DataLoader(inference_dataset, batch_size=exp['sar']['train']['batch_size'], 
shuffle=False, num_workers=6)\n ENCODER = exp['sar']['model']['encoder']\n models = []\n for f in range(exp['nfolds']):\n print('************************************')\n print('*********** ' + str(f) + '***************')\n print('************************************')\n model, preprocessing_fn, optimizer, train_epoch, valid_epoch = create_model_optimizer(\n smp_model=exp['sar']['model']['model_type'],\n encoder=exp['sar']['model']['encoder'],\n activation=exp['sar']['model']['activation'],\n init_weights=None,\n init_lr=exp['sar']['train']['init_lr'],\n loss_type=exp['sar']['train']['loss'])\n\n model_dir = exp['sar']['id'] + '_' + str(f) + '/' + exp['sar']['id'] + '_' + str(f) + '.pth'\n MODEL_FILE = spacenet_out_dir + \"/\" + model_dir\n print (MODEL_FILE)\n flag = os.path.exists(MODEL_FILE)\n if flag:\n print('(2) weight exists')\n if os.path.exists(MODEL_FILE):\n print(MODEL_FILE)\n model.load_state_dict(torch.load(MODEL_FILE))\n model.eval();\n models.append(model)\n\n dl = inference_loader\n\n PREDS = []\n with torch.no_grad():\n for (x_batch, tile_batch, orient_batch) in tqdm.tqdm(dl):\n for i in range(len(x_batch)):\n preds = np.zeros((900, 900))\n for m in models:\n pred = infer_one(m, x_batch[i, ...], tile_size=(512, 512), tile_step=(224, 224), weight='pyramid')\n preds = preds + pred\n preds = preds / len(models)\n\n PREDS.append(preds.astype('float16'))\n PREDS = np.array(PREDS)\n\n #np.saved_compressed('PREDS_' + exp['exp_id'], a=PREDS)\n\n final_preds = final_preds + exp['weight'] * PREDS\n final_w = final_w + exp['weight']\n\n\n\n\n final_preds = final_preds / final_w\n final_preds_borders = final_preds_borders / final_w_borders\n\n final_preds = final_preds - final_preds_borders\n\n print('Final weights: ', final_w)\n print('Final border weights: ', final_w_borders)\n\n\n firstfile = True\n tile_ids = []\n min_mask_size = 160\n ocounter = 0\n kernel = np.ones((5, 5), np.uint8)\n counter2 = 0\n\n for imn in tqdm.tqdm(test_ids):\n # for imn in tqdm.tqdm_notebook(os.listdir('/var/data/spacenet/detectron/test_sar_productscale_orient')):\n # THRESH=0.5\n THRESH = 0.45 - test_tiles_nums_nums[counter2] / 32\n DIFFTHRESH = 0.15\n A = 0.4\n\n orientvar = orient_df.orient.loc[orient_df.date == ('_').join(imn.split('-')[1][10:-4].split('_')[:2])].values[0]\n tile_id = imn.split('-')[1][10:]\n #print (orientvar, tile_id, ('_').join(imn.split('-')[1][10:-4].split('_')[:2]))\n\n imgl = ndimage.label((final_preds[counter2] > THRESH).astype('uint16'))[0]\n\n pred_masks = multimask2mask3d_v3(imgl)\n pred_masks = np.rollaxis(pred_masks, 2, 0)\n\n # if len(pred_masks)>0:\n if pred_masks.sum() > 0:\n tempscores = pred_masks.sum((1, 2)) # .shape\n if len(tempscores[tempscores > min_mask_size]) > 0:\n pred_masks = pred_masks[tempscores > min_mask_size]\n\n else:\n if len(pred_masks.shape) == 2:\n pred_masks = np.expand_dims(pred_masks, 0)\n\n\n if pred_masks.sum() > 0:\n for g in range(pred_masks.shape[0]):\n pred_masks[g, ...] = cv2.dilate(pred_masks[g, ...], kernel, iterations=1)\n\n # remove masks of low probability\n keepmask = []\n for g in range(pred_masks.shape[0]):\n maskprob = pred_masks[g, ...] 
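# --- Editor's sketch of the post-processing skeleton used here ---
# Threshold the averaged heatmap, label connected components, and drop instances
# smaller than min_mask_size; the real loop additionally dilates each mask and
# filters on how much of it clears THRESH + DIFFTHRESH.
import numpy as np
from scipy import ndimage

def extract_instances(heatmap, thresh=0.45, min_size=160):
    labels, n = ndimage.label(heatmap > thresh)
    return [(labels == i).astype('uint8')
            for i in range(1, n + 1) if (labels == i).sum() > min_size]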
* final_preds[counter2]\n # if maskprob.mean()>0.55:\n # keepmask.append(g)\n if (maskprob > THRESH + DIFFTHRESH).sum() > A * (maskprob > THRESH).sum():\n keepmask.append(g)\n keepmask = np.array(keepmask)\n if len(keepmask) > 0:\n pred_masks = pred_masks[keepmask, ...]\n\n sortpreds = np.argsort(pred_masks.sum((1, 2)))[::-1]\n pred_masks = pred_masks[sortpreds, ...]\n\n counter = 0\n for mi in range(pred_masks.shape[0]):\n\n m = pred_masks[mi, ...]\n if orientvar == 1:\n m = np.fliplr(np.flipud(m))\n\n\n if m.sum() > 0:\n vectordata = mask_to_polygon(m)\n\n csvaddition = pd.DataFrame({'ImageId': tile_id,\n 'BuildingId': 0,\n 'PolygonWKT_Pix': vectordata,\n 'Confidence': 1\n }, index=[ocounter])\n # csvaddition.to_csv('/home/voglis/SpaceNet6/tmp_proposal.csv', index=False)\n\n tile_ids.append(tile_id)\n if firstfile:\n proposalcsv = csvaddition\n firstfile = False\n else:\n proposalcsv = proposalcsv.append(csvaddition)\n\n counter += 1\n ocounter += 1\n\n else:\n csvaddition = pd.DataFrame({'ImageId': tile_id,\n 'BuildingId': 0,\n 'PolygonWKT_Pix': ['POLYGON EMPTY'],\n 'Confidence': 1.\n }, index=[ocounter])\n\n if firstfile:\n proposalcsv = csvaddition\n firstfile = False\n else:\n proposalcsv = proposalcsv.append(csvaddition)\n counter += 1\n ocounter += 1\n\n\n else:\n csvaddition = pd.DataFrame({'ImageId': tile_id,\n 'BuildingId': 0,\n 'PolygonWKT_Pix': ['POLYGON EMPTY'],\n 'Confidence': 1.\n }, index=[ocounter])\n\n if firstfile:\n proposalcsv = csvaddition\n firstfile = False\n else:\n proposalcsv = proposalcsv.append(csvaddition)\n counter += 1\n ocounter += 1\n\n counter2 += 1\n\n proposalcsv.loc[:, ['ImageId', 'PolygonWKT_Pix', 'Confidence']].to_csv(\n output_file, index=False)\n","sub_path":"3-SatShipAI/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":31916,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"237668","text":"# Write a class to hold player information, e.g. what room they are in\n# currently.\nfrom item import LightSource\n\n\nclass Player:\n def __init__(self, current_room):\n self.current_room = current_room\n self.inventory = []\n self.has_lightsource = False\n self.hit_points = 100\n self.attack = 10\n self.is_alive = True\n\n def take_item(self, item):\n if isinstance(item, LightSource):\n self.has_lightsource = True\n self.inventory.append(item)\n\n def drop_item(self, item):\n if isinstance(item, LightSource):\n self.has_lightsource = False\n self.inventory.remove(item)\n\n def show_inventory(self):\n print(\"You have the following items in your inventory:\")\n for item in self.inventory:\n print(f\"{item.name}: {item.description}\")\n\n def move_to(self, room):\n if self.current_room != room:\n self.current_room = room\n if self.current_room.is_light or self.has_lightsource:\n print(f\"You have now entered: {room.name}, {room.description}\")\n else:\n print(\"You've changed location, but it's pitch black!\")\n\n def attack_monster(self, monster):\n monster.on_attack(self.attack)\n if monster.is_alive:\n self.hit_points -= monster.attack\n if self.hit_points <= 0:\n self.is_alive = False\n print(f\"{monster.name} attacked you! 
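# --- Editor's sketch (hypothetical, not part of this repo) ---
# Player.attack_monster above assumes a monster object exposing on_attack(),
# attack and is_alive; a minimal counterpart makes that contract explicit.
class Monster:
    def __init__(self, name, hit_points=30, attack=5):
        self.name = name
        self.hit_points = hit_points
        self.attack = attack
        self.is_alive = True

    def on_attack(self, damage):
        self.hit_points -= damage
        if self.hit_points <= 0:
            self.is_alive = False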
Your health: {self.hit_points}\")\n","sub_path":"src/player.py","file_name":"player.py","file_ext":"py","file_size_in_byte":1485,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"167744550","text":"# -*- coding: utf-8 -*-\r\n\r\n\"\"\"This module implements bet action for game with 'consecutive' paymode.\r\n\r\n\"\"\"\r\n\r\n\r\nimport tornado.ioloop\r\nfrom betbasehandler import BetBaseHandler\r\nfrom translation import _T\r\nimport machineconsecutive as machine\r\nimport initgame\r\nimport redisutils\r\nfrom redis import WatchError\r\nimport logger\r\nimport random\r\nimport time\r\n\r\n\r\nCASH_SPIN = 'cash'\r\nFREE_SPIN = 'free'\r\n\r\n\r\nBIG_WIN = 1\r\nJACKPOT = 2\r\nNORMAL = 0 \r\n\r\nclass BetConsecutiveHandler(BetBaseHandler):\r\n    \"\"\"Bet of consecutive slot game handler class.\r\n\r\n    This class implements HTTP GET request '/betconsecutive'.\r\n    The query parameters for this handler are:\r\n    \r\n    - id: user-id (facebook id)\r\n    - cpb: coin per bet (the number of coins for one spin)\r\n    - cookie: cookie for user\r\n    - gid: id of slot game\r\n\r\n    Examples::\r\n\r\n        /betconsecutive?id=10157565441830527&cpb=100&cookie=222594957adadc7f03dd2caae783c2c8&gid=xmas\r\n\r\n    \"\"\"\r\n\r\n\r\n    def _get_money(self, room_coin, rate, win_type, game):\r\n        if win_type in ['freespins','lucky']:\r\n            return int(rate)\r\n        return int(rate * room_coin)\r\n\r\n\r\n    def _incr_game_banker(self, spin_type, room_coin, game):\r\n        \"\"\" Increase jackpot and game banker per spin\r\n        \"\"\"\r\n        game_banker_key = redisutils.game_banker_key(game['id'], room_coin)\r\n\r\n        if spin_type == FREE_SPIN:\r\n            # no commission, jackpot or game_banker bookkeeping for free spins\r\n            commission = game_banker_incr = 0\r\n            game_banker = int(self.rd.get(game_banker_key) or 0)\r\n        else:\r\n            #commission\r\n            commission = int(game['commission'] * room_coin / 100.)\r\n\r\n            #increase game banker\r\n            game_banker_incr = room_coin - commission\r\n            game_banker = self.rd.incrby(game_banker_key, game_banker_incr)\r\n\r\n        return commission, game_banker_incr, game_banker\r\n\r\n
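# --- Editor's sketch: worked example of the banker bookkeeping above ---
# Toy numbers, not real game config: with game['commission'] = 5 (percent) and a
# 100-coin room, a cash spin takes 5 coins as commission and adds the remaining
# 95 to the game banker; a free spin adds nothing.
room_coin, commission_pct = 100, 5
commission = int(commission_pct * room_coin / 100.)  # 5
game_banker_incr = room_coin - commission            # 95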
    def _spin(self, game, room_coin, game_banker):\r\n        for i in range(5):\r\n            (reels, pos, dup), ret_lines = machine.spin(game)\r\n            win_coin = free_spins = lucky_times = 0\r\n            win_lines = {}\r\n            for c, counter in ret_lines.iteritems():\r\n                for symbol, (line, symbol, c, win_type, rate) in counter.iteritems():\r\n                    win_value = self._get_money(room_coin, rate, win_type, game)\r\n                    win_lines[symbol] = [line, symbol, c, win_type, win_value]\r\n                    #win_lines[c][symbol] = [line, symbol, c, win_type, win_value]\r\n\r\n                    if win_type in ['jackpot', 'cash']:\r\n                        win_coin += win_value\r\n                    elif win_type == 'freespins':\r\n                        free_spins += win_value\r\n                    elif win_type == 'lucky':\r\n                        lucky_times += win_value\r\n\r\n            approximate_banker = win_coin + lucky_times * game['lucky']['averange']\r\n            if approximate_banker <= game_banker:\r\n                return (reels, pos, dup), win_lines, win_coin, free_spins, lucky_times\r\n\r\n        # no win: fall back to a predefined losing reel layout\r\n        return random.choice(game['lose_reels_list']), {}, 0, 0, 0\r\n\r\n\r\n    def _bet(self, fid, user_coin, spin_type, room_coin, game):\r\n        commission, game_banker_incr, game_banker = \\\r\n            self._incr_game_banker(spin_type, room_coin, game)\r\n        \r\n        (reels, pos, dup), win_lines, win_coin, free_spins, lucky_times \\\r\n            = self._spin(game, room_coin, game_banker)\r\n\r\n        if win_coin:\r\n            user_coin = self.rd.incrby(redisutils.user_coin(fid), win_coin)\r\n            game_banker = self.rd.decr(redisutils.game_banker_key(game['id'], room_coin), win_coin)\r\n\r\n        user_win_type = self.get_win_type(room_coin, win_coin)\r\n\r\n        result = {\r\n            'id':fid,\r\n            'type':spin_type,\r\n            'display':{\r\n                'reels':reels,\r\n                'pos':pos,\r\n                'duplicate':dup,\r\n            },\r\n            'win-lines':win_lines, # list of winning lines\r\n            'win-coin':win_coin, # total amount won\r\n            'bet-coin':room_coin, # bet amount\r\n            'win-type':user_win_type, # win type\r\n            'free-spins':free_spins, # number of free spins awarded\r\n            'lucky-times':lucky_times, # lucky bonus multiplier received\r\n            'user-coin':user_coin, # user's remaining coins\r\n        }\r\n        bet_coin_manager = {\r\n            'bet_coin':room_coin,\r\n            'commission':commission,\r\n            'game_banker_incr':game_banker_incr,\r\n            'banker_pay':win_coin,\r\n            'game_banker':game_banker,\r\n        }\r\n        self.log.info(\r\n            'bet consecutive coin manager: {bet_coin}, {commission}, {game_banker_incr}, {banker_pay}, {game_banker}'\r\n            .format(**bet_coin_manager)\r\n        )\r\n        return result, user_coin, bet_coin_manager\r\n\r\n    def _lucky(self, fid, user_coin, room_coin, lucky_times, game):\r\n        game_banker_key = redisutils.game_banker_key(game['id'], room_coin)\r\n        lucky_game = game['lucky']\r\n\r\n        game_banker = int(self.rd.get(game_banker_key) or 0)\r\n\r\n        for i in range(3):\r\n            if i < 2:\r\n                samples = random.sample(lucky_game['symbols'], lucky_game['spin'])\r\n            else:\r\n                samples = random.choice(lucky_game['lose_reels_list'])[:]\r\n                random.shuffle(samples)\r\n\r\n            #print samples\r\n            rate = sum(lucky_game['values'].get(s,0) for s in samples) \\\r\n                * (sum(lucky_game['rates'].get(s, 0) for s in samples) + lucky_times)\r\n\r\n            is_crazy = not (lucky_game['special'] - set(samples))\r\n            rate = rate * lucky_game.get('special_rate', 10) if is_crazy else rate\r\n\r\n            win_coin = int(rate * room_coin)\r\n\r\n            if win_coin <= game_banker:\r\n                break\r\n\r\n\r\n        if win_coin:\r\n            user_coin = self.rd.incrby(redisutils.user_coin(fid), win_coin)\r\n            game_banker = self.rd.decr(game_banker_key, win_coin)\r\n\r\n        samples_detail = [(s, int(lucky_game['values'].get(s,0) * room_coin) \\\r\n            or lucky_game['rates'].get(s)) for s in samples]\r\n\r\n        result = {\r\n            'id':fid,\r\n            'type':'lucky',\r\n            'win-lines':samples_detail, # winnings per lucky draw\r\n            'crazy':is_crazy, # special win\r\n            'win-coin':win_coin, # total amount won\r\n            'lucky-times':lucky_times, # lucky bonus multiplier received\r\n            'user-coin':user_coin # user's remaining coins\r\n        }\r\n\r\n        lucky_coin_manager = {\r\n            'banker_pay':user_coin,\r\n            'game_banker':game_banker\r\n        }\r\n        self.log.info('lucky consecutive coin manager: {banker_pay}, {game_banker}'.format(**lucky_coin_manager))\r\n        return result, user_coin, lucky_coin_manager\r\n\r\n
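# --- Editor's sketch: worked example of the lucky payout above ---
#   rate = sum(values[s]) * (sum(rates[s]) + lucky_times), multiplied by
# special_rate when every "special" symbol was drawn.  Toy numbers, not real
# game config:
values, rates = {'A': 2, 'B': 1}, {'A': 0.5, 'B': 0.5}
samples, lucky_times, room_coin = ['A', 'B'], 1, 100
rate = sum(values[s] for s in samples) * (sum(rates[s] for s in samples) + lucky_times)
win_coin = int(rate * room_coin)  # 3 * 2.0 * 100 = 600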
    def get(self):\r\n        \"\"\" Implement HTTP GET method.\"\"\"\r\n        if not self.get_base_arguments(paymode='consecutive'):\r\n            raise StopIteration\r\n\r\n        fid, game, room_coin,gameid = self.fid, self.game, self.room_coin, self.game['id']\r\n        bet_coin = room_coin\r\n\r\n        enough, user_coin = self.is_enough_coin(fid, bet_coin)\r\n        if not enough:\r\n            self.write({'error_code':5,'error_msg':_T(\"You have not enough money to bet\")})\r\n            return\r\n        \r\n        result, user_coin, bet_coin_manager = self._bet(fid, user_coin, CASH_SPIN, room_coin, game)\r\n\r\n        # #only for test\r\n        # result['free-spins'] = 5\r\n        # result['lucky-times'] = 5\r\n        # #end for test\r\n\r\n        result['total-free-spins'] = total_free_spins = result['free-spins']\r\n        ret = [result]\r\n        coin_manager = [bet_coin_manager]\r\n\r\n        if result['lucky-times']:\r\n            result, user_coin, lucky_coin_manager = self._lucky(fid, user_coin, room_coin, result['lucky-times'], game)\r\n            ret += [result]\r\n            coin_manager += [lucky_coin_manager]\r\n\r\n        while total_free_spins:\r\n            total_free_spins -= 1\r\n            result, user_coin, bet_coin_manager = self._bet(fid, user_coin, FREE_SPIN, room_coin, game)\r\n            result['total-free-spins'] = total_free_spins = result['free-spins'] + total_free_spins\r\n            ret += [result]\r\n            coin_manager += [bet_coin_manager]\r\n\r\n            # trigger the lucky bonus round\r\n            if result['lucky-times']:\r\n                result, user_coin, lucky_coin_manager = self._lucky(fid, user_coin, room_coin, result['lucky-times'], game)\r\n                ret += [result]\r\n                coin_manager += [lucky_coin_manager]\r\n        \r\n        self.write({'error_code':0, 'ret':ret})\r\n\r\n        redisutils.async_user(self.rd, fid)\r\n\r\n\r\n        total_win_coin = sum(r['win-coin'] for r in ret)\r\n        total_commission = sum(r.get('commission',0) for r in coin_manager)\r\n\r\n        min_rate = self.config.award['min_rate']\r\n        min_value = self.config.award['min_value']\r\n        total_win_type = BIG_WIN if total_win_coin >= max(bet_coin*min_rate, min_value) else NORMAL\r\n        \r\n        log = {\r\n            'time': time.strftime('%Y-%m-%d %H:%M:%S'),\r\n            'fid':fid,\r\n            'gameid':gameid,\r\n            'room_coin': bet_coin,\r\n            'bet_coin': bet_coin,\r\n            'commission':total_commission,\r\n            'win_coin': total_win_coin,\r\n            'win_type': total_win_type,\r\n            'user_coin': ret[-1]['user-coin'],\r\n        }\r\n        logger.trans_log.put(log)\r\n    ","sub_path":"app/gameserver/betconsecutivehandler.py","file_name":"betconsecutivehandler.py","file_ext":"py","file_size_in_byte":10439,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"529159529","text":"#Scoring program for calculating Accuracy, Precision and Recall, written by Nina Kersten#\n#This program runs an example for real data\n\n\nimport sys,os, re\nimport csv\nimport itertools\nimport ast\nimport math\n\n\n#Read in the names_lists for the cartesian product\n#names_list1 = [\"4EBP1_pS65\",\"4EBP1_pT37_pT46\",\"ACC_pS79\",\"AKT_pS473\",\"AKT_pT308\"]\n#names_list2 = [\"4EBP1_pS65\",\"4EBP1_pT37_pT46\",\"ACC_pS79\",\"AKT_pS473\",\"AKT_pT308\"]\n\n#Importing a .csv file for a certain cell line, getting the names of the proteins\nrows = csv.reader(open(\"./example/BT20_main.csv\", \"r\"), delimiter=',')\narows = [row for row in rows if \"Antibody Name\" in row]\narows1= arows[0]\narows2 = [x for x in arows1 if x]\narows2.remove(\"Antibody Name\")\n\n\nnames_list1 = arows2 \nnames_list2 = arows2\n\n#Create the cartesian product of the names\nnames_list = []\nfor i in itertools.product(names_list1,names_list2):\n\tnames_list.append(i)\n\n\n#Converting the \"\\t\"-separated Goldstandard network into a whitespace separated network\nwith open(\"./example/goldstandard.sif\",\"r\") as fin, open (\"./example/goldstandard1.sif\",\"w\") as fout:\n\tfor line in fin:\n\t\tfout.write(line.replace('\\t',' '))\n#print(fout)\n\n
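# --- Editor's sketch (equivalent set-based formulation, not used below) ---
# The loop-based counting that follows can be written as set algebra over the
# "src 1 dst" edge strings; TN is whatever remains of the cartesian product.
def confusion_counts(gold_edges, pred_edges, all_pairs):
    gold, pred = set(gold_edges), set(pred_edges)
    tp = len(gold & pred)
    fn = len(gold - pred)
    fp = len(pred - gold)
    tn = len(all_pairs) - tp - fn - fp
    return tp, fp, fn, tn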
#Read in the Goldstandard and the Prediction data set\ngold_file = open(\"./example/goldstandard1.sif\",\"r\")\npred_file = open(\"./example/prediction.sif\",\"r\")\n\n\nline1 = gold_file.readlines()\nline2 = pred_file.readlines()\n#print(line1,line2)\n\nline1 = [elem.replace('\\n','').replace(' 1, -1 ',' 1 ').replace(' -1 ',' 1 ') for elem in line1]\nline2 = [elem.replace('\\n','').replace(' 1, -1 ',' 1 ').replace(' -1 ',' 1 ') for elem in line2]\nprint(line1,line2)\n\nline1 = [e for e in line1 if e not in ('')]\nline2 = [e for e in line2 if e not in ('')]\n#print(line1, line2)\n\n#Calculating True Positive\ntp_biglist = []\nfn_biglist = []\nfor gold in line1:\n\tif gold in line2:\n\t\ttp_list = list(gold)\n\t\t#print(tp_biglist)\n\t\ttp_list = ''.join(gold)\n\t\t#print(tp_biglist)\n\t\ttp_list = tp_list.strip().split()\n\t\ttp_list = [e for e in tp_list if e not in ('1')]\n\t\ttp_biglist.append(tuple(tp_list))\n\t\t#print(tp_biglist)\n\t\ttp = len(tp_biglist)\n\t\t#print(tp_biglist) #[('a1a', 'b2b'), ('a1a', 'c3c'), ('e5e', 'a1a')]\n\n#Calculating False Negative\n\telif gold not in line2:\n\t\tfn_list = list(gold)\n\t\tfn_list = ''.join(gold)\n\t\tfn_list = fn_list.strip().split()\n\t\tfn_list = [e for e in fn_list if e not in ('1')]\n\t\tfn_biglist.append(tuple(fn_list))\n\t\t#fn_biglist = [t for t in fn_biglist if t != ()]\n\t\tfn = len(fn_biglist)\n\n#print(tp_biglist)\t\t\n\n#Calculating False Positive\nfp_biglist = []\nfor pred in line2:\n\tif pred not in line1:\n\t\tfp_list = list(pred)\n\t\tfp_list = ''.join(pred)\n\t\tfp_list = fp_list.strip().split()\n\t\tfp_list = [e for e in fp_list if e not in ('1')]\n\t\tfp_biglist.append(tuple(fp_list))\n\t\t#fp_biglist = [t for t in fp_biglist if t != ()]\n\t\tfp = len(fp_biglist)\n\n\n#Deleting TP, FN, FP from names_list to finally get the TN-value\ndef EmptyList1(tp_biglist):\n\tif len(tp_biglist) == 0:\n\t\treturn 0\n\telse:\n\t\treturn 1\n \nif EmptyList1(tp_biglist): \n\tfor tp_string in tp_biglist:\n\t\tif tp_string in names_list:\n\t\t\tnames_list.remove(tp_string)\nelse:\n\ttp = 0\n\n\nprint(len(names_list))\n\ndef EmptyList2(fp_biglist):\n\tif len(fp_biglist) == 0:\n\t\treturn 0\n\telse:\n\t\treturn 1\n\nif EmptyList2(fp_biglist): \n\tfor fp_string in fp_biglist:\n\t\tif fp_string in names_list:\n\t\t\tnames_list.remove(fp_string)\nelse:\n\tfp = 0\n\nprint(len(names_list))\n\ndef EmptyList3(fn_biglist):\n\tif len(fn_biglist) == 0:\n\t\treturn 0\n\telse:\n\t\treturn 1\n\nif EmptyList3(fn_biglist): \n\tfor fn_string in fn_biglist:\n\t\tif fn_string in names_list:\n\t\t\tnames_list.remove(fn_string)\nelse:\n\tfn = 0\n\nprint(len(names_list))\n\t\t\n\ntn = len(names_list)\n\n\nprint(\"True Negative Value =\",tn)\nprint(\"False Positive Value =\",fp)\nprint(\"True Positive Value =\",tp)\nprint(\"False Negative Value =\",fn)\n\n\n#Calculating Accuracy, Precision and Recall\nacc = ((tp+tn)/(tp+tn+fp+fn))\nprint(\"Accuracy =\",acc)\npre = ((tp)/(tp+fp))\nprint(\"Precision =\",pre)\nrec = ((tp)/(tp+fn))\nprint(\"Recall =\",rec)\n\n#Calculating Matthews correlation coefficient (MCC):\nmcc = ((tp*tn-fp*fn)/(math.sqrt((tp+fp)*(tp+fn)*(tn+fp)*(tn+fn))))\nprint(\"Matthews correlation coefficient (MCC) = \",mcc)\n\n#Balanced Accuracy\nbacc = (((tp/(tp+fn))+(tn/(fp+tn)))/2)\nprint(\"Balanced Accuracy =\",bacc)\n\n","sub_path":"Scoring/scoring.py","file_name":"scoring.py","file_ext":"py","file_size_in_byte":4237,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"438663996","text":"\"\"\"Utility functions.\"\"\"\nfrom 
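# --- Editor's sketch: numeric sanity check of the metrics above ---
# Toy confusion matrix, not output from a real scoring run.
import math
tp, tn, fp, fn = 4, 90, 3, 3
acc = (tp + tn) / (tp + tn + fp + fn)   # 0.94
pre = tp / (tp + fp)                    # ~0.571
rec = tp / (tp + fn)                    # ~0.571
mcc = (tp * tn - fp * fn) / math.sqrt((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn))
bacc = (tp / (tp + fn) + tn / (fp + tn)) / 2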
__future__ import print_function\nfrom typing import Any, Dict, Iterator, List, Optional, Union # noqa pylint: disable=unused-import\n\nfrom contextlib import contextmanager\nimport hashlib\nimport importlib\nimport json\nimport os\nimport platform\nimport re\nimport stat\nfrom subprocess import check_call\nimport sys\nimport six\n\nAWS_ENV_VARS = ('AWS_ACCESS_KEY_ID', 'AWS_SECRET_ACCESS_KEY',\n 'AWS_SESSION_TOKEN')\nEMBEDDED_LIB_PATH = os.path.join(\n os.path.dirname(os.path.abspath(__file__)),\n 'embedded'\n)\n\n\nclass cached_property(object): # noqa pylint: disable=invalid-name,too-few-public-methods\n \"\"\"Decorator for creating cached properties.\n\n A property that is only computed once per instance and then replaces itself\n with an ordinary attribute. Deleting the attribute resets the property.\n Source: https://github.com/bottlepy/bottle/commit/fa7733e075da0d790d809aa3d2f53071897e6f76\n\n \"\"\"\n\n def __init__(self, func):\n \"\"\"Initialize class.\n\n Args:\n func (Callable): Method being decorated.\n\n \"\"\"\n self.func = func\n\n def __get__(self, obj, _):\n \"\"\"Attempt to get a cached value.\n\n Args:\n obj (Any): Instance of a class.\n\n Returns:\n Any\n\n \"\"\"\n if obj is None:\n return self\n\n value = obj.__dict__[self.func.__name__] = self.func(obj)\n return value\n\n\n# python2 supported pylint is unable to load six.moves correctly\nclass MutableMap(six.moves.collections_abc.MutableMapping): # pylint: disable=no-member\n \"\"\"Base class for mutable map objects.\"\"\"\n\n def __init__(self, **kwargs):\n # type: (Dict[str, Any]) -> None\n \"\"\"Initialize class.\n\n Provided ``kwargs`` are added to the object as attributes.\n\n Example:\n .. codeblock: python\n\n obj = MutableMap(**{'key': 'value'})\n print(obj.__dict__)\n # {'key': 'value'}\n\n \"\"\"\n for key, value in kwargs.items():\n if isinstance(value, dict):\n setattr(self, key, MutableMap(**value))\n else:\n setattr(self, key, value)\n if kwargs:\n self._found_queries = MutableMap()\n\n @property\n def data(self):\n # type: () -> Dict[str, Any]\n \"\"\"Sanitized output of __dict__.\n\n Removes anything that starts with ``_``.\n\n \"\"\"\n result = {}\n for key, val in self.__dict__.items():\n if key.startswith('_'):\n continue\n if isinstance(val, MutableMap):\n result[key] = val.data\n else:\n result[key] = val\n return result\n\n def clear_found_cache(self):\n # type: () -> None\n \"\"\"Clear _found_cache.\"\"\"\n for _, val in self.__dict__.items():\n if isinstance(val, MutableMap):\n val.clear_found_cache()\n if hasattr(self, '_found_queries'):\n self._found_queries.clear()\n\n def find(self, query, default=None, ignore_cache=False):\n # type: (str, Any, bool) -> Any\n \"\"\"Find a value in the map.\n\n Previously found queries are cached to increase search speed. 
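# --- Editor's sketch: typical use of the cached_property above ---
# First access runs the method and stores the result in the instance __dict__;
# later accesses bypass the descriptor; deleting the attribute re-arms it.
class Circle(object):
    def __init__(self, r):
        self.r = r

    @cached_property
    def area(self):
        print('computing')
        return 3.14159 * self.r ** 2

c = Circle(2)
c.area      # prints 'computing', returns 12.56636
c.area      # served from __dict__, no print
del c.area  # resets the cache; the next access recomputes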
The\n        cached value should only be used if values are not expected to change.\n\n        Args:\n            query: A period delimited string that is split to search for\n                nested values\n            default: The value to return if the query was unsuccessful.\n            ignore_cache: Ignore cached value.\n\n        \"\"\"\n        if not hasattr(self, '_found_queries'):\n            # if not created from kwargs, this attr won't exist yet\n            # this is done to prevent endless recursion\n            self._found_queries = MutableMap()\n\n        if not ignore_cache:\n            cached_result = self._found_queries.get(query, None)\n            if cached_result:\n                return cached_result\n\n        split_query = query.split('.')\n\n        if len(split_query) == 1:\n            result = self.get(split_query[0], default)\n            if result != default:\n                self._found_queries[split_query[0]] = result\n            return result\n\n        nested_value = self.get(split_query[0])\n\n        if not nested_value:\n            return default\n\n        # (a stray `nested_value = nested_value.find(split_query[1])` used to sit here;\n        # its result was discarded and it could raise outside the try block)\n        try:\n            nested_value = self[split_query[0]].find('.'.join(split_query[1:]),\n                                                     default, ignore_cache)\n            if nested_value != default:\n                self._found_queries[query] = nested_value\n            return nested_value\n        except (AttributeError, KeyError):\n            return default\n\n    def get(self, key, default=None):\n        # type: (str, Any) -> Any\n        \"\"\"Implement evaluation of self.get.\n\n        Args:\n            key: Attribute name to return the value for.\n            default: Value to return if attribute is not found.\n\n        \"\"\"\n        return getattr(self, key, default)\n\n    def __bool__(self):\n        # type: () -> bool\n        \"\"\"Implement evaluation of instances as a bool.\"\"\"\n        if self.data:\n            return True\n        return False\n\n    def __contains__(self, value):\n        # type: (Any) -> bool\n        \"\"\"Implement evaluation of 'in' conditional.\"\"\"\n        return value in self.data\n\n    __nonzero__ = __bool__  # python2 compatibility\n\n    def __getitem__(self, key):\n        # type: (str) -> Any\n        \"\"\"Implement evaluation of self[key].\n\n        Args:\n            key: Attribute name to return the value for.\n\n        Returns:\n            The value associated with the provided key/attribute name.\n\n        Raises:\n            AttributeError: If attribute does not exist on this object.\n\n        Example:\n            .. codeblock: python\n\n                obj = MutableMap(**{'key': 'value'})\n                print(obj['key'])\n                # value\n\n        \"\"\"\n        return getattr(self, key)\n\n    def __setitem__(self, key, value):\n        # type: (str, Any) -> None\n        \"\"\"Implement assignment to self[key].\n\n        Args:\n            key: Attribute name to associate with a value.\n            value: Value of a key/attribute.\n\n        Example:\n            .. codeblock: python\n\n                obj = MutableMap()\n                obj['key'] = 'value'\n                print(obj['key'])\n                # value\n\n        \"\"\"\n        if isinstance(value, dict):\n            setattr(self, key, MutableMap(**value))\n        else:\n            setattr(self, key, value)\n\n    def __delitem__(self, key):\n        # type: (str) -> None\n        \"\"\"Implement deletion of self[key].\n\n        Args:\n            key: Attribute name to remove from the object.\n\n        Example:\n            .. 
codeblock: python\n\n obj = MutableMap(**{'key': 'value'})\n for k, v in obj.items():\n print(f'{key}: {value}')\n # key: value\n\n \"\"\"\n return iter(self.__dict__)\n\n def __str__(self):\n # type: () -> str\n \"\"\"Return string representation of the object.\"\"\"\n return json.dumps(self.data)\n\n\n@contextmanager\ndef change_dir(newdir):\n \"\"\"Change directory.\n\n Adapted from http://stackoverflow.com/a/24176022\n \"\"\"\n prevdir = os.getcwd()\n os.chdir(os.path.expanduser(newdir))\n try:\n yield\n finally:\n os.chdir(prevdir)\n\n\ndef ensure_file_is_executable(path):\n \"\"\"Exit if file is not executable.\"\"\"\n if platform.system() != 'Windows' and (\n not stat.S_IXUSR & os.stat(path)[stat.ST_MODE]):\n print(\"Error: File %s is not executable\" % path)\n sys.exit(1)\n\n\n@contextmanager\ndef environ(env=None, **kwargs):\n \"\"\"Context manager for temporarily changing os.environ.\n\n The original value of os.environ is restored upon exit.\n\n Args:\n env (Dict[str, str]): Dictionary to use when updating os.environ.\n\n \"\"\"\n env = env or {}\n env.update(kwargs)\n\n original_env = {key: os.getenv(key) for key in env}\n os.environ.update(env)\n\n try:\n yield\n finally:\n # always restore original values\n for key, val in original_env.items():\n if val is None:\n del os.environ[key]\n else:\n os.environ[key] = val\n\n\ndef load_object_from_string(fqcn):\n \"\"\"Convert \".\" delimited strings to a python object.\n\n Given a \".\" delimited string representing the full path to an object\n (function, class, variable) inside a module, return that object.\n\n Example::\n\n load_object_from_string(\"os.path.basename\")\n load_object_from_string(\"logging.Logger\")\n load_object_from_string(\"LocalClassName\")\n\n \"\"\"\n module_path = \"__main__\"\n object_name = fqcn\n if \".\" in fqcn:\n module_path, object_name = fqcn.rsplit(\".\", 1)\n importlib.import_module(module_path)\n return getattr(sys.modules[module_path], object_name)\n\n\ndef merge_dicts(dict1, dict2, deep_merge=True):\n \"\"\"Merge dict2 into dict1.\"\"\"\n if deep_merge:\n if isinstance(dict1, list) and isinstance(dict2, list):\n return dict1 + dict2\n\n if not isinstance(dict1, dict) or not isinstance(dict2, dict):\n return dict2\n\n for key in dict2:\n dict1[key] = merge_dicts(dict1[key], dict2[key]) if key in dict1 else dict2[key] # noqa pylint: disable=line-too-long\n return dict1\n dict3 = dict1.copy()\n dict3.update(dict2)\n return dict3\n # Alternate py3 version:\n # (tbd if it does or doesn't deep merge, and if that is needed)\n # if sys.version_info > (3, 4):\n # return {**dict1, **dict2}\n\n\ndef extract_boto_args_from_env(env_vars):\n \"\"\"Return boto3 client args dict with environment creds.\"\"\"\n boto_args = {}\n for i in ['aws_access_key_id', 'aws_secret_access_key',\n 'aws_session_token']:\n if env_vars.get(i.upper()):\n boto_args[i] = env_vars[i.upper()]\n return boto_args\n\n\ndef flatten_path_lists(env_dict, env_root=None):\n \"\"\"Join paths in environment dict down to strings.\"\"\"\n for (key, val) in env_dict.items():\n # Lists are presumed to be path components and will be turned back\n # to strings\n if isinstance(val, list):\n env_dict[key] = os.path.join(env_root, os.path.join(*val)) if (env_root and not os.path.isabs(os.path.join(*val))) else os.path.join(*val) # noqa pylint: disable=line-too-long\n return env_dict\n\n\ndef merge_nested_environment_dicts(env_dicts, env_name=None, env_root=None):\n \"\"\"Return single-level dictionary from dictionary of dictionaries.\"\"\"\n # If the 
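# --- Editor's sketch: behaviour of merge_dicts above ---
# A deep merge recurses into nested dicts and concatenates lists; a shallow
# merge just lets dict2 win.
d1 = {'a': {'x': 1}, 'l': [1]}
d2 = {'a': {'y': 2}, 'l': [2]}
merge_dicts(d1, d2)                                # {'a': {'x': 1, 'y': 2}, 'l': [1, 2]}
merge_dicts({'k': 1}, {'k': 2}, deep_merge=False)  # {'k': 2}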
provided dictionary is just a single \"level\" (no nested\n # environments), it applies to all environments\n if all(isinstance(val, (six.string_types, list))\n for (_key, val) in env_dicts.items()):\n return flatten_path_lists(env_dicts, env_root)\n\n if env_name is None:\n if env_dicts.get('*'):\n return flatten_path_lists(env_dicts.get('*'), env_root)\n return {}\n\n if not env_dicts.get('*') and not env_dicts.get(env_name):\n return {}\n\n combined_dicts = merge_dicts(env_dicts.get('*', {}),\n env_dicts.get(env_name, {}))\n return flatten_path_lists(combined_dicts, env_root)\n\n\ndef find_cfn_output(key, outputs):\n \"\"\"Return CFN output value.\"\"\"\n for i in outputs:\n if i['OutputKey'] == key:\n return i['OutputValue']\n return None\n\n\ndef get_embedded_lib_path():\n \"\"\"Return path of embedded libraries.\"\"\"\n return os.path.join(\n os.path.dirname(os.path.abspath(__file__)),\n 'embedded'\n )\n\n\ndef get_hash_for_filename(filename, hashfile_path):\n \"\"\"Return hash for filename in the hashfile.\"\"\"\n filehash = ''\n with open(hashfile_path, 'r') as stream:\n for _cnt, line in enumerate(stream):\n if line.rstrip().endswith(filename):\n filehash = re.match(r'^[A-Za-z0-9]*', line).group(0)\n break\n if filehash:\n return filehash\n raise AttributeError(\"Filename %s not found in hash file\" % filename)\n\n\n@contextmanager\ndef ignore_exit_code_0():\n \"\"\"Capture exit calls and ignore those with exit code 0.\"\"\"\n try:\n yield\n except SystemExit as exit_exc:\n if exit_exc.code != 0:\n raise\n\n\ndef fix_windows_command_list(commands):\n # type: (List[str]) -> List[str]\n \"\"\"Return command list with working Windows commands.\n\n npm on windows is npm.cmd, which will blow up\n subprocess.check_call(['npm', '...'])\n\n Similar issues arise when calling python apps like pipenv that will have\n a windows-only suffix applied to them\n \"\"\"\n fully_qualified_cmd_path = which(commands[0])\n if fully_qualified_cmd_path:\n commands[0] = os.path.basename(fully_qualified_cmd_path)\n return commands\n\n\ndef run_commands(commands, # type: List[Union[str, List[str], Dict[str, Union[str, List[str]]]]]\n directory, # type: str\n env=None # type: Optional[Dict[str, Union[str, int]]]\n ): # noqa\n # type: (...) 
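-> None\n    \"\"\"Run list of commands.\n\n    Each step may be a plain string (split on spaces), an argv-style list,\n    or a dict with a 'command' key and an optional 'cwd' key. A hypothetical\n    steps value: ['npm ci', ['npm', 'run', 'build'],\n    {'command': 'make', 'cwd': 'web'}].\n    \"\"\"\n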
    if env is None:\n        env = os.environ.copy()\n    for step in commands:\n        if isinstance(step, (list, six.string_types)):\n            execution_dir = directory\n            raw_command = step\n        elif step.get('command'):  # dictionary\n            execution_dir = os.path.join(directory,\n                                         step.get('cwd')) if step.get('cwd') else directory  # noqa pylint: disable=line-too-long\n            raw_command = step['command']\n        else:\n            raise AttributeError(\"Invalid command step: %s\" % step)\n        command_list = raw_command.split(' ') if isinstance(raw_command, six.string_types) else raw_command  # noqa pylint: disable=line-too-long\n        if platform.system().lower() == 'windows':\n            command_list = fix_windows_command_list(command_list)\n\n        with change_dir(execution_dir):\n            failed_to_find_error = \"Attempted to run \\\"%s\\\" and failed to find it (are you sure it is installed and added to your PATH?)\" % command_list[0]  # noqa pylint: disable=line-too-long\n            if sys.version_info[0] < 3:\n                # Legacy exception version for python 2\n                try:\n                    check_call(command_list, env=env)\n                except OSError:\n                    print(failed_to_find_error, file=sys.stderr)\n                    sys.exit(1)\n            else:\n                try:\n                    check_call(command_list, env=env)\n                # The noqa/pylint overrides can be dropped alongside python 2\n                except FileNotFoundError:  # noqa pylint: disable=undefined-variable\n                    print(failed_to_find_error, file=sys.stderr)\n                    sys.exit(1)\n\n\ndef md5sum(filename):\n    \"\"\"Return MD5 hash of file.\"\"\"\n    md5 = hashlib.md5()\n    with open(filename, 'rb') as stream:\n        while True:\n            data = stream.read(65536)  # 64kb chunks\n            if not data:\n                break\n            md5.update(data)\n    return md5.hexdigest()\n\n\ndef sha256sum(filename):\n    \"\"\"Return SHA256 hash of file.\"\"\"\n    sha256 = hashlib.sha256()\n    mem_view = memoryview(bytearray(128 * 1024))\n    with open(filename, 'rb', buffering=0) as stream:\n        for i in iter(lambda: stream.readinto(mem_view), 0):\n            sha256.update(mem_view[:i])\n    return sha256.hexdigest()\n\n\ndef strip_leading_option_delim(args):\n    \"\"\"Remove leading -- if present.\n\n    Using the \"--\" end of options syntax bypasses docopt's parsing of options.\n    \"\"\"\n    if len(args) > 1:\n        if args[0] == '--':\n            return args[1:]\n    return args\n\n\n@contextmanager\ndef use_embedded_pkgs(embedded_lib_path=None):\n    \"\"\"Temporarily prepend embedded packages to sys.path.\"\"\"\n    if embedded_lib_path is None:\n        embedded_lib_path = get_embedded_lib_path()\n\n    old_sys_path = list(sys.path)\n    sys.path.insert(\n        1,  # https://stackoverflow.com/a/10097543\n        embedded_lib_path\n    )\n    try:\n        yield\n    finally:\n        sys.path = old_sys_path\n\n\ndef which(program):\n    \"\"\"Mimic 'which' command behavior.\"\"\"\n    def is_exe(fpath):\n        \"\"\"Determine if program exists and is executable.\"\"\"\n        return os.path.isfile(fpath) and os.access(fpath, os.X_OK)\n\n    def get_extensions():\n        \"\"\"Get PATHEXT if it exists, otherwise use the default.\"\"\"\n        # single ;-delimited string (not a list) so the split(';') below works\n        exts = '.COM;.EXE;.BAT;.CMD;.VBS;.VBE;.JS;.JSE;.WSF;.WSH;.MSC'\n\n        if os.environ.get('PATHEXT', []):\n            exts = os.environ['PATHEXT']\n\n        return exts.split(';')\n\n    fname, file_ext = os.path.splitext(program)\n    fpath, fname = os.path.split(program)\n\n    if not file_ext and platform.system().lower() == 'windows':\n        fnames = [fname + ext for ext in get_extensions()]\n    else:\n        fnames = [fname]\n\n    for i in fnames:\n        if fpath:\n            exe_file = os.path.join(fpath, i)\n            if is_exe(exe_file):\n                return exe_file\n        else:\n            for path in os.environ.get('PATH').split(os.pathsep) if 'PATH' in os.environ else [os.getcwd()]:  # noqa pylint: disable=line-too-long\n
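                # check each directory on the search path for this candidate\n                exe_file = 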
os.path.join(path, i)\n                if is_exe(exe_file):\n                    return exe_file\n\n    return None\n","sub_path":"runway/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":18071,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} {"seq_id":"292343525","text":"def mostFrequentItem(arr):\n    newlst=list(set(arr))\n    out=[]\n    ganti = True\n    while ganti == True:\n        ganti = False\n        for i in range(len(newlst)-1):\n            vardumb=0\n            if arr.count(newlst[i+1])<arr.count(newlst[i]):\n                vardumb = newlst[i]\n                newlst[i] = newlst[i+1]\n                newlst[i+1] = vardumb\n                ganti = True\n    for i in newlst:\n        inpiut=i+'({})'.format(arr.count(i))\n        out.append(inpiut)\n    b=', '.join(out)\n    return b\nprint('')\nprint(mostFrequentItem(['asus', 'asus', 'samsung', 'iphone', 'iphone', 'asus', 'asus']))\n# 'samsung(1), iphone(2), asus(4)'\nprint('')\nprint(mostFrequentItem(['9', 'b', 'b', 'c', '9', '9', 'b', '9', '2', '2']))\n# 'c(1), 2(2), b(3), 9(4)'\nprint('')\nprint(mostFrequentItem(['book', 'laptop', 'iPod']))\n# 'book(1), laptop(1), iPod(1)'\nprint('') ","sub_path":"Alta Batch 4/Phase 1/Week 1/Day 5/Algo DS # Problem Pencarian dan Pengurutan/4 - The Most Frequent Item.py","file_name":"4 - The Most Frequent Item.py","file_ext":"py","file_size_in_byte":876,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} {"seq_id":"318769138","text":"# -*- coding: utf-8 -*-\nimport pandas as pd\nimport os\nimport numpy as np\nimport h5py\n\ndataset = \"avazu\"\n\ndatapath = \"../data/{}/\".format(dataset)\n\ndef load_hdf5(data_path, key=\"data\"):\n    with h5py.File(data_path, 'r') as hf:\n        data_array = hf[key][:]\n    return data_array\n\ntrain_df = pd.read_csv(\"../data/{}/{}_train.csv\".format(dataset, dataset))\nvalid_df = pd.read_csv(\"../data/{}/{}_valid.csv\".format(dataset, dataset))\ntest_df = pd.read_csv(\"../data/{}/{}_test.csv\".format(dataset, dataset))\n\n\n\nfeature_columns = [col for col in train_df.columns if col != \"click\"]\n\ntrain_y = train_df.iloc[:, -1]\nvalid_y = valid_df.iloc[:, -1]\ntest_y = test_df.iloc[:, -1]\n\ntrain_i = train_df.iloc[:, 0:-1]\nvalid_i = valid_df.iloc[:, 0:-1]\ntest_i = test_df.iloc[:, 0:-1]\n\ntrain_v = pd.DataFrame(np.ones(train_df[feature_columns].shape),columns=feature_columns) \nvalid_v = pd.DataFrame(np.ones(valid_df[feature_columns].shape),columns=feature_columns) \ntest_v = pd.DataFrame(np.ones(test_df[feature_columns].shape),columns=feature_columns) \n\ntrain_i.to_csv(os.path.join(datapath, \"train_i.csv\"), index=False, header=None)\nvalid_i.to_csv(os.path.join(datapath, \"valid_i.csv\"), index=False, header=None)\ntest_i.to_csv(os.path.join(datapath, \"test_i.csv\"), index=False, header=None)\n\ntrain_v.to_csv(os.path.join(datapath, \"train_v.csv\"), index=False, header=None)\nvalid_v.to_csv(os.path.join(datapath, \"valid_v.csv\"), index=False, header=None)\ntest_v.to_csv(os.path.join(datapath, \"test_v.csv\"), index=False, header=None)\n\ntrain_y.to_csv(os.path.join(datapath, \"train_y.csv\"), index=False, header=None)\nvalid_y.to_csv(os.path.join(datapath, \"valid_y.csv\"), index=False, header=None)\ntest_y.to_csv(os.path.join(datapath, \"test_y.csv\"), index=False, header=None)\n\nprint(\"Finish converting.\")","sub_path":"script/convert_avazu.py","file_name":"convert_avazu.py","file_ext":"py","file_size_in_byte":1796,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} {"seq_id":"651897219","text":"# -*- coding: utf-8 -*-\nfrom __future__ import 
unicode_literals\n\nfrom django.db import models, migrations\nfrom django.conf import settings\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Post',\n fields=[\n ('id', models.AutoField(verbose_name='ID', primary_key=True, serialize=False, auto_created=True)),\n ('title', models.CharField(verbose_name='Tytuł', max_length=50)),\n ('slug', models.SlugField(unique=True, max_length=30, verbose_name='nazwa w URL')),\n ('content', models.TextField(blank=True, help_text='Markdown', null=True, verbose_name='treść')),\n ('publish_date', models.DateTimeField(verbose_name='data opublikowania')),\n ('hidden', models.BooleanField(default=True, help_text='widoczny tylko dla członków ekipy z linkiem do postu', verbose_name='ukryty')),\n ('author', models.ForeignKey(to=settings.AUTH_USER_MODEL, verbose_name='autor')),\n ],\n options={\n 'verbose_name': 'posty',\n },\n ),\n ]\n","sub_path":"blog/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":1240,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"127065642","text":"import matplotlib.pyplot as plt\nimport numpy as np\n\n\n#get values from txt file\ndef get_results_from_file(fileName):\n data = []\n f = open(fileName, \"r\")\n for line in f:\n line = line[:-1] #remove the newline character\n data.append(float(line))\n data = np.array(data)\n return data\n\n\n#VGG16\ndata2 = get_results_from_file(\"results/vgg161557602241.8598354.txt\")\nax2, = plt.plot(data2, 'b', marker='H')\n\n\n#VGG19\ndata3 = get_results_from_file(\"results/vgg191557604871.2468514.txt\")\nax3, = plt.plot(data3, 'r', marker='*')\n\n\nplt.ylabel('Validation Accuracy')\nplt.xlabel('Epoch')\nplt.title(\"Model Performance\")\nplt.legend([ax2, ax3], ['VGG16', 'VGG19'], loc='best')\nplt.savefig('saved_plots/performances_pre-trained_NNs.png', bbox_inches='tight')\nplt.show()","sub_path":"stats_pre_trained.py","file_name":"stats_pre_trained.py","file_ext":"py","file_size_in_byte":780,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"306707599","text":"import json\nimport sys\nimport os\nimport pprint\nsys.path.append(\n os.path.join(\n os.environ['CCETC_ROOT']\n )\n)\nimport ccetc_py.info\nfrom lib.project import project\n\n#Init projects\nprojects = {}\nwith open(\"res/project_list.json\") as f:\n projects_json = json.loads(f.read())\nfor project_json in projects_json:\n p = project( project_json )\n projects[project_json[\"name\"]] = p\n\n#Init nodes and groups\nnodes = ccetc_py.info.nodes()\ngroups = ccetc_py.info.groups()\n\ndef getProject(project):\n '''If it exists, returns project. Else, returns None'''\n if project in projects: return projects[project]\n else: return None\n\ndef getNode(node):\n '''If it exists, returns node object. Else, returns None'''\n if node in nodes: return nodes[node]\n else: return None\n\ndef getGroup(group):\n '''If it exists, returns group dict. 
Else, returns empty dict'''\n    if group in groups: return groups[group]\n    else: return {}\n","sub_path":"lib/box.py","file_name":"box.py","file_ext":"py","file_size_in_byte":944,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} {"seq_id":"280932022","text":"import re\n\nfrom nltk.corpus import stopwords\nfrom nltk.stem import SnowballStemmer\nfrom nltk.tokenize import sent_tokenize, word_tokenize\n\n\ndef tokenize_word(text):\n    \"\"\" Tokenize a text into words using NLTK\n\n    Filter punctuations (non-alphanumeric characters) except period.\n    Tokenize text into words.\n    Filter periods.\n    Filter stop words and empty words.\n    Stem words.\n\n    Args:\n        text: Text to be tokenized into words.\n    Returns:\n        A list of word tokens.\n    \"\"\"\n\n    text = text.lower()\n\n    pattern = re.compile(r'[^a-zA-Z.]+')\n    text = pattern.sub(' ', text)\n\n    word_tokens = word_tokenize(text)\n\n    pattern = re.compile(r'\\.+')\n    word_tokens = [pattern.sub('', word) for word in word_tokens]\n\n    stop_words = set(stopwords.words('english'))\n    word_tokens = [word for word in word_tokens if word and word not in stop_words]\n\n    stemmer = SnowballStemmer('english')\n    word_tokens = [stemmer.stem(word) for word in word_tokens]\n\n    return word_tokens\n\n\ndef tokenize_sentence(text):\n    \"\"\"Tokenize a text into sentences using NLTK\n\n    Args:\n        text: Text to be tokenized into sentences.\n    \"\"\"\n\n    sentence_tokens = sent_tokenize(text, language='english')\n\n    return sentence_tokens\n","sub_path":"tokenizer.py","file_name":"tokenizer.py","file_ext":"py","file_size_in_byte":1350,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} {"seq_id":"330552863","text":"secret = \"Lbh zhfg hayrnea, jung lbh unir yrnearq.\"\n# secret = \"hello\"\noffset = 13\nalphabet = 'abcdefghijklmnopqrstuvwxyz'\nresult = ''\nfor char in secret:\n    ascii_code = ord(char)\n    is_uppercase = ascii_code >= 65 and ascii_code <= 90\n    char = char.lower()\n    if char not in alphabet:\n        new_char = char\n    else:\n        idx = alphabet.find(char)\n        new_idx = idx + offset\n        if new_idx > 25:\n            new_idx = new_idx - 26\n        new_char = alphabet[new_idx]\n    if is_uppercase:\n        new_char = new_char.upper()\n    result += new_char\nprint(result)","sub_path":"ceasar.py","file_name":"ceasar.py","file_ext":"py","file_size_in_byte":589,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} {"seq_id":"291498588","text":"# file: input_validator.py\r\n\r\ndef get_float(prompt):\r\n\r\n    ''' Prompt the user for a floating\r\n    point value. prompt: the prompt\r\n    to use. Returns a valid float.\r\n    '''\r\n\r\n    valid_input = False\r\n    while not valid_input:\r\n        try:\r\n            user_float = float(input(prompt))\r\n            valid_input = True\r\n\r\n        except ValueError:\r\n            print(\"Input error: try again\")\r\n\r\n    # valid float, return value\r\n\r\n    return user_float\r\n    \r\n","sub_path":"Python_Old/201_Using Python 3/input_validator.py","file_name":"input_validator.py","file_ext":"py","file_size_in_byte":483,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} {"seq_id":"46401844","text":"from dao.mongo.mongo_connector_singleton import MongoConnectorSingleton\nfrom config.runtime_config import RuntimeConfig\nimport pandas as pd\n\n\nclass MongoAdapter:\n    \"\"\"\n    A class for transactions with MongoDB.\n    \"\"\"\n\n    __connection__ = None\n
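\n    # Hypothetical usage sketch: MongoAdapter().find_one(42) returns the ad\n    # document whose ad_id is 42, or None if no such document exists.\n\n    def 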
__init__(self):\n self.get_connection()\n self.db = self.__connection__.tapsell\n self.ad_ctr_collection = self.db.ad_ctr_collection\n self.system_requests_stats_collection = self.db.system_requests_stats_collection\n\n @classmethod\n def get_connection(cls, new: bool = False):\n \"\"\"Get connection singleton\n\n :param new: flag indicates to create new connection singleton\n :return: class instance collection\n \"\"\"\n if new or not cls.__connection__:\n cls.__connection__ = MongoConnectorSingleton().create_connection()\n return cls.__connection__\n\n def insert_many(self, documents: list) -> None:\n try:\n self.ad_ctr_collection.insert_many(documents=documents)\n except Exception as e:\n raise Exception(f'Document initial insertions failed!, {str(e)}')\n\n def create_index_on_field(self, field: str) -> None:\n \"\"\" Create index on a field in ad_ctr_collection\n\n usage: invoked after db initialization\n :param field: string\n :return:\n \"\"\"\n try:\n self.ad_ctr_collection.create_index(keys=field)\n except Exception as e:\n raise Exception(f'Index creation failed!, {str(e)}')\n\n def find_one(self, ad_id: int):\n \"\"\"Query database to find document by field ad_id\n\n :param ad_id: int\n :return: mongodb cursor object\n \"\"\"\n res = self.ad_ctr_collection.find_one({\"ad_id\": ad_id})\n return res\n\n def insert_new_stat(self, received_at: int, response_time: float) -> None:\n try:\n self.system_requests_stats_collection.insert({\n \"received_at\": received_at,\n \"response_time\": response_time\n })\n except Exception as e:\n raise Exception(f'Document insertion failed!, {str(e)}')\n\n @property\n def distinct_ads(self) -> list:\n \"\"\"Class property to fetch distinct documents by ad_id\n\n :return: list of documents\n \"\"\"\n try:\n return self.ad_ctr_collection.distinct(key='ad_id')\n except:\n raise Exception('Database Not Available!')\n\n @property\n def system_requests_stats_dataframe(self):\n \"\"\"Class property to fetch db collection and convert it to pandas dataframe\n\n :return: pandas.DataFrame\n \"\"\"\n try:\n cursor = self.system_requests_stats_collection.find({})\n df = pd.DataFrame(list(cursor))\n return df\n except:\n raise Exception('Database Not Available!')\n\n @property\n def count_system_requests_stats_collection(self) -> int:\n \"\"\"Class property to count documents in collection\n\n :return: int\n \"\"\"\n try:\n cursor = self.system_requests_stats_collection.find({})\n return cursor.count()\n except:\n raise Exception('Database Not Available!')\n\n def retrieve_all(self):\n \"\"\" Retrieve distinct documents\n\n :return: db cursor object\n \"\"\"\n\n try:\n distinct = self.distinct_ads\n # TODO : We assume the db collection is normalized, with no redundancy\n\n res = self.ad_ctr_collection.find({\n \"ad_id\": {\n \"$in\": distinct\n }\n })\n return res\n except:\n raise Exception('Database Not Available!')","sub_path":"app/dao/mongo/mongo_adapter.py","file_name":"mongo_adapter.py","file_ext":"py","file_size_in_byte":3698,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"530437388","text":"from PyQt5 import QtWidgets\nfrom PyQt5.QtCore import QThread, pyqtSignal, QCoreApplication, QRect, QMetaObject\nfrom PyQt5.QtGui import QFont, QIcon\nfrom sys import argv, exit\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium import webdriver\nfrom requests import post\nfrom time 
import sleep, strftime, localtime, time\nfrom json import loads\nfrom re import findall\n\n# 保存视频播放进度\nsave_progress_api = \"http://study.foton.com.cn/els/html/courseStudyItem/courseStudyItem.saveProgress.do\"\n# 同步刷新记录\nupdate_time_api = \"http://study.foton.com.cn/els/html/courseStudyItem/courseStudyItem.updateTimestepByUserTimmer.do\"\n# 获取课程包含的小节信息\nload_course_api = \"http://study.foton.com.cn/els/html/courseStudyItem/courseStudyItem.loadCourseItemTree.do\"\n# 选课接口,包含location信息\nselect_resource_api = \"http://study.foton.com.cn/els/html/courseStudyItem/courseStudyItem.selectResource.do?vbox_server=&fromNetWorkSetting=false\"\n# 确认选课接口\nstudy_check_api_tmp = \"http://study.foton.com.cn/els/html/coursestudyrecord/coursestudyrecord.studyCheck.do?courseId={}&scoId={}\"\n# 查看小节学习进度\nscols_complate_api_tmp = \"http://study.foton.com.cn/els/html/courseStudyItem/courseStudyItem.scoIsComplate.do?courseId={}&processType=THREESCREEN\"\n# 单分屏获取播放进度api\none_screen_save_progress_api = \"http://study.foton.com.cn/els/html/courseStudyItem/courseStudyItem.saveCoursePrecent.do\"\n\ncourse_id_list = []\ncookie = {}\ncourse_info_list = []\ncompleted_list = []\nvideo_id_list = []\nvideo_name_list = []\ncourse_url_list = []\ncourse_name = ''\nONESCREEN = -1\n\nheader = {\n 'Accept': '*/*',\n 'Accept-Encoding': 'gzip, deflate',\n 'Accept-Language': 'zh-CN,zh;q=0.9',\n 'Connection': 'keep-alive',\n 'Content-Length': '140',\n 'Content-Type': 'application/x-www-form-urlencoded',\n 'Host': 'study.foton.com.cn',\n 'Origin': 'http://study.foton.com.cn',\n 'Referer': 'http://study.foton.com.cn/els/flash/elnFlvPlayer.swf?v=4.0.2',\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.122 Safari/537.36',\n 'X-Requested-With': 'XMLHttpRequest'}\n\ndata = {\n 'courseId': '',\n 'scoId': '',\n 'progress_measure': '100',\n 'session_time': '60:01',\n 'location': '3601'\n }\n\ndata_single = {\n 'courseId': '',\n 'playTime': '9999'\n}\n\nselect_video_data = {\n 'courseId': '',\n 'scoId': '',\n 'firstLoad': 'true',\n 'Location': '0',\n 'elsSign': ''\n}\n\n\nclass OpenBroswerThread(QThread):\n signal = pyqtSignal(str)\n\n def __init__(self):\n super(OpenBroswerThread, self).__init__()\n\n def run(self):\n self.signal.emit(\"正在打开登录页面,请登录后进入课程视频播放页面,回到程序选课\\n\")\n global driver\n driver = webdriver.Firefox(executable_path=\"./geckodriver.exe\")\n # print(\"正在打开登录页面,请登录后进入课程视频播放页面,然后回到程序继续执行\")\n driver.get(\"http://study.foton.com.cn\")\n driver.maximize_window()\n\n\nclass SelectCourseThread(QThread):\n signal = pyqtSignal(str)\n\n def __init__(self):\n super(SelectCourseThread, self).__init__()\n\n def run(self):\n \"\"\"\n 提取courseId\n \"\"\"\n driver.switch_to.window(driver.window_handles[1])\n course_url_list.append(driver.current_url)\n course_id = findall(r\"courseId=(.*)&vb_server=&\", driver.current_url)[0]\n course_id_list.append(course_id)\n msg = \"已选取{}门课\".format(len(course_id_list))\n self.signal.emit(msg + '\\n')\n\n\nclass StudyCousre(QThread):\n signal = pyqtSignal(str)\n\n def __init__(self):\n super(StudyCousre, self).__init__()\n\n def get_cookie(self):\n cookie_list = driver.get_cookies()\n for single_cookie in cookie_list:\n cookie[single_cookie['name']] = single_cookie['value']\n\n def show_time(self):\n cur_time = strftime('%Y-%m-%d %H:%M:%S', localtime(time()))\n self.signal.emit(cur_time)\n # print(time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time())))\n\n def load_course(self, course_id):\n \"\"\"\n 加载课程信息,课程名称,视频名称,视频ID\n 
\"\"\"\n global course_info_list\n global course_name\n\n try:\n loaded = post(load_course_api, headers=header, cookies=cookie,\n data={'elsSign': cookie['eln_session_id'], 'courseId': course_id}, timeout=(15, 15))\n except:\n self.signal.emit(\"加载视频信息出错\\n\")\n # print(\"加载视频信息出错\")\n else:\n if len(loaded.text) != 0:\n try:\n course_info_orign = loads(loaded.text)\n except:\n pass\n else:\n course_name = course_info_orign[0]['text']\n self.show_time()\n self.signal.emit(\"课程名称是:《{}》,开始学习\\n\".format(course_name))\n # print(\"课程名称是:《{}》,开始学习 \".format(course_name))\n # 一部分在[0]['children']['0']['children'],另一部分课程在['0']['children']下\n\n # print(course_info_orign[0]['children'][0]['children'])\n # print(course_info_orign[0]['children'])\n\n if len(course_info_orign[0]['children'][0]['children']) == 1:\n course_info_list = course_info_orign[0]['children']\n c = True\n else:\n for chapter in course_info_orign[0]['children']:\n # course_info_list = course_info_orign[0]['children'][0]['children']\n course_info_list += chapter['children']\n c = False\n # print(course_info_list)\n\n if not c:\n for course_info in course_info_list:\n # video_id_list是全局变量第二次学习时并不会覆盖第一次的id\n video_id_list.append(course_info['id'])\n video_name_list.append(course_info['text'])\n else:\n for course_info in course_info_list:\n video_id_list.append(course_info['children'][0]['id'])\n video_name_list.append(course_info['children'][0]['text'])\n # print(video_id_list)\n # print(video_name_list)\n\n def get_completed_video_list(self, course_id):\n \"\"\"\n 获取已经完成的视频列表\n \"\"\"\n if ONESCREEN != 1:\n global completed_list\n scols_complate_api = scols_complate_api_tmp.format(course_id)\n try:\n c = post(scols_complate_api, headers=header, cookies=cookie,\n data={'elsSign': cookie['eln_session_id']}, timeout=(15, 15))\n except:\n self.signal.emit(\"获取视频完成列表出错\\n\")\n # print(\"获取视频完成列表出错\")\n else:\n if len(c.text) != 0:\n completed_list = loads(c.text)\n\n def select_video(self, course_id, video_id):\n global ONESCREEN\n if ONESCREEN != 1:\n select_video_data['courseId'] = course_id\n select_video_data['scoId'] = video_id\n select_video_data['elsSign'] = cookie['eln_session_id']\n post(select_resource_api, headers=header, cookies=cookie, data=select_video_data, timeout=(15, 15))\n study_check_api = study_check_api_tmp.format(course_id, video_id)\n post(study_check_api, headers=header, cookies=cookie, data={'elsSign': cookie['eln_session_id']},\n timeout=(15, 15))\n\n def course_finished(self, course_id):\n \"\"\"\n 判断课程是否学习完毕\n \"\"\"\n global ONESCREEN\n if ONESCREEN != 1:\n\n if len(completed_list) == len(course_info_list):\n return True\n else:\n return False\n else:\n data_single['courseId'] = course_id\n try:\n sr = post(one_screen_save_progress_api, headers=header, cookies=cookie, data=data_single, timeout=(15, 15))\n except:\n self.signal.emit(\"获取进度失败\")\n else:\n sr_data = sr.text\n if len(sr_data) != 0:\n try:\n sr_dict = loads(sr_data)\n except:\n # print(\"HTTP Status 500 服务器内部错误\")\n pass\n else:\n if 'courseProgress' in sr_dict:\n if sr_dict['courseProgress'] == '100':\n return True\n else:\n return False\n else:\n return False\n\n def video_finished(self, course_id, video_id, video_name):\n \"\"\"\n 判断视频是否播放完毕\n \"\"\"\n global course_name\n data['courseId'] = course_id\n data['scoId'] = video_id\n self.get_completed_video_list(course_id)\n\n # print(course_id)\n # print(video_id)\n # print(completed_list)\n\n if video_id in completed_list:\n return True\n try:\n r = post(save_progress_api, headers=header, 
cookies=cookie, data=data, timeout=(15, 15))\n except:\n self.signal.emit(\"获取视频播放进度时出错\\n\")\n # print(\"获取视频播放进度时出错\")\n else:\n r_data = r.text\n # print(r.text)\n if len(r_data) != 0:\n # print(r_data)\n try:\n r_dict = loads(r_data)\n except:\n self.signal.emit(\"HTTP Status 500 服务器内部错误\\n\")\n # print(\"HTTP Status 500 服务器内部错误\")\n else:\n if 'completed' in r_dict:\n if r_dict['completed'] == 'true':\n return True\n else:\n self.show_time()\n self.signal.emit(\"{} 视频播放进度{}%,《{}》课程学习进度{}%\\n\".format(video_name,\n r_dict['completeRate'], course_name, r_dict['courseProgress']))\n # print(\"视频播放进度{}%,课程学习进度{}%\".format(r_dict['completeRate'], r_dict['courseProgress']))\n return False\n else:\n return False\n else:\n return False\n\n def clear_list(self):\n video_id_list.clear()\n video_name_list.clear()\n completed_list.clear()\n course_info_list.clear()\n\n def print_list(self):\n print(video_id_list)\n print(video_name_list)\n print(completed_list)\n print(course_info_list)\n\n def run(self):\n global course_name, ONESCREEN\n for i, course_url in enumerate(course_url_list):\n self.show_time()\n self.signal.emit(\"开始学习第{}门课\\n\".format(i+1))\n driver.get(course_url)\n course_id = course_id_list[i]\n # print(course_id)\n try:\n # ele存在说明是双分屏或者三分屏\n # ele = driver.find_element_by_id('vodtree')\n ele = WebDriverWait(driver, 5, 0.5).until(\n EC.presence_of_element_located((By.ID, 'vodtree')))\n except:\n ONESCREEN = 1\n else:\n ONESCREEN = 0\n div = WebDriverWait(driver, 5, 0.5).until(\n EC.presence_of_element_located((By.CLASS_NAME, 'barleft')))\n if ONESCREEN == 1:\n course_name = div.text.strip(\"网络设置\")\n else:\n course_name = div.text\n self.get_cookie()\n self.load_course(course_id)\n self.get_completed_video_list(course_id)\n if self.course_finished(course_id):\n self.show_time()\n self.signal.emit(\"《{}》课程全部视频学习完毕\\n\".format(course_name))\n # print(\"《{}》课程全部视频学习完毕\".format(course_name))\n self.clear_list()\n continue\n else:\n for j, video_id in enumerate(video_id_list):\n video_name = video_name_list[j]\n self.show_time()\n self.signal.emit(\"开始学习 {} 视频\\n\".format(video_name))\n # print(\"开始学习 {} 视频\".format(video_name))\n self.select_video(course_id, video_id)\n while True:\n if self.video_finished(course_id, video_id, video_name):\n self.show_time()\n self.signal.emit(\"{} 视频学习完毕\\n\".format(video_name))\n # print(\"{} 视频学习完毕\".format(video_name))\n break\n else:\n post(update_time_api, headers=header, cookies=cookie,\n data={'elsSign': cookie['eln_session_id']}, timeout=(15, 15))\n sleep(180)\n self.get_completed_video_list(course_id)\n if self.course_finished(course_id):\n self.show_time()\n self.signal.emit(\"《{}》课程全部视频学习完毕\\n\".format(course_name))\n # print(\"《{}》课程全部视频学习完毕\".format(course_name))\n self.clear_list()\n sleep(1)\n self.clear_list()\n sleep(1)\n ONESCREEN = -1\n course_id_list.clear()\n course_url_list.clear()\n\n\nclass Ui_MainWindow(object):\n def setupUi(self, MainWindow):\n MainWindow.setObjectName(\"MainWindow\")\n MainWindow.resize(1000, 750)\n self.centralwidget = QtWidgets.QWidget(MainWindow)\n self.centralwidget.setStyleSheet(\"\")\n self.centralwidget.setObjectName(\"centralwidget\")\n self.gridLayout = QtWidgets.QGridLayout(self.centralwidget)\n self.gridLayout.setObjectName(\"gridLayout\")\n self.verticalLayout = QtWidgets.QVBoxLayout()\n self.verticalLayout.setObjectName(\"verticalLayout\")\n self.horizontalLayout = QtWidgets.QHBoxLayout()\n self.horizontalLayout.setContentsMargins(-1, 0, -1, -1)\n 
self.horizontalLayout.setObjectName(\"horizontalLayout\")\n self.pushButton = QtWidgets.QPushButton(self.centralwidget)\n sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)\n sizePolicy.setHorizontalStretch(0)\n sizePolicy.setVerticalStretch(0)\n sizePolicy.setHeightForWidth(self.pushButton.sizePolicy().hasHeightForWidth())\n self.pushButton.setSizePolicy(sizePolicy)\n font = QFont()\n font.setFamily(\"Microsoft YaHei\")\n font.setPointSize(20)\n self.pushButton.setFont(font)\n self.pushButton.setObjectName(\"pushButton\")\n self.horizontalLayout.addWidget(self.pushButton)\n self.pushButton_2 = QtWidgets.QPushButton(self.centralwidget)\n sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)\n sizePolicy.setHorizontalStretch(0)\n sizePolicy.setVerticalStretch(0)\n sizePolicy.setHeightForWidth(self.pushButton_2.sizePolicy().hasHeightForWidth())\n self.pushButton_2.setSizePolicy(sizePolicy)\n font = QFont()\n font.setFamily(\"Microsoft YaHei\")\n font.setPointSize(20)\n self.pushButton_2.setFont(font)\n self.pushButton_2.setObjectName(\"pushButton_2\")\n self.horizontalLayout.addWidget(self.pushButton_2)\n self.pushButton_3 = QtWidgets.QPushButton(self.centralwidget)\n sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)\n sizePolicy.setHorizontalStretch(0)\n sizePolicy.setVerticalStretch(0)\n sizePolicy.setHeightForWidth(self.pushButton_3.sizePolicy().hasHeightForWidth())\n self.pushButton_3.setSizePolicy(sizePolicy)\n font = QFont()\n font.setFamily(\"Microsoft YaHei\")\n font.setPointSize(20)\n self.pushButton_3.setFont(font)\n self.pushButton_3.setObjectName(\"pushButton_3\")\n self.horizontalLayout.addWidget(self.pushButton_3)\n self.horizontalLayout.setStretch(0, 1)\n self.horizontalLayout.setStretch(1, 1)\n self.horizontalLayout.setStretch(2, 1)\n self.verticalLayout.addLayout(self.horizontalLayout)\n self.textBrowser = QtWidgets.QTextBrowser(self.centralwidget)\n self.textBrowser.setObjectName(\"textBrowser\")\n self.verticalLayout.addWidget(self.textBrowser)\n self.verticalLayout.setStretch(0, 1)\n self.verticalLayout.setStretch(1, 2)\n self.gridLayout.addLayout(self.verticalLayout, 0, 0, 1, 1)\n MainWindow.setCentralWidget(self.centralwidget)\n self.menubar = QtWidgets.QMenuBar(MainWindow)\n self.menubar.setGeometry(QRect(0, 0, 788, 18))\n self.menubar.setObjectName(\"menubar\")\n MainWindow.setMenuBar(self.menubar)\n self.statusbar = QtWidgets.QStatusBar(MainWindow)\n self.statusbar.setObjectName(\"statusbar\")\n MainWindow.setStatusBar(self.statusbar)\n\n self.retranslateUi(MainWindow)\n QMetaObject.connectSlotsByName(MainWindow)\n\n self.pushButton.clicked.connect(self.open_broswer)\n self.pushButton_2.clicked.connect(self.select_course)\n self.pushButton_3.clicked.connect(self.study_course)\n self.cursor = self.textBrowser.textCursor()\n\n def retranslateUi(self, MainWindow):\n _translate = QCoreApplication.translate\n MainWindow.setWindowTitle(_translate(\"MainWindow\", \"一键学习\"))\n self.pushButton.setText(_translate(\"MainWindow\", \"开始学习\"))\n self.pushButton_2.setText(_translate(\"MainWindow\", \"选这门课\"))\n self.pushButton_3.setText(_translate(\"MainWindow\", \"选课结束\\n一键学习\"))\n\n def open_broswer(self):\n self.thread = OpenBroswerThread()\n self.thread.signal.connect(self.set_text_broswer)\n self.thread.start()\n\n def set_text_broswer(self, text):\n # self.textBrowser.setText(text)\n self.textBrowser.append(text)\n 
self.textBrowser.moveCursor(self.cursor.End)\n\n    def select_course(self):\n        self.thread2 = SelectCourseThread()\n        self.thread2.signal.connect(self.set_text_broswer)\n        self.thread2.start()\n\n    def study_course(self):\n        self.thread3 = StudyCousre()\n        self.thread3.signal.connect(self.set_text_broswer)\n        self.thread3.start()\n\n\nif __name__ == '__main__':\n    app = QtWidgets.QApplication(argv)\n    app.setWindowIcon(QIcon(r\"Images/foton.jpg\"))\n    MainWindow = QtWidgets.QMainWindow()\n    ui = Ui_MainWindow()\n    ui.setupUi(MainWindow)\n    MainWindow.show()\n    exit(app.exec_())\n","sub_path":"StudyBotGUI.py","file_name":"StudyBotGUI.py","file_ext":"py","file_size_in_byte":19617,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} {"seq_id":"583814943","text":"\"\"\"\nThis file houses the bulk of our business logic layer.\n\nIt presents all routes used by our web application, and\ndefines the methods that receive user input, perform safety checks,\nand interact with the DAL to perform CRUD operations.\n\"\"\"\n\n# Python library includes\nimport base64\nimport bleach\nimport httplib2\nimport imghdr\nimport json\nimport logging\nlogging.basicConfig()\nlogger = logging.getLogger(__name__)\nfrom oauth2client.client import flow_from_clientsecrets\nfrom oauth2client.client import FlowExchangeError\nimport os\nimport requests\n\n# Third-party includes\nfrom flask import (\n    Flask,\n    redirect,\n    request,\n    send_from_directory,\n    session,\n    )\napp = Flask(__name__)\nPATH_TO_CLIENT_SECRETS = \"/var/www/html/fsndp5/catalog/client_secrets.json\"\nCLIENT_ID = json.loads(\n    open(PATH_TO_CLIENT_SECRETS, \"r\").read())[\"web\"][\"client_id\"]\nfrom werkzeug import secure_filename\n\n# Project-specific includes\nimport dal\nfrom entities import AuthSource, jdefault\nfrom handler_utils import (\n    already_exists_error,\n    bad_request_error,\n    create_atom_response,\n    create_err_response,\n    create_json_response,\n    date_to_atom_friendly,\n    internal_error,\n    not_authenticated_error,\n    not_authorized_error,\n    not_found_error,\n    render,\n    )\nfrom session_utils import (\n    check_nonce,\n    generate_nonce,\n    get_active_user,\n    load_from_session,\n    save_to_session,\n    SessionKeys,\n    set_active_user,\n    )\n\n\n@app.route('/static/<path:filename>')\ndef download_static_file(filename):\n    \"\"\"\n    Safely serves static files, like .css or .js resources.\n    Uses send_from_directory to prevent directory traversal attacks.\n    \"\"\"\n    return send_from_directory(\"/static\", filename, as_attachment=True)\n\n@app.route('/')\ndef dashboard():\n    \"\"\" Serves the splash page for the application. 
\"\"\"\n recent_items = dal.get_recent_items(5)\n return render(\"dashboard.html\", recent_items=recent_items)\n\n\n\n@app.route('/catalog.json')\ndef jsonEndpoint():\n \"\"\" Dumps all categories and items to JSON format \"\"\"\n categories = dal.get_categories()\n items = dal.get_items()\n\n cat_dict = {}\n for i in categories:\n i.items = []\n cat_dict[i.cat_id] = i\n for j in items:\n cat_dict[j.cat_id].items.append(j)\n\n output = json.dumps(cat_dict.values(), default=jdefault)\n return create_json_response(output)\n\n@app.route('/catalog.atom')\ndef atomEndpoint():\n \"\"\"\n Displays recently added items in Atom format.\n Data is formatted as specified in http://atomenabled.org/developers/syndication/\n and was validated against https://validator.w3.org/feed/#validate_by_input/\n \"\"\"\n last_updated = None\n recent_items = dal.get_recent_items(10)\n # Convert the dates to RFC-3339 format for Atom compatibility\n for i in recent_items:\n i.changed = date_to_atom_friendly(i.changed)\n if recent_items:\n last_updated = recent_items[0].changed\n output = render(\"atom.xml\", last_updated=last_updated, items=recent_items)\n return create_atom_response(output)\n\n\n\n@app.route('/catalog/create-cat/', methods=['POST'])\ndef categoryCreate():\n \"\"\"\n Creates a new category owned by the logged-in user\n \"\"\"\n state = request.values.get('state')\n if not check_nonce(state):\n return bad_request_error()\n\n active_user = get_active_user()\n if not active_user:\n return not_authenticated_error()\n\n cat_name = bleach.clean(request.values.get(\"cat_create_name\"))\n duplicate = dal.get_category_by_name(cat_name)\n if duplicate:\n return already_exists_error()\n\n # All checks passed\n generate_nonce()\n cat_id = dal.create_category(cat_name, active_user.user_id)\n return redirect(\"/\")\n\n@app.route('/catalog/delete-cat/', methods=['POST'])\ndef categoryDelete():\n \"\"\"\n Deletes a category owned by the logged-in user\n \"\"\"\n state = request.values.get('state')\n if not check_nonce(state):\n return bad_request_error()\n\n cat_name = bleach.clean(request.values.get(\"cat_delete_name\"))\n cat = dal.get_category_by_name(cat_name)\n if not cat:\n return not_found_error()\n\n active_user = get_active_user()\n if not active_user:\n return not_authenticated_error()\n if active_user.user_id != cat.creator_id:\n return not_authorized_error()\n\n # All checks passed\n generate_nonce()\n dal.delete_category(cat.cat_id)\n return redirect(\"/\")\n\n@app.route('/catalog/update-cat/', methods=['POST'])\ndef categoryUpdate():\n \"\"\"\n Updates a category owned by the logged-in user\n \"\"\"\n state = request.values.get('state')\n if not check_nonce(state):\n return bad_request_error()\n\n old_cat_name = bleach.clean(request.values.get(\"cat_update_old_name\"))\n cat = dal.get_category_by_name(old_cat_name)\n if not cat:\n return not_found_error()\n\n active_user = get_active_user()\n if not active_user:\n return not_authenticated_error()\n if active_user.user_id != cat.creator_id:\n return not_authorized_error()\n\n # All checks passed\n generate_nonce()\n new_cat_name = bleach.clean(request.values.get(\"cat_update_new_name\"))\n dal.update_category(cat.cat_id, new_cat_name)\n return redirect(\"/\")\n\n\n@app.route('/catalog/<cat_name>/<item_name>/')\ndef itemLookupByName(cat_name, item_name):\n \"\"\"\n Looks up an item based on its human-readable item and category names\n \"\"\"\n cat = dal.get_category_by_name(cat_name)\n if not cat:\n return not_found_error()\n\n item = 
dal.get_item_by_name(cat.cat_id, item_name)\n if not item:\n return not_found_error()\n\n # All checks passed\n return render(\"show_item.html\", item=item, active_cat=cat_name, active_item=item_name)\n\n@app.route('/catalog/create-item/', methods=['POST'])\ndef itemCreate():\n \"\"\"\n Creates a new item owned by the logged-in user\n \"\"\"\n state = request.values.get('state')\n if not check_nonce(state):\n return bad_request_error()\n\n cat_name = bleach.clean(request.values.get(\"item_create_parent\"))\n cat = dal.get_category_by_name(cat_name)\n if not cat:\n return not_found_error()\n\n active_user = get_active_user()\n if not active_user:\n return not_authenticated_error()\n\n item_name = bleach.clean(request.values.get(\"item_create_name\"))\n duplicate = dal.get_item_by_name(cat.cat_id, item_name)\n if duplicate:\n return already_exists_error()\n\n try:\n pic_data = validate_picture(request.files[\"item_create_pic\"])\n except InvalidPictureError:\n return bad_request_error()\n\n # All checks passed\n generate_nonce()\n desc = bleach.clean(request.values.get(\"item_create_description\"))\n item_id = dal.create_item(\n item_name, cat.cat_id, active_user.user_id, pic_data, desc)\n if not item_id:\n logging.error(\"Unable to create item: did not receive an item_id from database\")\n return internal_error()\n item = dal.get_item(item_id)\n if not item:\n logging.error(\n \"Unable to create item: an instance was not created for item_id {}\".format(item_id))\n return internal_error()\n return redirect(\"/catalog/{}/{}/\".format(cat_name, item_name))\n\ndef validate_picture(pic):\n \"\"\"\n Uses code from http://flask.pocoo.org/docs/0.10/patterns/fileuploads/\n\n If pic is a valid picture file that can safely be stored in the db,\n return its base64-encoded binary contents.\n If the pic is malformed somehow, throws a descriptive InvalidPictureError.\n \"\"\"\n pic.filename = secure_filename(pic.filename)\n if not pic.filename.endswith(\".jpg\"):\n raise InvalidPictureError(\"Invalid extension\")\n if len(pic.filename) <= 4:\n raise InvalidPictureError(\"Invalid filename length\")\n # Snoop into the file's data to ensure it actually contains a jpg image\n content = pic.read()\n if not imghdr.what(\"\", h=content) == 'jpeg':\n raise InvalidPictureError(\"Invalid file contents\")\n\n # All checks passed\n return base64.b64encode(content)\n\nclass InvalidPictureError(Exception):\n pass\n\n@app.route('/catalog/delete-item/', methods=['POST'])\ndef itemDelete():\n \"\"\"\n Deletes an item owned by the current user\n \"\"\"\n state = request.values.get('state')\n if not check_nonce(state):\n return bad_request_error()\n\n cat_name = bleach.clean(request.values.get(\"item_delete_parent\"))\n cat = dal.get_category_by_name(cat_name)\n if not cat:\n return not_found_error()\n\n active_user = get_active_user()\n if not active_user:\n return not_authenticated_error()\n\n item_name = bleach.clean(request.values.get(\"item_delete_name\"))\n item = dal.get_item_by_name(cat.cat_id, item_name)\n if not item:\n return not_found_error()\n\n if active_user.user_id != item.creator_id:\n return not_authorized_error()\n\n # All checks passed\n generate_nonce()\n dal.delete_item(item.item_id)\n return redirect(\"/\")\n\n@app.route('/catalog/update-item/', methods=['POST'])\ndef itemUpdate():\n \"\"\"\n Selectively update fields on an item owned by the logged-in user\n \"\"\"\n # This will take a few steps. 
Start with loading the old object and performing\n # our usual auth process.\n state = request.values.get('state')\n if not check_nonce(state):\n return bad_request_error()\n\n old_parent_name = bleach.clean(request.values.get(\"item_update_old_parent\"))\n old_parent = dal.get_category_by_name(old_parent_name)\n if not old_parent:\n return not_found_error()\n\n active_user = get_active_user()\n if not active_user:\n return not_authenticated_error()\n\n old_item_name = bleach.clean(request.values.get(\"item_update_old_name\"))\n old_item = dal.get_item_by_name(old_parent.cat_id, old_item_name)\n if not old_item:\n return not_found_error()\n\n # Item was found, security checks out. Now pull in the new values from\n # the request. If a field is empty, it's assumed that the user doesn't\n # want to change it. Set to None so the DAL will skip those.\n new_item_name = bleach.clean(request.values.get(\"item_update_new_name\")) or None\n desc = bleach.clean(request.values.get(\"item_update_description\")) or None\n\n raw_pic_data = request.files[\"item_update_pic\"] or None\n pic_data = None\n try:\n if raw_pic_data:\n pic_data = validate_picture(raw_pic_data)\n except InvalidPictureError:\n return bad_request_error()\n\n new_parent_name = bleach.clean(request.values.get(\"item_update_new_parent\")) or None\n\n new_cat = dal.get_category_by_name(new_parent_name)\n new_cat_id = new_cat.cat_id if new_cat else None\n\n # New values look good. All checks passed.\n generate_nonce()\n dal.update_item(old_item.item_id, name=new_item_name, description=desc,\n pic_id=old_item.pic_id, pic=pic_data, cat_id=new_cat_id)\n redirect_cat = new_parent_name or old_parent_name\n redirect_item = new_item_name or old_item_name\n return redirect(\"/catalog/{}/{}/\".format(redirect_cat, redirect_item))\n\n\n\n@app.route('/logout')\ndef logout():\n \"\"\" Terminates all session data for the user, including login credentials. \"\"\"\n session.clear()\n return redirect(\"/\")\n\n@app.route('/login')\ndef showLogin():\n \"\"\" Creates a nonce and displays the page listing available login options. 
\"\"\"\n return render(\"login.html\")\n\n@app.route('/gconnect', methods=[\"POST\"])\ndef gconnect():\n \"\"\"\n Receives and processes Google Plus login requests.\n Most of this code was adapted from the intro course on authentication.\n \"\"\"\n if not check_nonce(request.args.get('state')):\n return create_err_response(\"Invalid state parameter\", 401)\n code = request.data\n try:\n # Upgrade the authorization code into a credentials object\n scope = \"email profile\"\n oauth_flow = flow_from_clientsecrets(PATH_TO_CLIENT_SECRETS, scope=scope)\n oauth_flow.redirect_uri = \"postmessage\"\n credentials = oauth_flow.step2_exchange(code)\n except FlowExchangeError:\n return create_err_response(\"Failed to upgrade the authorization code\", 401)\n\n # Check that the access token is valid\n access_token = credentials.access_token\n url = (\"https://www.googleapis.com/oauth2/v1/tokeninfo?access_token=%s\" % access_token)\n h = httplib2.Http()\n result = json.loads(h.request(url, \"GET\")[1])\n if result.get(\"error\") is not None:\n return create_err_response(result.get(\"error\"), 500)\n\n # Verify that the access token is used for the intended user\n gplus_id = credentials.id_token[\"sub\"]\n if result[\"user_id\"] != gplus_id:\n return create_err_response(\"Token's user ID doesn't match given user ID\", 401)\n\n # Check to see if user is already logged in\n stored_credentials = session.get(SessionKeys.CREDENTIALS)\n stored_gplus_id = session.get(SessionKeys.GPLUS_ID)\n if stored_credentials is not None and gplus_id == stored_gplus_id:\n return create_err_response(\"Current user is already connected\", 200)\n\n # Store the access token in the session for later use.\n save_to_session(SessionKeys.CREDENTIALS, credentials)\n session[SessionKeys.GPLUS_ID] = gplus_id\n\n # Get user info from Google\n try:\n answer = None\n data = None\n\n userinfo_url = \"https://www.googleapis.com/oauth2/v1/userinfo\"\n params = {\"access_token\": credentials.access_token, \"alt\":\"json\"}\n answer = requests.get(userinfo_url, params=params)\n data = json.loads(answer.text)\n\n username = data[\"email\"]\n auth_source = AuthSource.GOOGLE_PLUS\n auth_source_id = data[\"id\"]\n # Everything checks out. 
Create a new user record if this is the\n # first time they've logged in, then set them as active in the session.\n user = dal.get_or_create_user(username, auth_source, auth_source_id)\n set_active_user(user)\n except Exception:\n return create_err_response(\n \"Received invalid user data\\n\\nanswer.text: {}\\n\\ndata: {}\".format(\n answer.text, data), 401)\n generate_nonce()\n return \"Authentication successful\"\n\n","sub_path":"catalog/catalog.py","file_name":"catalog.py","file_ext":"py","file_size_in_byte":14093,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"324109542","text":"from OpenGL.GL import *\nfrom OpenGL.GLU import *\nfrom OpenGL.GLUT import *\n\nimport numpy as np\nimport random as rnd\nimport math\n\n# coding: utf-8\nfrom OpenGL.GL import *\nfrom OpenGL.GLU import *\nfrom OpenGL.GLUT import *\n\n\n##############################################################################\n# Shader\n##############################################################################\n# Checks for GL posted errors after appropriate calls\ndef printOpenGLError():\n err = glGetError()\n if (err != GL_NO_ERROR):\n print('GLERROR: ', gluErrorString(err))\n # sys.exit()\n\n\nclass Shader:\n def __init__(self, vsFileName, fsFileName, gsFileName = None, attrib_list = None):\n self.initShader(vsFileName, fsFileName, gsFileName, attrib_list)\n\n def initShader(self, vs_file, fs_file, gs_file, attrib_list):\n fileVS = open(vs_file, \"r\")\n fileFS = open(fs_file, \"r\")\n\n\n\n # create program\n self.program = glCreateProgram()\n print('create program')\n printOpenGLError()\n\n # vertex shader\n print('compile vertex shader...')\n self.vs = glCreateShader(GL_VERTEX_SHADER)\n glShaderSource(self.vs, fileVS.read())\n glCompileShader(self.vs)\n glAttachShader(self.program, self.vs)\n printOpenGLError()\n\n if gs_file is not None:\n fileGS = open(gs_file, \"r\")\n print('compile geometry shader...')\n self.gs = glCreateShader(GL_GEOMETRY_SHADER)\n glShaderSource(self.gs, fileGS.read())\n glCompileShader(self.gs)\n glAttachShader(self.program, self.gs)\n printOpenGLError()\n\n # fragment shader\n print('compile fragment shader...')\n self.fs = glCreateShader(GL_FRAGMENT_SHADER)\n glShaderSource(self.fs, fileFS.read())\n glCompileShader(self.fs)\n glAttachShader(self.program, self.fs)\n printOpenGLError()\n\n if attrib_list is not None :\n for i in range(len(attrib_list)) :\n glBindAttribLocation(self.program, 10+i, attrib_list[i])\n\n print('link...')\n glLinkProgram(self.program)\n printOpenGLError()\n\n def begin(self):\n if glUseProgram(self.program):\n printOpenGLError()\n\n def end(self):\n glUseProgram(0)\n\nclass ComputeShader:\n def __init__(self, cpsFileName):\n self.initShader(cpsFileName)\n\n def initShader(self, cpsFileName):\n fileCPS = open(cpsFileName, \"r\")\n\n # create program\n self.program = glCreateProgram()\n print('create program for compute shader')\n printOpenGLError()\n\n # compute shader load, compile and attach\n print('compile compute shader...')\n self.cps = glCreateShader(GL_COMPUTE_SHADER)\n glShaderSource(self.cps, fileCPS.read())\n glCompileShader(self.cps)\n glAttachShader(self.program, self.cps)\n printOpenGLError()\n\n print('link compute shader...')\n glLinkProgram(self.program)\n printOpenGLError()\n\n def setupShaderStorageBufferObject(self, ssbo, index, bufferData):\n glBindBuffer(GL_SHADER_STORAGE_BUFFER, ssbo)\n glBufferData(GL_SHADER_STORAGE_BUFFER, bufferData, GL_STATIC_DRAW)\n 
glBindBufferBase(GL_SHADER_STORAGE_BUFFER, index, ssbo)\n\n def begin(self):\n if glUseProgram(self.program):\n printOpenGLError()\n\n def end(self):\n glUseProgram(0)","sub_path":"ExCode/Lab11_GeometryShader/Shader.py","file_name":"Shader.py","file_ext":"py","file_size_in_byte":3438,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"376844614","text":"import os\n\nimport nibabel as nib\nimport numpy as np\n\nfrom deepreg.data.mr_us.loader import DataLoader\nfrom deepreg.data.mr_us.util import get_label_indices\n\n\nclass NiftiFileLoader:\n def __init__(self, dir_name, load_into_memory):\n file_names, file_path_names = get_fnames_in_dir(dir_name)\n self.load_into_memory = load_into_memory\n self.loaded_data = None\n self.name_to_path_dict = None\n if self.load_into_memory:\n self.loaded_data = dict(zip(file_names,\n [load_from_nifti(x) for x in file_path_names]))\n\n else:\n self.name_to_path_dict = dict(zip(file_names, file_path_names))\n\n def get_data(self, file_name):\n if self.load_into_memory:\n return self.loaded_data[file_name]\n else:\n return load_from_nifti(self.name_to_path_dict[file_name])\n\n def get_sorted_keys(self):\n if self.load_into_memory:\n return sorted(self.loaded_data.keys())\n else:\n return sorted(self.name_to_path_dict.keys())\n\n def get_image_shape(self):\n keys = self.get_sorted_keys()\n data = self.get_data(keys[0])\n return data.shape[:3] # label.shape might be [dim1, dim2, dim3, num_labels]\n\n\nclass NiftiDataLoader(DataLoader):\n def __init__(self,\n moving_image_dir, fixed_image_dir, moving_label_dir, fixed_label_dir,\n load_into_memory, sample_label, tfrecord_dir):\n super(NiftiDataLoader, self).__init__()\n loader_moving_image = NiftiFileLoader(moving_image_dir, load_into_memory)\n loader_fixed_image = NiftiFileLoader(fixed_image_dir, load_into_memory)\n loader_moving_label = NiftiFileLoader(moving_label_dir, load_into_memory)\n loader_fixed_label = NiftiFileLoader(fixed_label_dir, load_into_memory)\n\n # sanity check\n # filenames should be the same\n assert loader_moving_image.get_sorted_keys() == loader_fixed_image.get_sorted_keys()\n assert loader_moving_image.get_sorted_keys() == loader_moving_label.get_sorted_keys()\n assert loader_moving_image.get_sorted_keys() == loader_fixed_label.get_sorted_keys()\n\n moving_image_shape = loader_moving_image.get_image_shape()\n fixed_image_shape = loader_fixed_image.get_image_shape()\n moving_label_shape = loader_moving_label.get_image_shape()\n fixed_label_shape = loader_fixed_label.get_image_shape()\n\n # sanity check\n # image and label have same shape\n assert moving_image_shape == moving_label_shape\n assert fixed_image_shape == fixed_label_shape\n\n # save variables\n self.file_names = loader_moving_image.get_sorted_keys()\n\n self.loader_moving_image = loader_moving_image\n self.loader_fixed_image = loader_fixed_image\n self.loader_moving_label = loader_moving_label\n self.loader_fixed_label = loader_fixed_label\n\n self.moving_image_shape = moving_image_shape # [dim1, dim2, dim3]\n self.fixed_image_shape = fixed_image_shape # [dim1, dim2, dim3]\n self.sample_label = sample_label\n self.num_images = len(self.file_names)\n self.num_indices = 2\n self.tfrecord_dir = tfrecord_dir\n\n def get_generator(self):\n for image_index, image_key in enumerate(self.file_names):\n moving_image = self.loader_moving_image.get_data(image_key) / 255.\n fixed_image = self.loader_fixed_image.get_data(image_key) / 255.\n moving_label = 
self.loader_moving_label.get_data(image_key)\n fixed_label = self.loader_fixed_label.get_data(image_key)\n\n if len(moving_label.shape) == 4: # multiple labels\n label_indices = get_label_indices(moving_label.shape[3], self.sample_label)\n for label_index in label_indices:\n indices = np.asarray([image_index, label_index], dtype=np.float32)\n yield (moving_image, fixed_image, moving_label[..., label_index], indices), \\\n fixed_label[..., label_index]\n elif len(moving_label.shape) == 3: # only one label\n label_index = 0\n indices = np.asarray([image_index, label_index], dtype=np.float32)\n yield (moving_image, fixed_image, moving_label, indices), fixed_label\n else:\n raise ValueError(\"Unknown moving_label.shape\")\n\n\ndef get_fnames_in_dir(dir_name):\n assert dir_name is not None\n file_names = os.listdir(dir_name)\n file_names.sort()\n file_path_names = [os.path.join(dir_name, x) for x in file_names]\n return file_names, file_path_names\n\n\ndef load_from_nifti(x):\n return np.asarray(nib.load(x).dataobj, dtype=np.float32)\n","sub_path":"deepreg/data/mr_us/loader_nifti.py","file_name":"loader_nifti.py","file_ext":"py","file_size_in_byte":4758,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"447429324","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Jun 23 14:12:52 2019\n\n@author: fed\n\"\"\"\n\n# Definition for a binary tree node.\n# class TreeNode(object):\n# def __init__(self, x):\n# self.val = x\n# self.left = None\n# self.right = None\n\nclass Solution(object):\n def lowestCommonAncestor(self, root, p, q):\n \"\"\"\n :type root: TreeNode\n :type p: TreeNode\n :type q: TreeNode\n :rtype: TreeNode\n \"\"\"\n if not root or root == p or root == q:\n return root\n \n l = self.lowestCommonAncestor(root.left, p, q)\n r = self.lowestCommonAncestor(root.right, p, q)\n if l and r:\n return root\n elif l:\n return l\n elif r:\n return r\n else:\n return\n \n ","sub_path":"236_lowestCommonAncestor.py","file_name":"236_lowestCommonAncestor.py","file_ext":"py","file_size_in_byte":815,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"291889573","text":"import os\nfrom glob import glob\nimport argparse\nimport numpy as np\nfrom shutil import rmtree, copy2\nfrom utils import get_files\n\ndef create_dir(path):\n if os.path.exists(path):\n rmtree(path)\n os.makedirs(path)\n\nparser = argparse.ArgumentParser()\nparser.add_argument('dataset')\nparser.add_argument('--train_size', type=int)\nparser.add_argument('--test_size', type=int)\nparser.add_argument('-i', '--input_fname_pattern', default='*.jpg')\nargs = parser.parse_args()\n\ndata_dir = './data'\n\nprint('* Loading data')\ndata = get_files(os.path.join(data_dir, args.dataset),\n args.input_fname_pattern)\nnp.random.shuffle(data)\n\ntrain_dir = os.path.join(data_dir, args.dataset, 'train')\ncreate_dir(train_dir)\ntest_dir = os.path.join(data_dir, args.dataset, 'test')\ncreate_dir(test_dir)\n\nif args.train_size == None and args.test_size == None:\n args.train_size = int(len(data) * 0.8)\n args.test_size = len(data) - args.train_size\nelif args.train_size == None:\n args.train_size = len(data) - args.test_size\nelif args.test_size == None:\n args.test_size = len(data) - args.train_size\n\nprint('* Copying training set')\nfor f in data[:args.train_size]:\n copy2(f, train_dir)\nprint('* Copying test set')\nfor f in data[args.train_size:args.train_size+args.test_size]:\n copy2(f, test_dir)\nprint('* 
Finish')\n","sub_path":"partition.py","file_name":"partition.py","file_ext":"py","file_size_in_byte":1329,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"3951372","text":"import numpy as np\nimport json\nimport pickle\nimport os\nfrom model import trainer\nfrom model.prediction import Scorer\n\n# Years to predict the variables for (all one-year ahead)\nPERIODS = [{'train_years': (1995, Y-1),\n 'predict_year': Y} for Y in np.arange(2011, 2018, 1)]\n\n# Feature combinations to evaluate\nFSETS = ['ETH', 'SSA',\n 'ETH+AR', 'SSA+AR',\n 'ETH+AR+DEST', 'SSA+AR+DEST',\n 'ETH+AR+DEST+TLAG', 'SSA+AR+DEST+TLAG']\n\nMODELFILE = os.path.join(os.path.dirname(__file__), \"model\", \"models.joblib\")\nCONFIGURATION = os.path.join(os.path.dirname(__file__), \"configuration.json\")\nCLUSTERS = os.path.join(os.path.dirname(__file__), \"groupings.json\")\n\n\n# Configuration of data sources\nwith open(CONFIGURATION, 'rt') as infile:\n config = json.load(infile)\n\n# Get the groups (for labels and inference)\nwith open(CLUSTERS, 'rt') as infile:\n GROUPING = json.load(infile)\n\n# Indicators that we use for Scenarios and their relative improvements\nINDICATORS = {i['code']: i['direction-improvement']\n for grp in GROUPING['clusters'] for i in grp['indicators']}\n\n# Themes that are defined in the groupings\nTHEMES = [t['sub-theme'] for t in GROUPING['clusters']]\n\n# User facing labels\nLABELS = ['worse', 'poor', 'average', 'good', 'best']\n\nTARGETS = [t['targets'] for t in trainer.TARGETS]\n\nresults = []\nfor p in PERIODS:\n\n # Update training parameters (period and feature sets)\n # and (re)train the models\n trainer.PERIODS = p\n trainer.MODELFILE = MODELFILE\n trainer.CONFIGURATION = CONFIGURATION\n trainer.execute()\n\n # Generate the forecasts (one year ahead)\n pred_api = Scorer(MODELFILE, config, GROUPING, p['predict_year'] - 1)\n predictions = pred_api.predict(p['predict_year'])\n pred = {v['target']: v['forecast'] for v in predictions}\n\n # Get the ground truth\n truth = {t: pred_api.features.indicator_value(t, \"ETH\", p['predict_year']) for t in TARGETS}\n\n # A naive baseline is the current year's value\n baseline = {t: pred_api.features.indicator_value(t, \"ETH\", p['predict_year'] - 1) for t in TARGETS}\n\n # Feature set used\n fset = {t['targets']: t['features'] for t in trainer.TARGETS}\n\n for t in TARGETS:\n res = {'predict_year': p['predict_year'],\n 'target': t,\n 'fset': fset[t],\n 'forecast': pred[t],\n 'truth': truth[t],\n 'baseline': baseline[t]}\n\n results.append(res)\n\nwith open(\"result.pkl\", 'wb') as outfile:\n pickle.dump(results, outfile)\n","sub_path":"evaluation.py","file_name":"evaluation.py","file_ext":"py","file_size_in_byte":2532,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"332185423","text":"import collections\nimport re\n\nimport numpy\n\nn = '(-?\\d*)'\npat = re.compile(f'p=<{n},{n},{n}>, v=<{n},{n},{n}>, a=<{n},{n},{n}>')\nparticles = numpy.array([list(map(int, pat.match(l).groups())) for l in open('20.txt')])\nremaining = set(range(len(particles)))\n\n\ndef collisions():\n positions = collections.defaultdict(set)\n for i, p in enumerate(particles):\n if i in remaining:\n positions[tuple(p[:3])].add(i)\n return [ids for ids in positions.values() if len(ids) > 1]\n\n\nparticles[:, 3:6] += particles[:, 6:9]\n\nk = 100\nfor _ in range(k):\n remaining -= {i for ids in collisions() for i in ids}\n particles[:, :6] += particles[:, 
-6:]\n\nprint(len(remaining))","sub_path":"17/20_numpy.py","file_name":"20_numpy.py","file_ext":"py","file_size_in_byte":684,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"556011064","text":"import pygame, sys\nfrom PIL import Image, ImageOps, ImageFont, ImageDraw\nimport textwrap\n\nTEMPLATE_NAME = \"Instagram Template.png\"\nMASK_NAME = \"Instagram Template_ProfilePicture.png\"\n\nBOLD_FONT = \"boldFont.ttf\"\nREG_FONT = \"font.ttf\"\nLIGHT_FONT = \"lightFont.ttf\"\n\nUSERNAME = \"Sample User\"\nLIKES = 72\nCAPTION = \"Here's my great picture of a flower that I found\"\nCOMMENTS = {\n 'Sample User2: ': 'Wow! Love this picture! Comments are also multi-line.',\n 'Sample User38921034: ': \"This is an extremely drawn out and long comment to demonstrate that comments are also multi-line. Isn't that neat?\"\n}\n\ndef pillowScreen():\n im = Image.open(TEMPLATE_NAME)\n boldFont = ImageFont.truetype(BOLD_FONT, 32)\n font = ImageFont.truetype(REG_FONT, 20)\n lightFont = ImageFont.truetype(LIGHT_FONT, 20)\n\n # Mask the profile picture\n unmasked = Image.open(\"SamplePP.png\")\n mask = Image.open(MASK_NAME).convert('L')\n masked = ImageOps.fit(unmasked, mask.size, centering=(0.5, 0.5))\n masked.putalpha(mask)\n masked.save('MaskedPP.png')\n\n # Crop the image\n image = Image.open(\"SampleImage.png\")\n image = ImageOps.fit(image, (539, 541))\n image.save('CroppedImage.png')\n\n # Add all the text\n imDraw = ImageDraw.Draw(im)\n # Username\n imDraw.text((80, 20), USERNAME, (0, 0, 0), font=boldFont)\n # Likes\n imDraw.text((18, 670), str(LIKES) + \" Likes\", (0, 0, 0), font=font)\n # Caption\n yPos = 700\n for line in textwrap.wrap(CAPTION, 55):\n imDraw.text((18, yPos), line, (0, 0, 0), font=lightFont)\n yPos += 30\n # Comments\n for username, comment in COMMENTS.items():\n imDraw.text((18, yPos), username, (0, 0, 0), font=font)\n multiLineComment = textwrap.wrap(comment, 55 - len(username))\n for line in range(len(multiLineComment)):\n if line == 0:\n imDraw.text((18, yPos), int((len(username)) * 1.75) * \" \" + multiLineComment[line], (0, 0, 0), font=lightFont)\n else:\n imDraw.text((18, yPos), multiLineComment[line], (0, 0, 0), font=lightFont)\n yPos += 30\n \n im.save('TemplateText.png')\n\n\ndef pygameScreen():\n screen = pygame.display.set_mode(size=(539, 1000))\n\n # Background\n template = pygame.image.load(TEMPLATE_NAME)\n templateRect = template.get_rect()\n\n # Text\n text = pygame.image.load(\"TemplateText.png\")\n textRect = text.get_rect()\n\n # Profile Picture\n pp = pygame.image.load(\"MaskedPP.png\")\n ppRect = template.get_rect()\n ppRect.x = 2\n\n # Image\n image = pygame.image.load(\"CroppedImage.png\")\n imageRect = image.get_rect()\n imageRect.y = 73\n\n while True:\n for event in pygame.event.get():\n if event.type == pygame.QUIT: sys.exit()\n\n screen.blit(template, templateRect)\n screen.blit(text, textRect)\n screen.blit(pp, ppRect)\n screen.blit(image, imageRect)\n \n pygame.display.flip()\n\npillowScreen()\npygameScreen()","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2970,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"378646157","text":"'''\n'''\n\nimport os\nimport sqlite3\nimport time, datetime\nimport logging\nfrom decimal import Decimal\n\nfrom matplotlib import pyplot\n\nfrom lib.db.CSVFile import CSVFile\nfrom lib.db.sqlite import shortcuts\n\nclass OperatorsTable():\n \n COLUMNS = [\"id\", \"short\", \"name\", 
\"info\", \"profiles\", \"georef\"]\n \n SQL_CREATE = \"create table if not exists operators (id integer primary key autoincrement, short text, name text, info text, profiles text, georel text)\"\n SQL_ID_BY_SHORT = \"select id from operators where short=?\"\n SQL_INSERT = \"insert into operators (short, name, info, profiles, georel) values (?,?,?,?,?)\"\n SQL_UPDATE = \"update operators set short=?, name=?, info=?, profiles=?, georel=? where id=?\"\n SQL_DELETE = \"delete from operators where id=?\"\n SQL_GET = \"select short, name, info, profiles, georel from operators where id=?\"\n SQL_MAX_ID = \"select id from operators order by id asc limit 1\"\n \n def __init__(self, db):\n self.db = db\n \n def create(self):\n self.db.execute(self.SQL_CREATE)\n self.db.commit()\n \n def id_by_short(self, short):\n dbc = self.db.execute(self.SQL_ID_BY_SHORT, (short,))\n return dbc.fetchone()\n \n def exists(self, short):\n return True if self.id_by_short(short) else False\n \n def insert(self, operator):\n ''' insert operator if not exist, return id '''\n _id = self.id_by_short(operator[\"short\"])\n if _id:\n logging.warn(\"OperatorsTable::insert(): <%s> already exists\" % operator[\"short\"])\n return _id \n self.db.execute(self.SQL_INSERT_OPERATOR, (operator[\"short\"], operator[\"name\"], operator[\"info\"], operator[\"profiles\"], operator[\"georel\"]))\n self.db.commit()\n dbc = self.db.execute(self.SQL_OPERATOR_MAX_ID)\n return dbc.fetchone()\n \n def update(self, operator):\n self.db.execute(self.SQL_UPDATE_OPERATOR, (operator[\"short\"], operator[\"name\"], operator[\"info\"], operator[\"profiles\"], operator[\"georel\"], operator[\"id\"]))\n self.db.commit()\n \n def delete(self, operator):\n if isinstance(operator, int):\n _id = operator\n else:\n _id = operator[\"id\"]\n self.db.execute(self.SQL_DELETE_OPERATOR, (_id,))\n self.db.commit()\n \n def get(self, _id):\n dbc = self.db.execute(self.SQL_GET, (_id,))\n res = dbc.fetchone()\n if res:\n return dict(zip(self.COLUMNS, res))\n else:\n return None\n\n\nclass ProfilesTable():\n \n COLUMNS = [\"id\", \"publisher\", \"symbol\", \"name\", \"info\"]\n \n SQL_CREATE = \"create table if not exists profiles (id integer primary key autoincrement, publisher text, symbol text, name text, info text)\"\n SQL_ID_BY_SYMBOL = \"select id from profiles where symbol=?\"\n SQL_MAX_ID = \"select id from profiles order by id asc limit 1\" \n SQL_INSERT = \"insert into profiles (publisher, symbol, name, info) values (?,?,?,?)\"\n SQL_UPDATE = \"update profiles set publisher=?, symbol=?, name=?, info=? 
where id=?\"\n SQL_DELETE = \"delete from profiles where id=?\"\n SQL_GET = \"select id, publisher, symbol, name, info from profiles where id=?\"\n \n def __init__(self, db):\n self.db = db\n \n def create(self):\n self.db.execute(self.SQL_CREATE)\n self.db.commit()\n \n def id_by_symbol(self, symbol):\n dbc = self.db.execute(self.SQL_ID_BY_SYMBOL, (symbol,))\n return dbc.fetchone()\n \n def exists(self, symbol):\n return True if self.id_by_symbol(symbol) else False\n \n def insert(self, profile):\n _id = self.id_by_short(profile[\"symbol\"])\n if _id:\n logging.warn(\"ProfilesTable::insert(): <%s> already exists\" % profile[\"symbol\"])\n return _id\n self.db.execute(self.SQL_INSERT, (profile[\"publisher\"], profile[\"symbol\"], profile[\"name\"], profile[\"info\"]))\n self.db.commit()\n dbc = self.db.execute(self.SQL_MAX_ID)\n return dbc.fetchone()\n \n def update(self, profile):\n self.db.execute(self.SQL_UPDATE, (profile[\"publisher\"], profile[\"symbol\"], profile[\"name\"], profile[\"info\"], profile[\"id\"]))\n self.db.commit()\n \n def delete(self, profile):\n if isinstance(profile, int):\n _id = profile\n else:\n _id = profile[\"id\"]\n self.db.execute(self.SQL_DELETE, (_id,))\n self.db.commit()\n \n def get(self, _id):\n dbc = self.db.execute(self.SQL_GET, (_id,))\n res = dbc.fetchone()\n if res:\n return dict(zip(self.COLUMNS, res))\n else:\n return None\n\n\nclass LoadProfileTable():\n \n SQL_CREATE = \"create table if not exists %s (dt datetime primary key not null unique, mean float, sdev float)\"\n SQL_ADD_OPERATOR = \"alter table %s add column %s float\"\n \n def __init__(self, db):\n self.db = db\n \n \n def create(self, symbol):\n self.db.execute(self.SQL_CREATE % symbol)\n self.db.commit()\n\n def add_operator(self, operator):\n self.db.execute(self.SQL_ADD_OPERATOR,)\n \n\nclass LoadProfilesDB():\n \n DEFAULT_PATH = \"/home/uzanto/workspace/Daten/Strom/Verbrauch/Lastprofile/loadprofiles.sqlite\"\n DEFAULT_PROFILES = \"default_profiles.csv\"\n \n def __init__(self, **args):\n self.path = args.get(\"path\", self.DEFAULT_PATH)\n self.db = sqlite3.connect(self.path)\n self.db.text_factory = str\n \n self.operators = OperatorsTable(self.db)\n self.profiles = ProfilesTable(self.db)\n self.loadprofiles = dict()\n \n def add_profile(self, profile):\n '''\n '''\n if self.profiles.exists(profile[\"symbol\"]):\n logging.warn(\"LoadProfilesDB::add_profile(): <%s> already exists\" % profile[\"symbol\"])\n return\n self.profiles.insert(profile)\n self.loadprofiles[profile[\"symbol\"]] = LoadProfileTable(self.db, profile[\"symbol\"])\n self.loadprofiles[profile[\"symbol\"]].create()\n \n def create_database(self):\n path = os.path.join(os.path.dirname(__file__), self.DEFAULT_PROFILES)\n profiles_csv = CSVFile(path=path, separator=\";\")\n \n self.profiles.create()\n for rowid in range(profiles_csv.rows()):\n profile = profiles_csv.get_row_as_dict(rowid)\n self.add_profile(profile)\n \n profiles_csv.close()\n \n self.operators.create()\n \n def import_loadprofile(self, **args):\n path = args.get(\"path\", \"\")\n \n if not(os.path.exists(path)):\n logging.warn(\"LoadProfilesDB::import_loadprofile(): path does not exist - nothing imported!\")\n return\n \n operator = args.get(\"operator\", None)\n profile = args.get(\"profile\", None)\n skip_lines = args.get(\"skip_lines\", 1)\n has_header = args.get(\"has_header\", True)\n column_sep = args.get(\"separator\", \";\")\n columns = args.get(\"columns\", dict(date=0, time=1, load=2))\n date_format = args.get(\"date_format\", \"%d.%m.%Y\")\n 
time_format = args.get(\"time_format\", \"%H:%M:%S\")\n load_format = args.get(\"number_format\", \"de\")\n load_unit = args.get(\"load_unit\", \"kW\")\n interval = Decimal(args.get(\"interval\", 0.25)) #interval [h]\n \n csv = CSVFile(path=path, skip_lines=skip_lines, separator=column_sep)\n \n #create operator if not exists\n self.operators.insert(dict(short=operator))\n \n #create profile if not exists\n self.add_profile(dict(symbol=profile))\n \n #create publisher in profile if not exist\n \n if not(shortcuts.has_column(self.db, profile, publisher)):\n self.db.execute(self.SQL_LOADPROFILE_ADD_OPERATOR % (profile, publisher))\n self.db.commit()\n \n for rowid in range(csv.rows()):\n #get ...\n date_string = csv.cell(columns[\"date\"], rowid)\n time_string = csv.cell(columns[\"time\"], rowid)\n load_string = csv.cell(columns[\"load\"], rowid)\n #format ...\n dt = datetime.datetime(*(time.strptime(date_string, date_format)[0:3] + time.strptime(time_string, time_format)[3:5]))\n if load_format == \"de\":\n load = Decimal(str(load_string).replace(\",\", \".\"))\n elif load_format == \"en\":\n load = Decimal(load_string)\n else:\n load = Decimal(load_string)\n #convert ...\n if load_unit == \"kW\":\n pass\n elif load_unit == \"W\":\n load = load / 1000\n elif load_unit == \"kWh\":\n load = load / interval\n elif load_unit == \"Wh\":\n load = load / interval / 1000\n else:\n load = load #error\n #insert ...\n load = str(load)\n dbc = self.db.execute(\"select rowid from %s where dt=?\" % profile, (dt,))\n res = dbc.fetchone()\n if res:\n try:\n self.db.execute(\"update %s set %s=? where dt=?\" % (profile, publisher), (load, dt))\n except sqlite3.InterfaceError:\n logging.error(\"update loadprofile: <%s>\" % str(load)) \n else:\n try:\n self.db.execute(\"insert into %s (dt, %s) values (?,?)\" % (profile, publisher), (dt, load))\n except sqlite3.InterfaceError:\n logging.error(\"insert loadprofile: <%s>\" % str(load))\n \n self.db.commit()\n csv.close()\n \n def get_loadprofile(self, profile, dt1, dt2, **args):\n publisher = args.get(\"publisher\", \"eon_Mitte\")\n dbc = self.db.execute(\"select %s from %s where dt <= ? 
and dt >= ?\" % (publisher, profile), (dt2, dt1))\n res = dbc.fetchall()\n return res\n\n \n\ndef test():\n dbpath = \"test_loadprofiles.sqlite\"\n tpath = \"/home/uzanto/workspace/Daten/Strom/Verbrauch/Lastprofile/eon Mitte/slp_profile_eon_mitte_ag/G0 - Gewerbe allgemein.csv\"\n \n #if os.path.exists(dbpath):\n # os.remove(dbpath)\n #db = LoadProfilesDB(path=dbpath)\n #db.create_database()\n #db.insert_operator(\"eon_Mitte\", \"eon Mitte GmbH\", None, None, None)\n \n '''\n db.import_loadprofile(path=tpath, \n publisher=\"eon_Mitte\",\n profile=\"G0\",\n skip_lines = 17,\n date_format = \"%d.%m.%Y\",\n time_format = \"%H:%M:%S\",\n number_format = \"de\"\n )\n \n print \"done.\"\n '''\n db = LoadProfilesDB()\n lp = db.get_loadprofile(\"G0\", \"2011-01-01 00:00:00\", \"2011-01-30 00:00:00\", publisher=\"DB_Energie__E\")\n pyplot.plot(lp)\n pyplot.show()\n \ndef test__operator_exists():\n db = LoadProfilesDB()\n assert db.operator_exists(\"eon_Mitte\")\n \nif __name__ == '__main__':\n logging.basicConfig(filename=\"test_loadprofiles.log\", filemode=\"w\", level=logging.DEBUG, format=\"%(asctime)s - %(levelname)s - %(message)s\")\n test()\n #test__operator_exists()","sub_path":"plx/sci/electricity/demand/loadprofiles/slp.py","file_name":"slp.py","file_ext":"py","file_size_in_byte":11326,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"389040912","text":"\"\"\"\nFunctionalities to interact with gene sets defined in an assembly and gene-annotation (such as protein-coding).\n\"\"\"\n\nimport gzip\nimport numpy as np\nimport os\nfrom typing import Iterable, List, Union\nimport pandas\nimport pathlib\nimport urllib.error\nimport urllib.request\n\nfrom sfaira.consts.directories import CACHE_DIR_GENOMES\n\nKEY_SYMBOL = \"gene_name\"\nKEY_ID = \"gene_id\"\nKEY_TYPE = \"gene_biotype\"\nVALUE_GTF_GENE = \"gene\"\nKEY_GTF_REGION_TYPE = 2\nKEY_GTF_REGION_DETAIL_FIELD = 8\nIDX_GTF_REGION_DETAIL_FIELD_ID = 0\nIDX_GTF_REGION_DETAIL_FIELD_SYMBOL = 2\nIDX_GTF_REGION_DETAIL_FIELD_TYPE = 4\n\n\nclass GtfInterface:\n\n def __init__(self, assembly: str):\n self.assembly = assembly\n\n @property\n def cache_dir(self):\n \"\"\"\n The cache dir is in a cache directory in the sfaira installation that is excempt from git versioning.\n \"\"\"\n cache_dir_path = pathlib.Path(CACHE_DIR_GENOMES)\n cache_dir_path.mkdir(parents=True, exist_ok=True)\n return CACHE_DIR_GENOMES\n\n @property\n def cache_fn(self):\n return os.path.join(self.cache_dir, self.assembly + \".csv\")\n\n @property\n def release(self) -> str:\n return self.assembly.split(\".\")[-1]\n\n @property\n def organism(self) -> str:\n return self.assembly.split(\".\")[0].lower()\n\n @property\n def url_ensembl_ftp(self):\n return f\"ftp://ftp.ensembl.org/pub/release-{self.release}/gtf/{self.organism}/{self.assembly}.gtf.gz\"\n\n def download_gtf_ensembl(self):\n \"\"\"\n Download .gtf file from ensembl FTP server and turn into reduced, gene-centric cache .csv.\n \"\"\"\n temp_file = os.path.join(self.cache_dir, self.assembly + \".gtf.gz\")\n try:\n _ = urllib.request.urlretrieve(url=self.url_ensembl_ftp, filename=temp_file)\n except urllib.error.URLError as e:\n raise ValueError(f\"Could not download gtf from {self.url_ensembl_ftp} with urllib.error.URLError: {e}, \"\n f\"check if assembly name '{self.assembly}' corresponds to an actual assembly.\")\n with gzip.open(temp_file) as f:\n tab = pandas.read_csv(f, sep=\"\\t\", comment=\"#\", header=None)\n os.remove(temp_file) # Delete temporary file 
.gtf.gz.\n tab = tab.loc[tab[KEY_GTF_REGION_TYPE].values == VALUE_GTF_GENE, :]\n conversion_tab = pandas.DataFrame({\n KEY_ID: [\n x.split(\";\")[IDX_GTF_REGION_DETAIL_FIELD_ID].split(\" \")[-1].strip(\"\\\"\")\n for x in tab[KEY_GTF_REGION_DETAIL_FIELD].values],\n KEY_SYMBOL: [\n x.split(\";\")[IDX_GTF_REGION_DETAIL_FIELD_SYMBOL].split(\" \")[-1].strip(\"\\\"\")\n for x in tab[KEY_GTF_REGION_DETAIL_FIELD].values],\n KEY_TYPE: [\n x.split(\";\")[IDX_GTF_REGION_DETAIL_FIELD_TYPE].split(\" \")[-1].strip(\"\\\"\")\n for x in tab[KEY_GTF_REGION_DETAIL_FIELD].values],\n }).sort_values(\"gene_id\")\n conversion_tab.to_csv(self.cache_fn)\n\n @property\n def cache(self) -> pandas.DataFrame:\n if not os.path.exists(self.cache_fn):\n self.download_gtf_ensembl()\n return pandas.read_csv(self.cache_fn)\n\n\nclass GenomeContainer:\n \"\"\"\n Container class for a genome annotation for a specific release.\n\n This class can be used to translate between symbols and ENSEMBL IDs for a specific assembly, to store specific gene\n subsets of an assembly, and to subselect genes by biotypes in an assembly.\n \"\"\"\n\n genome_tab: pandas.DataFrame\n assembly: str\n\n def __init__(\n self,\n assembly: str = None,\n ):\n \"\"\"\n Are you not sure which assembly to use?\n\n - You could use the newest one for example, check the ENSEMBL site regularly for updates:\n http://ftp.ensembl.org/pub/\n - You could use one used by a specific aligner, the assemblies used by 10x cellranger are described here\n for example: https://support.10xgenomics.com/single-cell-gene-expression/software/release-notes/build\n\n :param assembly: The full name of the genome assembly, e.g. Homo_sapiens.GRCh38.102.\n \"\"\"\n if not isinstance(assembly, str):\n raise ValueError(f\"supplied assembly {assembly} was not a string\")\n self.assembly = assembly\n self.gtfi = GtfInterface(assembly=self.assembly)\n self.load_genome()\n\n @property\n def organism(self):\n return self.gtfi.organism\n\n def load_genome(self):\n self.genome_tab = self.gtfi.cache\n\n def subset(\n self,\n biotype: Union[None, str, List[str]] = None,\n symbols: Union[None, str, List[str]] = None,\n ensg: Union[None, str, List[str]] = None,\n ):\n \"\"\"\n Subset by gene biotype or to gene list defined by identifiers (symbol or ensemble ID).\n\n Will subset by multiple factors if more than one parameter is not None.\n\n :param biotype: Gene biotype(s) of gene(s) to subset genome to. Elements have to appear in genome.\n Separate in string via \",\" if choosing multiple or supply as list of string.\n :param symbols: Gene symbol(s) of gene(s) to subset genome to. Elements have to appear in genome.\n Separate in string via \",\" if choosing multiple or supply as list of string.\n :param ensg: Ensemble gene ID(s) of gene(s) to subset genome to. Elements have to appear in genome.\n Separate in string via \",\" if choosing multiple or supply as list of string.\n \"\"\"\n subset = np.ones((self.n_var,), \"int\") == 1\n if biotype is not None:\n if isinstance(biotype, list):\n pass\n elif isinstance(biotype, str):\n biotype = biotype.split(\",\")\n else:\n raise ValueError(f\"Supply biotype as string, see also function annotation. 
Supplied {biotype}.\")\n self.__validate_types(x=biotype)\n subset = np.logical_and(\n subset,\n [x in biotype for x in self.genome_tab[KEY_TYPE].values]\n )\n if symbols is not None:\n if isinstance(symbols, list):\n pass\n elif isinstance(symbols, str):\n symbols = symbols.split(\",\")\n else:\n raise ValueError(f\"Supply symbols as string, see also function annotation. Supplied {symbols}.\")\n self.__validate_symbols(x=symbols)\n subset = np.logical_and(\n subset,\n [x in symbols for x in self.genome_tab[KEY_SYMBOL].values]\n )\n if ensg is not None:\n if isinstance(ensg, list):\n pass\n elif isinstance(ensg, str):\n ensg = ensg.split(\",\")\n else:\n raise ValueError(f\"Supply ensg as string, see also function annotation. Supplied {ensg}.\")\n self.__validate_ensembl(x=ensg)\n subset = np.logical_and(\n subset,\n [x in ensg for x in self.genome_tab[KEY_ID].values]\n )\n self.genome_tab = self.genome_tab.loc[subset, :].copy()\n\n @property\n def symbols(self) -> List[str]:\n \"\"\"\n List of symbols of genes in genome container.\n \"\"\"\n return self.genome_tab[KEY_SYMBOL].values.tolist()\n\n @property\n def ensembl(self) -> List[str]:\n \"\"\"\n List of ENSEMBL IDs of genes in genome container.\n \"\"\"\n return self.genome_tab[KEY_ID].values.tolist()\n\n @property\n def biotype(self) -> List[str]:\n \"\"\"\n List of biotypes of genes in genome container.\n \"\"\"\n return self.genome_tab[KEY_TYPE].values.tolist()\n\n def __validate_ensembl(self, x: List[str], enforce_captitalization: bool = True):\n if enforce_captitalization:\n not_found = [y for y in x if y not in self.ensembl]\n else:\n ensembl_upper = [y.upper() for y in self.ensembl]\n not_found = [y for y in x if y.upper() not in ensembl_upper]\n if len(not_found) > 0:\n raise ValueError(f\"Could not find ENSEMBL ID: {not_found}\")\n\n def __validate_symbols(self, x: List[str], enforce_captitalization: bool = True):\n if enforce_captitalization:\n not_found = [y for y in x if y not in self.symbols]\n else:\n symbols_upper = [y.upper() for y in self.symbols]\n not_found = [y for y in x if y.upper() not in symbols_upper]\n if len(not_found) > 0:\n raise ValueError(f\"Could not find symbol: {not_found}\")\n\n def __validate_types(self, x: List[str]):\n not_found = [y for y in x if y not in self.biotype]\n if len(not_found) > 0:\n raise ValueError(f\"Could not find type: {not_found}\")\n\n @property\n def n_var(self) -> int:\n \"\"\"\n Number of genes in genome container.\n \"\"\"\n return self.genome_tab.shape[0]\n\n @property\n def symbol_to_id_dict(self):\n \"\"\"\n Dictionary-formatted map of gene symbols to ENSEMBL IDs.\n \"\"\"\n return dict(zip(self.genome_tab[KEY_SYMBOL].values.tolist(), self.genome_tab[KEY_ID].values.tolist()))\n\n @property\n def id_to_symbols_dict(self):\n \"\"\"\n Dictionary-formatted map of ENSEMBL IDs to gene symbols.\n \"\"\"\n return dict(zip(self.genome_tab[KEY_ID].values.tolist(), self.genome_tab[KEY_SYMBOL].values.tolist()))\n\n def translate_symbols_to_id(self, x: Union[str, Iterable[str]]) -> Union[str, List[str]]:\n \"\"\"\n Translate gene symbols to ENSEMBL IDs.\n\n :param x: Symbol(s) to translate.\n :return: ENSEMBL IDs\n \"\"\"\n if isinstance(x, str):\n x = [x]\n self.__validate_symbols(x=x, enforce_captitalization=False)\n map_dict = dict([(k.upper(), v) for k, v in self.symbol_to_id_dict.items()])\n y = [map_dict[xx.upper()] for xx in x]\n if len(y) == 1:\n y = y[0]\n return y\n\n def translate_id_to_symbols(self, x: Union[str, Iterable[str]]) -> Union[str, List[str]]:\n \"\"\"\n 
Translate ENSEMBL IDs to gene symbols.\n\n        :param x: ENSEMBL ID(s) to translate.\n        :return: Gene symbols.\n        \"\"\"\n        if isinstance(x, str):\n            x = [x]\n        self.__validate_ensembl(x=x, enforce_captitalization=False)\n        map_dict = dict([(k.upper(), v) for k, v in self.id_to_symbols_dict.items()])\n        y = [map_dict[xx.upper()] for xx in x]\n        if len(y) == 1:\n            y = y[0]\n        return y\n\n    @property\n    def strippednames_to_id_dict(self):\n        return dict(zip([i.split(\".\")[0] for i in self.genome_tab[KEY_SYMBOL]],\n                        self.genome_tab[KEY_ID].values.tolist()))\n\n\nclass CustomFeatureContainer(GenomeContainer):\n\n    def __init__(\n            self,\n            genome_tab: pandas.DataFrame,\n            organism: str,\n    ):\n        \"\"\"\n\n        :param genome_tab: Table characterising feature space. Must be a data frame with 3 columns:\n\n            - \"gene_name\": Name of features.\n            - \"gene_id\": ID of features, can be the same as values of \"gene_name\"\n            - \"gene_biotype\": Types of features, can be arbitrary like \"embedding\"\n        \"\"\"\n        self.assembly = \"custom\"\n        assert len(genome_tab.columns) == 3\n        assert KEY_SYMBOL in genome_tab.columns\n        assert KEY_ID in genome_tab.columns\n        assert KEY_TYPE in genome_tab.columns\n        self.genome_tab = genome_tab\n        self._organism = organism\n\n    @property\n    def organism(self):\n        return self._organism\n","sub_path":"sfaira/versions/genomes/genomes.py","file_name":"genomes.py","file_ext":"py","file_size_in_byte":11606,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"24227542","text":"#!/usr/bin/env python\nimport rospy\nfrom geometry_msgs.msg import Twist\n\n\nif __name__ == '__main__':\n    rospy.init_node('robot_cleaner', anonymous=True)\n    velocity_publisher = rospy.Publisher('/cmd_vel', Twist, queue_size=10)\n    vel_msg = Twist()\n    while not rospy.is_shutdown():\n        vel_msg.linear.x = 10\n        velocity_publisher.publish(vel_msg)\n        # Testing our function\n        #move()\n    rospy.spin()\n","sub_path":"test/scripts/move.py","file_name":"move.py","file_ext":"py","file_size_in_byte":426,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"373557919","text":"#!python3\n\nimport matplotlib.pyplot as plt\nfrom datetime import datetime\nimport matplotlib.dates as mdates\n\nwith open('pmdata.txt', 'r') as f:\n\ttxtstr = f.readlines()\n\txlab = []\n\txs = []\n\ty = []\n\tfor i,line in enumerate(txtstr):\n\t\tif i==0:\n\t\t\txlab.append(line[5:19])\n\t\telif i != len(txtstr)-1:\n\t\t\txlab.append(\" \")\n\t\telse:\n\t\t\txlab.append(line[5:19])\n\t\txs.append(datetime.strptime(line[:19], '%Y-%m-%d %H:%M:%S'))\n\t\tindex = line.find(',')\n\t\ty.append(int(line[19:index]))\n\n# Configure the x-axis\n#plt.gca().xaxis.set_major_formatter(mdates.DateFormatter('%Y-%m-%d %H:%M:%S'))\n#plt.gca().xaxis.set_major_locator(mdates.DayLocator())\n# Automatically rotate the date labels\n#plt.gcf().autofmt_xdate(rotation=90)\nplt.xticks(xs,xlab,rotation=15)\n\n#plt.plot(xs, y, 'o-')\nplt.plot(xs, y)\nplt.show()\n","sub_path":"pm_hourly/dataImg.py","file_name":"dataImg.py","file_ext":"py","file_size_in_byte":775,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"109377581","text":"from django.db.models import Q\r\n\r\nfrom toolshareapp.models import Tool\r\nfrom toolshareapp.models import ToolStatus\r\nfrom toolshareapp.models import User\r\nfrom toolshareapp.services.notification_service import NotificationService\r\nfrom toolshareapp.services.email_service import EmailService\r\nfrom toolshareapp.services.reservation_service import 
ReservationService\r\n\r\nnotificationService = NotificationService()\r\nemailService = EmailService()\r\nreservationService = ReservationService()\r\n\r\nclass ToolHasReservationsError(Exception):\r\n def __init__(self, msg):\r\n self.msg = msg\r\n\r\nclass ToolService:\r\n \r\n def send_email_if_notification_enabled(self, template, subject, to, reservation): \r\n if to.email_notifications:\r\n emailService.send_email(template, subject, to.email, reservation) \r\n\r\n def register(self, tool):\r\n tool.status = ToolStatus.Available\r\n tool.active = True\r\n \r\n if tool.shed is not None and tool.shed.coordinator != tool.owner:\r\n self.send_email_if_notification_enabled('inclusion_request',\r\n 'New inclusion request for your shed',\r\n tool.shed.coordinator,\r\n tool)\r\n notificationService.create(\"/toolshare/shed/requests/\",\r\n tool.shed.coordinator.id,\r\n \"There is a new tool inclusion request for your shed\")\r\n \r\n tool.requested_shed = tool.shed\r\n tool.shed = None\r\n \r\n tool.save()\r\n\r\n def update(self, tool, user_id): \r\n orig = Tool.objects.get(id = tool.id)\r\n \r\n if orig.shed_id != tool.shed_id:\r\n \r\n if reservationService.get_reservations_by_tool(tool.id): # if tool can change location\r\n raise ToolHasReservationsError(\"Cannot change this tool's location. It has pending reservations.\")\r\n \r\n if tool.shed_id and tool.shed.coordinator_id != user_id:\r\n tool.requested_shed_id = tool.shed_id\r\n tool.shed_id = orig.shed_id\r\n \r\n self.send_email_if_notification_enabled('inclusion_request',\r\n 'New inclusion request for your shed',\r\n tool.shed.coordinator,\r\n tool)\r\n notificationService.create(\"/toolshare/shed/requests/\",\r\n tool.shed.coordinator.id,\r\n \"There is a new tool inclusion request for your shed\")\r\n \r\n else:\r\n tool.requested_shed_id = None \r\n \r\n tool.save()\r\n\r\n def deregister(self, tool_id):\r\n tool = Tool.objects.get(id = tool_id)\r\n tool.deactivate()\r\n tool.save()\r\n reservations = reservationService.get_reservations_by_tool(tool_id)\r\n \r\n for reservation in reservations:\r\n reservationService.cancel_lend(reservation.id, reservation.tool.owner_id)\r\n notificationService.create(\"/toolshare/reservation/\", reservation.user_id,\r\n \"The \" + tool.name + \" you had a reservation for, has been deregistered.\")\r\n self.send_email_if_notification_enabled('cancel_lend',\r\n \"The \" + tool.name + \" you had a reservation for, has been deregistered.\",\r\n reservation.user,\r\n reservation)\r\n \r\n def withhold(self, tool_id):\r\n tool = Tool.objects.get(id = tool_id) \r\n tool.make_unavailable() \r\n tool.save()\r\n \r\n reservations = reservationService.get_reservations_by_tool(tool_id)\r\n \r\n for reservation in reservations:\r\n reservationService.cancel_lend(reservation.id, reservation.tool.owner_id)\r\n \r\n def release(self, tool_id):\r\n tool = Tool.objects.get(id = tool_id)\r\n tool.make_available()\r\n tool.save()\r\n\r\n def get_tools_owned_by_user(self, user_id):\r\n tools = Tool.objects.filter(owner_id = user_id).filter(active = True)\r\n return tools\r\n \r\n def get_tools_in_users_shed(self, search_term, user_id):\r\n tools = Tool.objects.filter(shed__coordinator_id = user_id).filter(active = True)\r\n \r\n if search_term:\r\n tools = tools.filter(\r\n Q(name__contains = search_term) |\r\n Q(description__contains = search_term) |\r\n \r\n Q(categories__description__contains = search_term) |\r\n \r\n Q(owner__name__contains = search_term) |\r\n Q(owner__lastname__contains = search_term) | \r\n 
Q(owner__street_name__contains = search_term)\r\n                )\r\n        \r\n        return tools\r\n    \r\n    def get_all_tools(self):\r\n        return Tool.objects.all()\r\n    \r\n    def get_tools(self, search_term, user_id):\r\n        user = User.objects.get(id=user_id)\r\n        tools = Tool.objects.filter(active = True).filter(owner__zip_code = user.zip_code).exclude(status = ToolStatus.Unavailable)\r\n        \r\n        if search_term:\r\n            tools = tools.filter(\r\n                Q(name__contains = search_term) |\r\n                Q(description__contains = search_term) |\r\n                \r\n                Q(categories__description__contains = search_term) |\r\n                \r\n                Q(owner__name__contains = search_term) |\r\n                Q(owner__lastname__contains = search_term) |\r\n                Q(owner__street_name__contains = search_term)\r\n                )\r\n        \r\n        return tools\r\n    \r\n    def get_tool(self, tool_id):\r\n        return Tool.objects.get(id = tool_id)\r\n    ","sub_path":"toolshareapp/services/tool_service.py","file_name":"tool_service.py","file_ext":"py","file_size_in_byte":6030,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"222095848","text":"#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n# __author__ = \"Mr.chen\"\n# Date: 2018/6/7\n\nimport json\nimport matplotlib.pyplot as plt\n\nnew_dict = {}\n\nwith open('json.txt', 'r', encoding='utf-8') as json_f:\n    f = json_f.read()\n    new_dict = json.loads(f)\n    print(\"Finished loading the file...\")\n\n\n# print(new_dict['dataPoints'])\n#\n# x = range(len(new_dict))\n# plt.plot(x, new_dict['dataPoints'])\n\ndict2 = [] # x-axis values\nfor i in range(0, 1445, 5):\n    dict2.append(i)\n\n\n\n# plt.plot([1, 3, 2, 4])\nplt.plot(dict2, new_dict['dataPoints'])\n# plt.plot([1,3,3,4], [1,4,9,16])\nplt.ylabel('some numbers')\nplt.show()\n\n# http://www.datahub.top/coursedetail/?id=29","sub_path":"作业测试.py","file_name":"作业测试.py","file_ext":"py","file_size_in_byte":650,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"634198707","text":"import codecs\nimport os\n\nfrom setuptools import find_packages, setup\n\nwith open(\"README.md\", \"r\") as fh:\n    long_description = fh.read()\n\ninstall_requires = [\n    \"numpy>=1.18.0\",\n    \"scipy>=1.4.1\",\n    \"scikit-learn>=0.22.2\",\n    \"setuptools\",\n    \"tqdm\",\n    \"simanneal\",\n]\n\n\ndef read(rel_path):\n    here = os.path.abspath(os.path.dirname(__file__))\n    with codecs.open(os.path.join(here, rel_path), \"r\", encoding=\"utf-8\") as fp:\n        return fp.read()\n\n\ndef get_version(rel_path):\n    for line in read(rel_path).splitlines():\n        if line.startswith(\"__version__\"):\n            delim = '\"' if '\"' in line else \"'\"\n            return line.split(delim)[1]\n    raise RuntimeError(\"Unable to find version string.\")\n\n\nsetup(\n    name=\"uret\",\n    version=get_version(\"uret/__init__.py\"),\n    description=\"Toolkit for generic adversarial machine learning evaluations.\",\n    long_description=long_description,\n    long_description_content_type=\"text/markdown\",\n    author=\"Kevin Eykholt\",\n    author_email=\"kheykholt@ibm.com\",\n    maintainer=\"Kevin Eykholt\",\n    maintainer_email=\"kheykholt@ibm.com\",\n    url=\"https://github.com/IBM/URET\",\n    license=\"MIT\",\n    install_requires=install_requires,\n    extras_require={\n        \"all\": [\"lief\", \"pandas\", \"tensorflow\", \"keras\", \"h5py\", \"keras-rl\"],\n        \"binary\": [\"lief\"],\n        \"rl\": [\"tensorflow\", \"keras\", \"h5py\", \"keras-rl\"],\n        \"non-framework\": [\"pandas\"]\n    },\n    packages=find_packages(),\n    
include_package_data=True,\n)\n","sub_path":"URET/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1493,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"576821807","text":"#!/usr/bin/env python\n# encoding: utf-8\n# Licensed to the Apache Software Foundation (ASF) under one or more\n# contributor license agreements. See the NOTICE file distributed with\n# this work for additional information regarding copyright ownership.\n# The ASF licenses this file to You under the Apache License, Version 2.0\n# (the \"License\"); you may not use this file except in compliance with\n# the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport re\nimport subprocess\nimport logging\nimport json\n\n\ndef grobid_quantities(sentence, a, endpoint):\n \"\"\"\n a = annotations\n \"\"\"\n \"\"\"Pass sentence text to Grobid server on port 8080 for measurement parsing\n\n Args:\n sentence (str): Sentence to be parsed\n a (Annotations object): object containing relevant CoreNLP output\n\n Returns:\n dict: object containing Grobid output\n \"\"\"\n\n # $ needs to be escaped when passed via subprocess\n sentence = re.sub(\"\\$\", \"\\\\$\", sentence)\n sentence = re.sub(\"\\\"\", '\\\\\"', sentence)\n sentence = re.sub(\"%\", '%25', sentence)\n sentence = re.sub(\"`\", \"'\", sentence)\n sentence = re.sub(\"'\", '\\\\\"', sentence)\n\n if endpoint[len(endpoint)-1:] == \"/\":\n endpoint = endpoint[:len(endpoint)-1]\n\n response = None\n try:\n response = subprocess.check_output('curl -X POST -d text=\"' + sentence + '\" ' + endpoint + '/processQuantityText', shell=True)\n except:\n logging.error(\"Invalid subprocess call for: %s\" % sentence)\n quantities = {}\n try:\n quantities = json.loads(response)\n except ValueError as e:\n print('No Grobid response for: %s' % sentence)\n logging.warning('No Grobid response for: %s' % sentence)\n return \"\"\n\n #Add token index for num, unit, quantified if available\n if isinstance(quantities, dict):\n for q in quantities[\"measurements\"]:\n\n key = \"\"\n if q[\"type\"] == \"value\":\n key = \"quantity\"\n # if Grobid doesn't parse interval correctly, sometimes only 'QuantityLeast' or 'QuantityMost' is available\n if q[\"type\"] == \"interval\":\n if \"quantityLeast\" in q:\n key = \"quantityLeast\"\n elif \"QuantityMost\" in q:\n key = \"quantityMost\"\n else:\n return {}\n\n if q[\"type\"] == \"listc\":\n return {}\n\n if key == \"\":\n logging.error('Unknown Grobid key resulting from parse of: %s' % sentence)\n print(\"Unknown Grobid key resulting from parse of: %s\" % sentence)\n\n # Grobid doesn't pick up negatives\n if sentence[sentence.find(q[key][\"rawValue\"]) - 1] == \"-\":\n q[key][\"parsedValue\"] = float(\"-\" + str(q[key][\"parsedValue\"]))\n q[key][\"rawValue\"] = \"-\" + str(q[key][\"rawValue\"])\n q[key][\"offsetStart\"] -= 1\n\n if q[key][\"offsetStart\"] in a.tok_start:\n q[key][\"tokenIndex\"] = a.tok_start[q[key][\"offsetStart\"]]\n else:\n print(\"Not finding token index for Grobid Quantity value in CoreNLP output. 
Sentence: %s\" % sentence)\n logging.error(\"Not finding token index for Grobid Quantity value in CoreNLP output. Sentence: %s\" % sentence)\n return {}\n\n if \"rawUnit\" in q[key]:\n q[key][\"rawUnit\"][\"after\"] = a.lookup[q[key][\"tokenIndex\"]][\"after\"]\n q[key][\"rawUnit\"][\"tokenIndices\"] = []\n\n if q[key][\"rawUnit\"][\"offsetStart\"] in a.tok_start: \n q[key][\"rawUnit\"][\"tokenIndices\"].append(str(a.tok_start[q[key][\"rawUnit\"][\"offsetStart\"]])) \n if q[key][\"rawUnit\"][\"offsetEnd\"] in a.tok_end: \n q[key][\"rawUnit\"][\"tokenIndices\"].append(str(a.tok_end[q[key][\"rawUnit\"][\"offsetEnd\"]]))\n \n if q[key][\"rawUnit\"][\"offsetStart\"] == q[key][\"offsetEnd\"]: \n q[key][\"rawUnit\"][\"tokenIndices\"].append(str(q[key][\"tokenIndex\"])) \n q[key][\"rawUnit\"][\"tokenIndices\"] = list(set(q[key][\"rawUnit\"][\"tokenIndices\"]))\n\n if \"quantified\" in q:\n \n # often times Grobid with return a phrase where normalized name is in middle. In this case, \"offsetStart\" identifies the wrong token \n add_to_offset = 0\n normalized_idx, words = None, None\n if \" \" in q[\"quantified\"][\"rawName\"]:\n words = q[\"quantified\"][\"rawName\"].split(\" \")\n for i,w in enumerate(words):\n if not q[\"quantified\"][\"normalizedName\"] in w:\n add_to_offset += (len(w) + 1) # +1 for space that was split on\n else:\n break\n\n q[\"quantified\"][\"offsetStart\"] += add_to_offset\n\n if q[\"quantified\"][\"offsetStart\"] in a.tok_start:\n q[\"quantified\"][\"tokenIndex\"] = a.tok_start[q[\"quantified\"][\"offsetStart\"]]\n else:\n logging.warning(\"Not finding token index for Grobid quantified word in CoreNLP output. Sentence: %s\" %(sentence))\n #hyphen causing issue - Grobid doesn't treat hyphenated clause as one word \n # example error sentence: \"Macroscopic examination of the CNS revealed micrencephaly with a whole-brain weight of 84 grams.\"\n \n return quantities\n","sub_path":"marve/grobid.py","file_name":"grobid.py","file_ext":"py","file_size_in_byte":5863,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"80445798","text":"from pyppeteer import launch\nfrom random import randint\nfrom time import sleep\n\n\nclass SiteNavigator:\n def __init__(self):\n self.browser = None\n self.page = None\n self.timed_out = False\n\n @staticmethod\n async def create_navi():\n navi = SiteNavigator()\n navi.browser = await launch(headless=True, autoClose=True)\n navi.page = await navi.browser.newPage()\n await navi.page.setViewport({'width': 1200, 'height': 630})\n await navi.page.goto('https://waifulabs.com/generate')\n return navi\n\n async def click_by_index(self, index):\n girls = await self.find_all_girls()\n await girls[index - 1].click()\n\n async def exit(self):\n await self.page.close()\n await self.browser.close()\n\n async def undo(self):\n await self.page.click(\".sc-bdvvtL:nth-child(1)\")\n\n async def keep(self):\n await self.page.click(\".sc-bdvvtL:nth-child(2)\")\n\n async def rand(self):\n random_index = randint(1, 15)\n await self.page.click(f\".waifu-grid > div:nth-child({random_index})\")\n\n async def refresh(self):\n await (await self.find_all_girls())[15].click()\n\n async def wait_for_not_load_screen(self):\n while await self.page.querySelector(\".loading-callout\"):\n sleep(0.01)\n\n async def wait_for_final_image(self):\n while await self.page.querySelector(\".waifu-preview-loading\"):\n sleep(0.01)\n\n async def find_all_girls(self):\n return await self.page.querySelectorAll(\".waifu-grid > div\")\n\n async 
def browser_timeout(self): #this is to differentiate between a timeout and a user exiting.\n self.timed_out = True\n await self.exit()","sub_path":"site_navigator.py","file_name":"site_navigator.py","file_ext":"py","file_size_in_byte":1711,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"152702093","text":"# -*- coding: utf-8 -*-\n# Used by usfm_cleanup.py.\n# Substitutions in this file convert straight double quotes to curly double quotes.\n# To be used in languages where the single quote (apostrophe) is a word-forming character.\n# These substitutions are applied after some regular expressions replacements have been made.\n\n# subs is a list of tuples to be used for string substitutions.\nsubs = [\n# Convert open quote marks\n\t('\"“', '““'),\n\t('“\"', '““'),\n# Convert closing quote marks\n\t('\"”', \"””\"),\n\t('”\"', \"””\"),\n]\n\nimport re\nquote0_re = re.compile(r'\\s(\"+)[\\w\\']+(\"+)\\s') # a single word in quotes\nquote1_re = re.compile(r' (\"+)[\\w\\']') # SPACE quotes word => open quotes\nquote2_re = re.compile(r': (\"+)') # colon SPACE quotes => open quotes\nquote3_re = re.compile(r'[,;](\"+) ') # comma/semicolon quotes SPACE => close quotes\nquote4_re = re.compile(r'[\\.!\\?](\"+)') # period/bang/question quotes => close quotes\nquote5_re = re.compile(r'[\\w\\'](\"+) *\\n') # word quotes EOL => close quotes\nopentrans = str.maketrans('\"', '“')\nclosetrans = str.maketrans('\"', '”')\n\n# Changes straight quotes to curly quotes where context suggests with very high confidence.\ndef promoteQuotes(str):\n pos = 0\n snippet = quote0_re.search(str, pos)\n while snippet:\n # if len(snippet.group(1)) == 1 and len(snippet.group(1)) == 1: # TEMPORARY!!!!!!\n if snippet.group(1) == snippet.group(2) and len(snippet.group(1)) == 1:\n (i,j) = (snippet.start()+1, snippet.end()-1)\n str = str[0:i] + snippet.group(1).translate(opentrans) + str[i+1:j-1] + snippet.group(2).translate(closetrans) + str[j:]\n pos = snippet.end()\n snippet = quote0_re.search(str, pos)\n\n snippet = quote1_re.search(str)\n while snippet:\n (i,j) = (snippet.start()+1, snippet.end()-1)\n str = str[0:i] + snippet.group(1).translate(opentrans) + str[j:]\n snippet = quote1_re.search(str)\n\n snippet = quote2_re.search(str)\n while snippet:\n (i,j) = (snippet.start()+2, snippet.end())\n str = str[0:i] + snippet.group(1).translate(opentrans) + str[j:]\n snippet = quote2_re.search(str)\n\n snippet = quote3_re.search(str)\n while snippet:\n (i,j) = (snippet.start()+1, snippet.end()-1)\n str = str[0:i] + snippet.group(1).translate(closetrans) + str[j:]\n snippet = quote3_re.search(str)\n\n snippet = quote4_re.search(str)\n while snippet:\n (i,j) = (snippet.start()+1, snippet.end())\n str = str[0:i] + snippet.group(1).translate(closetrans) + str[j:]\n snippet = quote4_re.search(str)\n\n snippet = quote5_re.search(str)\n while snippet:\n (i,j) = (snippet.start()+1, snippet.start() + 1 + len(snippet.group(1)))\n str = str[0:i] + snippet.group(1).translate(closetrans) + str[j:]\n snippet = quote5_re.search(str)\n\n for pair in subs:\n str = str.replace(pair[0], pair[1])\n return str\n","sub_path":"usfm/doublequotes.py","file_name":"doublequotes.py","file_ext":"py","file_size_in_byte":2961,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"88521988","text":"#!/usr/bin/env python3\n\nimport os\nimport sys\nimport subprocess\nimport argparse\n\ndef process(threads, rootPath, lang1, lang2, docThreshold, bleuThreshold):\n 
#print(\"rootPath\", rootPath)\n #print(\"lang1\", lang1)\n #print(\"lang2\", lang2)\n #print(\"__file__\", __file__)\n\n scriptDir = os.path.dirname(os.path.realpath(__file__))\n print(\"scriptDir\", scriptDir)\n\n # /home/hieu/workspace/github/paracrawl/bleualign-cpp/build/bleualign_cpp\n # --text1 en.extracted.gz\n # --text2 zh.extracted.gz\n # --text2translated zh.extracted.translated.gz\n # --output_dir t\n # --matches en-zh.matches --matches_threshold .1\n\n cmd = 'ls ' + rootPath + \\\n ' | parallel -v --no-notice -v -j ' + str(threads) + ' \"' + scriptDir + '/../modules/bleualign-cpp/build/bleualign_cpp ' + \\\n ' --text1 ' + rootPath + '/{}/' + lang1 + '.extracted' + \\\n ' --text2 ' + rootPath + '/{}/' + lang2 + '.extracted' + \\\n ' --text2translated ' + rootPath + '/{}/' + lang2 + '.extracted.translated' + \\\n ' --output-dir ' + rootPath + '/{}/' + \\\n ' --matches ' + rootPath + '/{}/' + lang1 + '-' + lang2 + '.matches' + \\\n ' --doc-threshold ' + str(docThreshold) + \\\n ' --bleu-threshold ' + str(bleuThreshold) + \\\n '\"'\n systemCheck(cmd)\n\ndef systemCheck(cmd):\n sys.stderr.write(\"Executing:\" + cmd + \"\\n\")\n sys.stderr.flush()\n\n subprocess.check_call(cmd, shell=True)\n\n###################################################################################################\ndef main(argv):\n print(\"starting...\")\n\n parser = argparse.ArgumentParser(\n description='Sentence align of all domains in a directory.')\n parser.add_argument('--threads', dest='threads', default=12,\n help='number of concurrent threads')\n parser.add_argument('--root-path', dest='rootPath',\n help='Root path of all domains', required=True)\n parser.add_argument('--lang1', dest='lang1',\n help='First language to parse', required=True)\n parser.add_argument('--lang2', dest='lang2',\n help='Second language to parse', required=True)\n parser.add_argument('--doc-threshold', dest='docThreshold',\n help='Document threshold', required=True)\n parser.add_argument('--bleu-threshold', dest='bleuThreshold',\n help='Sentence-level BLEU threshold', required=True)\n args = parser.parse_args()\n\n process(args.threads, args.rootPath, args.lang1, args.lang2, args.docThreshold, args.bleuThreshold)\n\n print(\"finished\")\n\nif __name__ == '__main__':\n\n main(sys.argv)\n\n\n\n","sub_path":"parallel/bleualign-parallel.py","file_name":"bleualign-parallel.py","file_ext":"py","file_size_in_byte":2675,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"320343408","text":"import self_awareness as sf\n\ndebug_mode = \"off\"\n\nobfuscated_tag = \"<obfuscated>\"\n\n\ndef get_pattern_list(*args):\n # pattern_s = \"5248631709\"\n # patt_list = []\n # for i in range(len(pattern_s)):\n # patt_list.append(int(pattern_s[i]))\n\n return [5, 2, 4, 8, 6, 3, 1, 7, 0,\n 9] # means that each 10-latter chunk of text will be shuffled like: 0123456789 --> 5248630719\n\n\nsf.tests(get_pattern_list)\n\n\n# shuffles the input string according to the pattern\n# The input string must be of the same length as the pattern.\ndef obfuscate_chunk(*args):\n i_str = \"\"\n o_str = \"\"\n pattern_list = \"\"\n try:\n i_str = args[0]\n pattern_list = args[1]\n if len(i_str) != len(pattern_list):\n if debug_mode != \"off\":\n print(\"CAUTION: i_str and pattern_list lengths must be same. 
Pattern of wrong length?\")\n o_str = \"\"\n else:\n o_str = \"\"\n for i in range(len(i_str)):\n o_str += i_str[pattern_list[i]]\n except Exception as e:\n o_str = \"\"\n if debug_mode != \"off\":\n print(\"obfuscate_chunk : \", str(e))\n\n return o_str\n\n\nsf.tests(obfuscate_chunk)\n\n\n# de-obfuscates the input string according to the pattern.\n# The input string must be of the same length as the pattern.\ndef recover_chunk(*args):\n try:\n i_str = args[0]\n pattern_list = args[1]\n if len(i_str) != len(pattern_list):\n if debug_mode != \"off\":\n print(\"CAUTION: i_str and pattern_list lengths must be same. Pattern of wrong length?\")\n o_str = \"\"\n o_str_list = [\"\"] * len(i_str)\n for i in range(len(i_str)):\n o_str_list[pattern_list[i]] = i_str[i]\n o_str = \"\"\n for i in range(len(o_str_list)):\n o_str += o_str_list[i]\n\n except:\n o_str = \"\"\n return o_str\n\n\nsf.tests(recover_chunk)\n\n\ndef concatenate_strings_list(*args):\n res = \"\"\n try:\n input_l = args[0]\n if isinstance(input_l, list):\n united_str = \"\"\n for i in range(len(input_l)):\n united_str += input_l[i]\n res = united_str\n except:\n res = \"\"\n return res\n\n\nsf.tests(concatenate_strings_list)\n\n\n# def split_string_into_list_by_template(i_str, tamplate_l, padding_len):\n# o_str_l = []\n# end_c = 0\n# for i in range(0, len(tamplate_l)): \n# start_c = end_c\n# end_c += len(tamplate_l[i]) \n# o_str_l.append(i_str[start_c : end_c])\n# o_str_l.append(i_str[end_c : end_c+padding_len])\n# return o_str_l \n# sf.tests(split_string_into_list_by_template)\n\n\n# the modes are \"obfuscate\" or \"recover\" \ndef shuffle_str(*args):\n try:\n i_str, pattern_list, mode = args[0:4]\n pat_len = len(pattern_list)\n o_str = \"\"\n for i in range(int(len(i_str) / pat_len)):\n test_str = i_str[i * pat_len: (i + 1) * pat_len]\n if mode == \"obfuscate\":\n shuf_str = obfuscate_chunk(test_str, pattern_list)\n else:\n shuf_str = recover_chunk(test_str, pattern_list)\n o_str += shuf_str\n except:\n o_str = \"\"\n return o_str\n\n\nsf.tests(shuffle_str)\n\n\n# Obfuscates or de-obfuscates a text stored in a list of strings.\n# The modes are \"obfuscate\" or \"recover\"\ndef shuffle_str_list(*args):\n try:\n str_l, pattern_list, mode = args\n united_str = concatenate_strings_list(str_l)\n pat_len = len(pattern_list)\n padding_len = pat_len - len(united_str) % pat_len\n united_str += \" \" * padding_len\n united_str_shuffled = shuffle_str(united_str, pattern_list, mode)\n # print(pat_len)\n res = united_str_shuffled\n except:\n res = []\n return res\n\n\nsf.tests(shuffle_str_list)\n\n\ndef check_if_already_obfusc(*args):\n res = False\n try:\n str_l = args[0]\n if len(str_l) > 1: # obfuscated files must have the tag about it, on a separate line\n test_str = str_l[-1].strip()\n if test_str == obfuscated_tag:\n res = True\n else:\n res = False\n except:\n res = False\n return res\n\n\nsf.tests(check_if_already_obfusc)\n","sub_path":"text_shuffle.py","file_name":"text_shuffle.py","file_ext":"py","file_size_in_byte":4221,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"281754684","text":"import csv\nimport json\nfrom textblob import TextBlob\nfrom nltk.corpus import wordnet\nfrom features import build_vocab\nfrom googletrans import Translator\nfrom spellchecker import SpellChecker\nfrom multiprocessing import Process, Queue\n\nneu_features = \"neu_features.txt\"\nneu_feature_file = open(neu_features, \"r\")\n\nneg_features = \"neg_features.txt\"\nneg_feature_file = 
open(neg_features, \"r\")\n\npos_features = \"pos_features.txt\"\npos_feature_file = open(pos_features, \"r\")\n\nneu_set = set()\nneu_set_c = set()\n\nneg_set = set()\nneg_set_c = set()\n\npos_set = set()\npos_set_c = set()\npos_set_cc = set()\n\nfor neu_word in neu_feature_file:\n\n neu_set.add(neu_word)\n neu_set_c.add(neu_word)\n\nfor neg_word in neg_feature_file:\n\n neg_set.add(neg_word)\n neg_set_c.add(neg_word)\n\nfor pos_word in pos_feature_file:\n\n pos_set.add(pos_word)\n pos_set_c.add(pos_word)\n pos_set_cc.add(pos_word)\n\n\npos_set.difference_update(neg_set)\npos_set.difference_update(neu_set)\n\nneg_set.difference_update(pos_set_c)\nneg_set.difference_update(neu_set)\n\nneu_set.difference_update(neg_set_c)\nneu_set.difference_update(pos_set_cc)\n\npos_set.discard('EMOPOS')\npos_set.discard('EMONEG')\npos_set.discard('EMONEU')\n\nneg_set.discard('EMOPOS')\nneg_set.discard('EMONEG')\nneg_set.discard('EMONEU')\n\nneu_set.discard('EMOPOS')\nneu_set.discard('EMONEG')\nneu_set.discard('EMONEU')\n\npos_set.add('EMOPOS')\nneg_set.add('EMONEG')\nneu_set.add('EMONEU')\n\nprint(\"Training the Model, Sit Back and Relax....\")\n\nrow = []\ni = 1\n\nwith open('train.json') as json_data:\n\n d = json.load(json_data)\n\ncolumn_headers = ['pos_score', 'neu_score', 'neg_score', 'sub_score', 'value']\n\nwith open('data.csv', 'w') as csvFile:\n\n writer = csv.writer(csvFile)\n writer.writerow(column_headers)\n\n for each in d:\n\n print(\"..\", i, \"..\")\n\n sentence = each[\"text\"]\n senti_value = each[\"sentiment\"]\n word_id = each[\"lang_tagged_text\"]\n word_id = word_id.replace(\" \", \"\")\n word_id = word_id.lower()\n \n tokens = build_vocab(sentence)\n\n pos_score = 0\n neu_score = 0\n neg_score = 0\n sub_score = 0\n \n for token in tokens:\n \n if token in pos_set:\n\n pos_score += 30\n continue\n \n if token in neu_set:\n\n neu_score += 20\n continue\n\n if token in neg_set:\n \n neg_score += 10\n continue\n \n else:\n\n start = word_id.find(token)\n\n if(start == -1):\n continue\n\n else:\n \n end = start + len(token)\n check = word_id[end+1]\n\n if(check == \"e\"):\n\n spell = SpellChecker()\n\n if(spell.known(token)):\n\n testimonial = TextBlob(token)\n polarity = testimonial.sentiment.polarity\n theory = testimonial.sentiment.subjectivity\n\n if(polarity >= .2):\n pos_score += 30\n sub_score += theory\n\n elif(polarity > -.2 and polarity < .2):\n neu_score += 20\n sub_score += theory\n\n else:\n neg_score += 10\n sub_score += theory\n\n else:\n\n new_token = spell.correction(token)\n\n testimonial = TextBlob(new_token)\n polarity = testimonial.sentiment.polarity\n theory = testimonial.sentiment.subjectivity\n\n if(polarity >= .2):\n pos_score += 30\n sub_score += theory\n\n elif(polarity > -.2 and polarity < .2):\n neu_score += 20\n sub_score += theory\n\n else:\n neg_score += 10\n sub_score += theory\n \n elif(check == 'h'):\n \n translator = Translator()\n trans_token = translator.translate(token, dest='en', src='hi')\n trans_token = trans_token.text\n\n spell = SpellChecker()\n trans_token = spell.correction(trans_token)\n\n testimonial = TextBlob(trans_token)\n polarity = testimonial.sentiment.polarity\n theory = testimonial.sentiment.subjectivity\n\n if(polarity >= .2):\n pos_score += 30\n sub_score += theory\n\n elif(polarity > -.2 and polarity < .2):\n pos_score += 20\n sub_score += theory\n\n else:\n neg_score += 10\n sub_score += theory\n\n else:\n continue\n \n row.append(pos_score)\n row.append(neu_score)\n row.append(neg_score)\n row.append(sub_score)\n 
row.append(senti_value)\n \n writer.writerow(row)\n row = []\n i += 1\n\nprint(\"Done!!! Now run accuracy.py\")","sub_path":"four_feat/score.py","file_name":"score.py","file_ext":"py","file_size_in_byte":5684,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"607344803","text":"import os\nimport argparse\nimport torch\nfrom torch import nn\nfrom nets.resnet_pre_activation import *\nfrom torchvision import datasets, transforms\nfrom torch.autograd import Variable\nimport time\nfrom utils.trainer import AverageMeter\nimport math\nfrom utils.measure import *\nfrom collections import OrderedDict\n\n\nparser = argparse.ArgumentParser(description='Network Slimming---Prune')\nparser.add_argument('--dataset', type=str, default='cifar10',\n help='training dataset (default:cifar10)'\n )\nparser.add_argument('--num-classes', type=int, default=10,\n help='humber of classes'\n )\nparser.add_argument(\n '--validate-batch-size',\n type=int,\n default=1000,\n metavar='N',\n help='batch size of validation (default:1000)')\nparser.add_argument('--no-cuda', action='store_true', default=False,\n help='disable cuda training'\n )\nparser.add_argument('--prune-rate', type=float, default=0.5,\n help='sparse rate (default:0.5)'\n )\nparser.add_argument('--model', default='', type=str, metavar='PATH',\n help='path to model needed to be pruned'\n )\nparser.add_argument('--save', default='./', type=str, metavar='PATH',\n help='path to save pruned model (default:./)'\n )\nparser.add_argument(\n '--gpu-devices', type=str, default='0',\n help='decide which gpu devices to use.For exmaple:0,1')\nargs = parser.parse_args()\nos.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_devices\nprint('Using gpu devices:{}'.format(os.environ['CUDA_VISIBLE_DEVICES']))\nargs.cuda = not args.no_cuda and torch.cuda.is_available()\n\nif args.save:\n if not os.path.exists(args.save):\n os.makedirs(args.save)\n\nkwargs = {'num_workers': 2, 'pin_memory': True} if args.cuda else {}\nif args.dataset == 'cifar10':\n normalize = transforms.Normalize(\n mean=[0.491, 0.482, 0.447],\n std=[0.247, 0.243, 0.262])\n validate_loader = torch.utils.data.DataLoader(\n datasets.CIFAR10('./data', train=False,\n transform=transforms.Compose([\n transforms.ToTensor(),\n normalize\n ])\n ),\n batch_size=args.validate_batch_size, shuffle=False, **kwargs\n )\nelif args.dataset == 'cifar100':\n normalize = transforms.Normalize(\n mean=[0.507, 0.487, 0.441],\n std=[0.267, 0.256, 0.276])\n # normalize = transforms.Normalize((.5,.5,.5),(.5,.5,.5))\n validate_loader = torch.utils.data.DataLoader(\n datasets.CIFAR100('./data', train=False,\n transform=transforms.Compose([\n transforms.ToTensor(),\n normalize\n ])\n ),\n batch_size=args.validate_batch_size, shuffle=False, **kwargs\n )\n\n\ndef validate(model, criterion):\n model.eval()\n\n losses = AverageMeter()\n top1 = AverageMeter()\n top5 = AverageMeter()\n\n time_stamp = time.time()\n for data, label in validate_loader:\n if args.cuda:\n data, label = data.cuda(), label.cuda()\n data, label = Variable(data, volatile=True), Variable(label)\n output = model(data)\n loss = criterion(output, label)\n\n prec1, prec5 = accuracy(output.data, label.data, topk=(1, 5))\n losses.update(loss.data[0], data.size(0))\n top1.update(prec1[0], data.size(0))\n top5.update(prec5[0], data.size(0))\n\n print('\\n Validate_Avg_Loss: {loss.avg:.4f},\\t'\n 'Top1_Acc: {top1.avg:.2f}%\\t'\n 'Top5_Acc: {top5.avg:.2f}%\\t'\n 'Time: {:.4f}s\\n'\n .format(\n time.time() - time_stamp,\n 
loss=losses,\n top1=top1,\n top5=top5,\n ))\n return top1.avg, top5.avg, losses.avg\n\n\ndef accuracy(output, target, topk=(1,)):\n maxk = max(topk)\n batch_size = target.size(0)\n\n _, pred = output.topk(maxk, 1, True, True)\n pred = pred.t()\n correct = pred.eq(target.view(1, -1).expand_as(pred))\n\n res = []\n for k in topk:\n correct_k = correct[:k].view(-1).float().sum(0)\n res.append(correct_k.mul_(100.0 / batch_size))\n return res\n\n\ndef cal_prob(weight):\n weight_sum = weight.sum()\n return weight.div(weight_sum)\n\n\ndef pre_process(m, threshold):\n normalization = m.weight.data.abs()\n mask = normalization.gt(threshold).float().cuda()\n remains = int(torch.sum(mask))\n num_channels_at_least = round(normalization.size(0) * 0.05)\n if remains < num_channels_at_least:\n remains = num_channels_at_least\n bn, bn_sorted_idx = torch.sort(normalization)\n mask[bn_sorted_idx[-num_channels_at_least:]] = 1\n return mask, remains\n\n\nmodel = preactivation_resnet164(num_classes=args.num_classes, resolution=32)\nif args.cuda:\n model.cuda()\nstate_dict = torch.load(args.model)\nnew_state_dict = OrderedDict()\nfor k,v in state_dict.items():\n name = k[7:]\n new_state_dict[name] = v\nmodel.load_state_dict(new_state_dict)\ncriterion = nn.CrossEntropyLoss()\n# print(model)\n\n\nprint('\\nPruning Start\\n')\ntotal = 0\ncount_bn = 0\nbn = []\n\n\ndef flatten(l): return [item for sublist in l for item in sublist]\n\n\nfor m in model.modules():\n if isinstance(m, nn.BatchNorm2d):\n count_bn += 1\n if count_bn != 1:\n total += m.weight.data.size(0)\n bn.append(m.weight.data.abs()) # .mul_(m.weight.size(0))\n if count_bn == 3:\n count_bn = 0\n\nbn = sorted(flatten(bn))\nthreshold_idx = math.floor(total * args.prune_rate)\nthreshold = bn[threshold_idx]\nprint(\"Pruning Threshold: {}\".format(threshold))\n# threshold = 0.1\n\npruned = 0\ncfg = []\ncfg_mask = []\nfirst_dropout = False\n\nremains = 0\ni = 0\ncfgs = []\ncount_bn = 0\n\nfor l in model.conv_layers:\n cfg = []\n for m in l.modules():\n if isinstance(m, nn.BatchNorm2d):\n count_bn += 1\n if count_bn != 1:\n mask, remains = pre_process(m, threshold)\n # print(mask)\n else:\n mask = torch.ones(m.weight.size()).float().cuda()\n remains = int(torch.sum(mask))\n pruned += mask.shape[0] - remains\n m.weight.data.mul_(mask)\n m.bias.data.mul_(mask)\n cfg.append(remains)\n cfg_mask.append(mask.clone())\n print(\n 'Layer_idx: {:d} \\t Total_channels: {:d} \\t Remained_channels: {:d}'.format(\n i, mask.shape[0], remains))\n i += 1\n if count_bn == 3:\n count_bn = 0\n\n cfgs.append(cfg)\n\nfor j in range(len(cfgs)-1):\n cfgs[j].append(cfgs[j+1][0])\nremains = model.bn.weight.size(0)\ncfgs[-1].append(remains)\nprint(\n 'Layer_idx: {:d} \\t Total_channels: {:d} \\t Remained_channels: {:d}'.format(\n i,\n remains,\n remains))\n\n\npruned_ratio = pruned / total\n\nprint(\"Pre-processing done! 
{}\".format(pruned_ratio))\nvalidate(model, criterion)\nprint(cfgs)\nnew_model = preactivation_resnet164(\n num_classes=args.num_classes,\n resolution=32,\n cfgs=cfgs)\nprint(new_model)\nif args.cuda:\n new_model.cuda()\n\nlayer_idx = 0\nstart_mask = torch.ones(3)\nend_mask = cfg_mask[layer_idx]\n\n# process the first conv_layer\nnew_model.conv1.weight.data = model.conv1.weight.data.clone()\n\npre_is_BN = False\ndiscard_idx = None\nresidual_bn_bias = None\nabsorted_bias = None\n\nfor (l, l_new) in zip(model.conv_layers, new_model.conv_layers):\n for (m, m_new) in zip(l.children(),\n l_new.children()):\n if isinstance(m, nn.BatchNorm2d):\n pre_is_BN = True\n discard_idx = torch.squeeze(torch.nonzero(end_mask.eq(0)))\n idx1 = torch.squeeze(torch.nonzero(end_mask))\n\n m_new.weight.data = m.weight.data[idx1].clone()\n m_new.bias.data = m.bias.data[idx1].clone()\n\n if discard_idx.dim() > 0:\n residual_bn_bias = m.bias.data[discard_idx].clone()\n else:\n residual_bn_bias = None\n\n m_new.running_mean = m.running_mean[idx1].clone()\n m_new.running_var = m.running_var[idx1].clone()\n\n if absorted_bias is not None:\n m_new.running_mean.sub_(absorted_bias)\n\n start_mask = end_mask\n layer_idx += 1\n if layer_idx < len(cfg_mask):\n end_mask = cfg_mask[layer_idx]\n else:\n pass\n\n elif isinstance(m, nn.Conv2d):\n if pre_is_BN:\n idx0 = torch.squeeze(torch.nonzero(start_mask))\n idx1 = torch.squeeze(torch.nonzero(end_mask))\n w = m.weight.data[:, idx0.tolist(), :, :]\n m_new.weight.data = w[idx1.tolist(), :, :, :].clone()\n m_new.bias.data = m.bias.data[idx1.tolist()].clone()\n\n if residual_bn_bias is not None:\n w = m.weight.data[:, discard_idx.tolist(), :, :]\n w = w[idx1.tolist(), :, :, :].clone()\n dim0, dim1 = w.size(0), w.size(1)\n w = w.view(dim0, dim1, -1).transpose(1, 2)\n absorted_bias = w.mul_(residual_bn_bias).sum(2).sum(1)\n else:\n absorted_bias = None\n\n pre_is_BN = False\n else:\n m_new.weight.data = m.weight.data.clone()\n # print(m_new,m_new.weight.size())\n else:\n pass\nnew_model.bn.weight.data = model.bn.weight.data.clone()\nnew_model.bn.bias.data = model.bn.bias.data.clone()\nnew_model.bn.running_mean = model.bn.running_mean.clone()\nnew_model.bn.running_var = model.bn.running_var.clone()\nnew_model.fc.weight.data = model.fc.weight.data.clone()\nnew_model.fc.bias.data = model.fc.bias.data.clone()\n\n\nprint('Pruning done! 
Channel pruning result:{}'.format(cfgs))\ntorch.save({'cfg': cfgs, 'model_state_dict': new_model.state_dict()},\n os.path.join(args.save, 'model_pruned.pkl'))\n# # torch.save({'cfg': 'D', 'model_state_dict':model.state_dict()},os.path.join(args.save,'fake_pruned_model.pkl'))\nvalidate(new_model, criterion)\n\nmodel.cpu()\nnew_model.cpu()\nprint('Model before pruning:')\nold_model_params = measure_layer_param(model)\nold_model_flops = measure_layer_flops(model)\n\nprint('Model after pruning:')\nnew_model_params = measure_layer_param(new_model)\nnew_model_flops = measure_layer_flops(new_model)\n\nprint('Params_reduced:{:.2f}%\\nFLOPs_reduced:{:.2f}%'.format(\n (old_model_params - new_model_params) * 100 / old_model_params,\n (old_model_flops - new_model_flops) * 100 / old_model_flops\n ))\n","sub_path":"prune_resnet.py","file_name":"prune_resnet.py","file_ext":"py","file_size_in_byte":10734,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"598767933","text":"from collections import deque\n\n# https://oj.leetcode.com/problems/minimum-window-substring/\nclass Solution:\n # @return a string\n # @return a string\n def minWindow(self, S, T):\n minIdx = {}\n count = {}\n queue = {}\n currCount = {}\n currTotal = 0\n total = len(T)\n maxIdx = None\n minWindowSize = len(S) + 1\n minWindowLeft = None\n minWindowRight = None\n\n for c in T:\n if c in count:\n count[c] += 1\n else:\n count[c] = 1\n currCount[c] = 0\n\n for c in count:\n if count[c] > 1:\n queue[c] = deque()\n\n for i in range(len(S)):\n c = S[i]\n if c not in count:\n # not interested\n continue\n\n if count[c] > currCount[c]:\n # don't have all words in window yet\n currCount[c] += 1\n currTotal += 1\n maxIdx = i\n\n if currCount[c] == 1:\n # update min, if we are seeing for the first time\n minIdx[c] = i\n\n if count[c] > 1:\n # update multi words in queue\n queue[c].append(i)\n\n else:\n # have all in window, just update window\n if count[c] == 1:\n minIdx[c] = i\n else:\n # remove exiting min\n queue[c].popleft()\n\n # add current at the end\n queue[c].append(i)\n\n # take next as min\n minIdx[c] = queue[c][0]\n maxIdx = i\n\n if total == currTotal:\n currMin = min(minIdx.values())\n if minWindowSize > (maxIdx - currMin + 1):\n minWindowSize = (maxIdx - currMin + 1)\n minWindowLeft = currMin\n minWindowRight = maxIdx\n\n if minWindowLeft is None:\n return \"\"\n\n return S[minWindowLeft:minWindowRight+1]\n","sub_path":"Algos/Others/MinWindow.py","file_name":"MinWindow.py","file_ext":"py","file_size_in_byte":2108,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"385605919","text":"#!/usr/bin/python\n\nimport numpy\n\ndef outlierCleaner(predictions, ages, net_worths):\n \"\"\"\n Clean away the 10% of points that have the largest\n residual errors (difference between the prediction\n and the actual net worth).\n\n Return a list of tuples named cleaned_data where \n each tuple is of the form (age, net_worth, error).\n \"\"\"\n \n cleaned_data = []\n\n ### your code goes here\n errors = numpy.fabs(predictions - net_worths)\n combined = numpy.hstack((ages, net_worths, errors))\n combined = combined[combined[:,2].argsort()] # sort on 3rd column errors\n \n cleaned_data = [(combined[i][0], combined[i][1], combined[i][2]) for i in range(int(len(combined)*0.9))]\n \n return 
cleaned_data\n\n","sub_path":"outliers/outlier_cleaner.py","file_name":"outlier_cleaner.py","file_ext":"py","file_size_in_byte":754,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"144801761","text":"\n_twitter_api = None\n\ndef get_twitter_api():\n    global _twitter_api\n    if not _twitter_api:\n        from credentials import app_key, app_secret\n        from twython import Twython, TwythonError\n        twitter = Twython(app_key=app_key, app_secret=app_secret, oauth_version=2)\n        access_token = twitter.obtain_access_token() # Store `access_token`(?)\n        _twitter_api = Twython(app_key=app_key, access_token=access_token)\n    \n    return _twitter_api","sub_path":"source/conn.py","file_name":"conn.py","file_ext":"py","file_size_in_byte":461,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"643885723","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat May 22 13:01:52 2021\n\n@author: LENOVO\n\"\"\"\n\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Apr 20 18:03:35 2021\n\n@author: LENOVO\n\"\"\"\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Mar  6 11:33:00 2021\n\n@author: LENOVO\n\"\"\"\n\n# Support Vector Regression\n\n\nimport pandas as pd  # library for data analysis\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom sklearn import preprocessing\nfrom sklearn.preprocessing import LabelEncoder\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.model_selection import train_test_split  # splits the DataFrame into random training subsets\nfrom sklearn.utils import resample\n\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.model_selection import cross_val_score\n\nimport seaborn as sns  # plots and statistics, styling\n# sns.set(style=\"white\")\n# sns.set(style=\"whitegrid\", color_codes=True)\n\nimport os  # operating system\nimport matplotlib.pyplot as plt \nplt.rc(\"font\", size=14)\n\nimport statsmodels.api as sm  # library with estimation functions for statistical models\nfrom sklearn.metrics import (confusion_matrix, accuracy_score)\nfrom countryinfo import CountryInfo\n\nos.chdir(\"C:/Users/LENOVO/Documents/Clase#10/\")\n\n# Region\ncwd = os.getcwd()  # assign the working directory to the variable cwd\ncsv1_file='Position_Salaries.csv'  # assign the file name to a variable\n\n\ndataset =pd.read_csv(csv1_file)\n\nX=dataset.iloc[:,1:-1].values\ny=dataset.iloc[:,-1].values\nprint(X)\nprint(y)\n\n# reshape to turn y into a 2D column vector\n\ny=y.reshape(len(y),1)\nprint(y)\n\n# standardize the data.\n\nfrom sklearn.preprocessing import StandardScaler\n\n\nsc_X = StandardScaler()\nsc_y = StandardScaler()\n\nX= sc_X.fit_transform(X)\ny= sc_y.fit_transform(y)\nprint(X)\nprint(y)\n\nX.std()\ny.std()\n\n# create the support vector regressor\n\nfrom sklearn.svm import SVR  # the class needs a few parameters, e.g. the kernel\n\nregressor = SVR(kernel=\"rbf\")\nregressor.fit(X,y.ravel())\n\nprint(regressor)\n\n# rescale a level of 6.5\nsc_X.transform([[6.5]])\n\n# predict the scaled salary for the rescaled level\nregressor.predict(sc_X.transform([[6.5]]))\n\n# prediction of a new result using the inverse transform\nsc_y.inverse_transform(regressor.predict(sc_X.transform([[6.5]])))\n\n# visualizing the results\n\nplt.scatter(sc_X.inverse_transform(X), sc_y.inverse_transform(y), color = 'red') ## Kernel function = Radial basis function\n# 
plt.plot(sc_X.inverse_transform(X), sc_y.inverse_transform(regressor.predict(X)), color = 'blue', label ='rbf')\nplt.title('(SVR)')\nplt.xlabel('Job Level or Position')\nplt.ylabel('Salary')\nplt.show()\n\n# training of the SVR models on the full level/salary dataset\n# regressor.fit(X,Y) # warning tells you to use ravel\nregressor = SVR(kernel=\"rbf\")\nregressor.fit(X,y.ravel())\n\n#%%% Polynomial\nregressor1 = SVR(kernel=\"poly\", degree=3)\nregressor1.fit(X,y.ravel())\nprint(regressor1)\n\n# rescale a level of 6.5\nsc_X.transform([[6.5]])\n\n# predict the scaled salary for the rescaled level\nregressor1.predict(sc_X.transform([[6.5]]))\n\n# prediction of a new result using the inverse transform\nsc_y.inverse_transform(regressor1.predict(sc_X.transform([[6.5]])))\n\n# visualizing the results\n\n# plt.scatter(sc_X.inverse_transform(X), sc_y.inverse_transform(y), color = 'red') \n# plt.plot(sc_X.inverse_transform(X), sc_y.inverse_transform(regressor.predict(X)), color = 'blue', label ='rbf')\n# plt.plot(sc_X.inverse_transform(X), sc_y.inverse_transform(regressor1.predict(X)), color = 'green', label ='poly')\n# plt.title('(POLY vs SVR)')\n# plt.xlabel('Job Level or Position')\n# plt.xticks(fontsize = 20)\n# plt.yticks(fontsize = 20)\n# plt.ylabel('Salary', fontsize = 20)\n# plt.legend(fontsize = 22)\n# plt.show()\n\n\n\n#%%% DecisionTree\nfrom sklearn.tree import DecisionTreeRegressor\nregressor2 = DecisionTreeRegressor(random_state = 0)\nregressor2.fit(X, y)\n\n\n# rescale a level of 6.5\nsc_X.transform([[6.5]])\n\n# predict the scaled salary for the rescaled level\nregressor2.predict(sc_X.transform([[6.5]]))\n\n# prediction of a new result using the inverse transform\nsc_y.inverse_transform(regressor2.predict(sc_X.transform([[6.5]])))\n\n# visualizing the results\n\nplt.scatter(sc_X.inverse_transform(X), sc_y.inverse_transform(y), color = 'red') \nplt.plot(sc_X.inverse_transform(X), sc_y.inverse_transform(regressor2.predict(X)), color = 'black', label ='tree')\nplt.plot(sc_X.inverse_transform(X), sc_y.inverse_transform(regressor.predict(X)), color = 'blue', label ='rbf')\nplt.plot(sc_X.inverse_transform(X), sc_y.inverse_transform(regressor1.predict(X)), color = 'green', label ='poly')\nplt.title('(Tree vs POLY vs SVR)')\nplt.xlabel('Job Level or Position')\nplt.xticks(fontsize = 20)\nplt.yticks(fontsize = 20)\nplt.ylabel('Salary', fontsize = 20)\nplt.legend(fontsize = 22)\nplt.show()\n\n\n# get the R^2 (coefficient of determination) for each model:\n\n# SVR model\na=regressor.score(X,y)\n# polynomial model, degree 3\nb=regressor1.score(X,y)\n# decision tree\nc=regressor2.score(X,y)\nprint(a,b,c)\n\n\n\n","sub_path":"Diogenes Barreto/Mineria de datos - semestre 4/Tarea_8/Taller 8_Decision Tree.py","file_name":"Taller 8_Decision Tree.py","file_ext":"py","file_size_in_byte":5080,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"619601879","text":"from flask import Flask, render_template\nimport paho.mqtt.client as mqtt \nimport time \n\napp=Flask(__name__)\nmqtt_broker = \"192.168.2.4\"\nmqtt_user = \"iot\"\nmqtt_pwd = \".csee\"\n\n###########URL##################\npub_topic=\"iot/21400670\"\nurlLed = \"iot/21400670/led\"\nurlLedon = \"iot/21400670/ledon\"\nurlLedoff = \"iot/21400670/ledoff\"\nurlUsbled = \"iot/21400670/usbled\"\nurlUsbledon = \"iot/21400670/usbledon\"\nurlUsbledoff = \"iot/21400670/usbledoff\"\nurlDht22 = \"iot/21400670/dht22\"\nurlDht22_t = \"iot/21400670/dht22_t\"\nurlDht22_h = 
\"iot/21400670/dht22_h\"\nurlCds = \"iot/21400670/cds\"\nurlPir = \"iot/21400670/pir\"\n\nsubDht22 = \"iot/21400670/sensor/dht22\"\nsubDht22_t = \"iot/21400670/sensor/dht22_t\"\nsubDht22_h = \"iot/21400670/sensor/dht22_h\"\nsubCds = \"iot/21400670/sensor/cds\"\nsubPir = \"iot/21400670/sensor/pir\"\n\ncommonSubDht22 = \"iot/ece30003/sensor/dht22\"\ncommonSubDht22_t =\"iot/ece30003/sensor/dht22_t\"\ncommonSubDht22_h = \"iot/ece30003/sensor/dht22_h\"\ncommonSubCds = \"iot/ece30003/sensor/cds\"\ncommonSubPir = \"iot/ece30003/senor/pir\"\n\nsubLed = \"iot/21400670/check/led\"\nsubUsbled = \"iot/21400670/check/usbled\"\nmessage='none'\n#############option###############\nledBool = False \nledValue = \"Off\"\ntemperatureValue = \"none\" \nhumidityValue = \"none\" \nusbledValue = \"Off\"\nusbledBool = False\npirValue = \"Not detected\"\npirBool = False\ncdsValue = \"none\"\nhomeBool = True \n################### main ######################\n@app.route('/')\ndef home():\n\treturn render_template('index.html', temperature=temperatureValue, humidity=humidityValue, cds=cdsValue)\n\n################## led ########################\n@app.route('/'+urlLed)\ndef led():\n\tmqttc.publish(pub_topic,\"light/led\")\n\treturn home()\n@app.route('/'+urlLedon)\ndef ledon():\n\tmqttc.publish(pub_topic,\"light/ledon\")\n\treturn home()\n@app.route('/'+urlLedoff)\ndef ledoff():\n\tmqttc.publish(pub_topic,\"light/ledoff\")\n\treturn home()\n################## USBLED #####################\n@app.route('/'+urlUsbled)\ndef usbled():\n\tmqttc.publish(pub_topic,\"light/usbled\")\n\treturn home()\n@app.route('/'+urlUsbledon)\ndef usbledon():\n\tmqttc.publish(pub_topic, \"light/usbledon\")\n\treturn home()\n@app.route('/'+urlUsbledoff)\ndef usbledoff():\n\tmqttc.publish(pub_topic, \"light/usbledoff\")\n\treturn home()\n\n################### dht22 ################## \n@app.route('/'+urlDht22)\ndef getDht22():\n\tglobal temperatureValue\n\tglobal humidityValue\n\tprint(\"dht22\");\n\tmqttc.publish(pub_topic, \"sensor/dht22\")\t\n\tmqttc.subscribe(subDht22)\n\ttime.sleep(1)\n\treturn render_template('index.html', temperature=temperatureValue, humidity=humidityValue, cds=cdsValue)\n\n@app.route('/'+urlDht22_t)\ndef getDht22_t():\n\tmqttc.publish(pub_topic, \"sensor/dht22_t\")\n\tmqttc.subscribe(subDht22_t)\n\ttime.sleep(1)\n\tprint(temperatureValue)\n\tmqttc.unsubscribe(subDht22_t)\n\treturn urlDht22_t + message\n@app.route('/'+urlDht22_h)\ndef getDht22_h():\n\tmqttc.publish(pub_topic, \"sensor/dht22_h\")\n\tmqttc.subscribe(subDht22_h)\n\treturn render_template('index.html', temperature=temperatureValue, humidity=humidityValue, cds=cdsValue)\n\n################### cds ######################\n@app.route('/'+urlCds)\ndef getCds():\n\tmqttc.publish(pub_topic, \"sensor/cds\")\n\tmqttc.subscribe(subCds)\n\treturn render_template('index.html', temperature=temperatureValue, humidity=humidityValue, cds=cdsValue)\n@app.route('/'+urlPir)\ndef getPir():\n\tmqttc.subscribe(subPir)\n\treturn render_template('pir.html', pir=pirValue)\n\n################### Function defition #################\ndef on_message(mqttc, userdata, msg):\n\tglobal topic\n\tglobal message\n###\tprint(msg.topic+\" \"+str(msg.payload))\n\ttopic = msg.topic\n\tmessage=str(msg.payload)\n\tprint( str(message)+\"message\")\n\tprint(topic)\ndef on_connection(mqttc, userdata, flags, rc):\n\tprint(\"###Connected with result code \" + str(rc))\n\treturn home()\t\nif __name__==\"__main__\":\n\tmqttc = mqtt.Client(\"rpi3_1\") \n\tmqttc.username_pw_set(mqtt_user, mqtt_pwd) 
\n\tmqttc.on_message=on_message \n\tmqttc.on_connect=on_connection \n\tmqttc.connect(mqtt_broker, 1883, 60) \n\tmqttc.loop_start()\n\tapp.run(host=\"0.0.0.0\", port=80, debug=True) \n","sub_path":"flask/lab7_rpi3_flask_get_temp.py","file_name":"lab7_rpi3_flask_get_temp.py","file_ext":"py","file_size_in_byte":4007,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"30812269","text":"from flask_wtf import Form\nfrom wtforms import IntegerField, DateField, StringField,DateTimeField\nfrom wtforms.validators import DataRequired\n\n\nclass PlanForm(Form):\n    id = IntegerField('id')\n    mother_plan = IntegerField('mother_plan')\n    start_time = DateTimeField('start_time')\n    end_time = DateTimeField('end_time')\n    type = IntegerField('type')\n    state = IntegerField('state')\n    title = StringField('title')\n\n\nclass AddPlanForm(PlanForm):\n    mother_plan = IntegerField('mother_plan', validators=[DataRequired()])\n    start_time = DateTimeField('start_time', validators=[DataRequired()])\n    type = IntegerField('type', validators=[DataRequired()])\n    state = IntegerField('state', validators=[DataRequired()])\n    title = StringField('title', validators=[DataRequired()])\n\n\nclass DeletePlanForm(PlanForm):\n    id = IntegerField('id', validators=[DataRequired()])\n\n\nclass TestForm(Form):\n    id = IntegerField('id')\n    # mother_plan = IntegerField('mother_plan')\n    # start_time = DateTimeField('start_time')\n    # end_time = DateTimeField('end_time')\n    # type = IntegerField('type')\n    # state = IntegerField('state')\n    # title = StringField('title')\n","sub_path":"app/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1182,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"100916305","text":"from typing import List\nfrom collections import deque\n\n\nclass Solution: \n    def findMinHeightTrees(self, n: int, edges: List[List[int]]) -> List[int]:\n        if n == 0:\n            return []\n        if n == 1:\n            return [0]\n        if n == 2:\n            return [0, 1]\n\n        neighbor = {c: [] for c in range(n)}\n        degree = {c: 0 for c in range(n)}\n\n        for edge in edges:\n            degree[edge[0]] += 1\n            degree[edge[1]] += 1\n            neighbor[edge[0]].append(edge[1])\n            neighbor[edge[1]].append(edge[0])\n\n        q = deque([x for x in range(n) if degree[x] == 1])\n\n        # peel leaves layer by layer until at most two nodes remain;\n        # those survivors are the roots of the minimum-height trees\n        remaining = n\n        while remaining > 2:\n            size = len(q)\n            remaining -= size\n            for _ in range(size):\n                cur = q.popleft()\n                for x in neighbor[cur]:\n                    degree[x] -= 1\n                    if degree[x] == 1:\n                        q.append(x)\n        return list(q)\n\n    def dfs(self, x, neighbor, visited):\n        visited[x], flag = True, False\n        val, res = -10000, []\n        for y in neighbor[x]:\n            if not visited[y]:\n                flag = True\n                val1, res1 = self.dfs(y, neighbor, visited)\n                if val1 > val:\n                    val = val1\n                    res = res1\n        visited[x] = False\n        if flag:\n            res.append(x)\n            return [val + 1, res]\n        return [0, [x]]\n\n    def findMinHeightTrees(self, n: int, edges: List[List[int]]) -> List[int]:\n        if n == 0:\n            return []\n        if n == 1:\n            return [0]\n        if n == 2:\n            return [0, 1]\n\n        neighbor = {c: [] for c in range(n)}\n        visited = [False for _ in range(n)]\n\n        for edge in edges:\n            neighbor[edge[0]].append(edge[1])\n            neighbor[edge[1]].append(edge[0])\n\n        val0, res0 = self.dfs(0, neighbor, visited)\n        far1 = res0[0]\n        val1, res1 = self.dfs(far1, neighbor, visited)\n\n        if val1 & 1:\n            return [res1[val1 >> 1], res1[(val1 >> 1) + 1]]\n        return [res1[val1 >> 1]]\n","sub_path":"minimum-height-trees/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":2028,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"279242278","text":"import datetime\nimport hashlib\nimport hmac\nimport requests\n\nfrom .errors import DataMonsterError\n\n\nclass Client(object):\n \"\"\"Low level client for interacting with the DataMonster server\"\"\"\n\n server = \"https://dm.adaptivemgmt.com\"\n\n def __init__(self, key_id, secret, server=None, verify=True):\n self.key_id = key_id\n self.secret = secret\n if server:\n self.server = server\n\n self.verify = verify\n\n def compute_hash_string(self, method, path, date_str, secret_str):\n msg_to_hash = \"\\n\".join([method, path, date_str])\n secret_binary = bytes(bytearray.fromhex(secret_str))\n\n return hmac.new(\n secret_binary, msg_to_hash.encode(\"utf-8\"), hashlib.sha256\n ).hexdigest()\n\n def _get_session(self, method, path, headers=None):\n \"\"\"\n :param path: (six.text_type) url path\n :param headers: (dict or None) Additional optional header items\n\n :return: the Response, appropriately formatted/deserialized\n :raises: DataMonsterError, if requests `get` returns with status_code != 200,\n or if content type of response is neither json nor avro\n \"\"\"\n # so that `headers` doesn't have mutable default value {}\n headers = headers or {}\n\n date = datetime.datetime.utcnow()\n date_str = date.strftime(\"%a, %d %b %Y %H:%M:%S\") + \" +0000\"\n\n # Probably should fix this on the server to keep the get params in the hash\n hash_path = path.split(\"?\")[0]\n try:\n hash_str = self.compute_hash_string(\n method, hash_path, date_str, self.secret\n )\n except ValueError:\n raise ValueError(\"Bad key provided\")\n session = requests.Session()\n session.headers[\"Date\"] = date_str\n session.headers[\"Authorization\"] = \"DM {}:{}\".format(self.key_id, hash_str)\n session.headers[\"Accept\"] = \"application/json\"\n session.headers.update(headers)\n\n retry = requests.packages.urllib3.util.retry.Retry(\n total=3,\n backoff_factor=5,\n status_forcelist=(500, 502, 504),\n method_whitelist=frozenset([\"GET\", \"POST\"]),\n )\n adapter = requests.adapters.HTTPAdapter(max_retries=retry)\n session.mount(\"http://\", adapter)\n session.mount(\"https://\", adapter)\n\n return session\n\n def _format_response(self, response):\n \"\"\"Format the response from a rest call\"\"\"\n\n if response.status_code != 200:\n raise DataMonsterError(response.reason, response.content)\n\n if response.headers[\"Content-Type\"] == \"application/json\":\n return response.json()\n elif response.headers[\"Content-Type\"] == \"avro/binary\":\n return response\n else:\n raise DataMonsterError(\n \"Unexpected content type: {}\".format(response.headers[\"Content-Type\"])\n )\n\n def get(self, path, headers=None, stream=False):\n \"\"\"\n :param path: (six.text_type) url path\n :param headers: (dict or None) Additional optional header items\n\n :return: the Response, appropriately formatted/deserialized\n :raises: DataMonsterError, if requests `get` returns with status_code != 200,\n or if content type of response is neither json nor avro\n \"\"\"\n\n session = self._get_session(\"GET\", path, headers)\n\n url = \"{}{}\".format(self.server, path)\n response = session.get(url, verify=self.verify, stream=stream)\n\n return self._format_response(response)\n\n def post(self, path, json=None, headers=None, stream=False, files=None):\n \"\"\"\n :param path: (six.text_type) url path\n :param data: (dict) post data\n :param headers: (dict or None) Additional optional header items\n\n :return: the Response, appropriately formatted/deserialized\n :raises: DataMonsterError, if requests `get` 
returns with status_code != 200,\n or if content type of response is neither json nor avro\n \"\"\"\n\n session = self._get_session(\"POST\", path, headers)\n\n url = \"{}{}\".format(self.server, path)\n response = session.post(url, json=json, verify=self.verify, stream=stream, files=files)\n\n return self._format_response(response)\n","sub_path":"datamonster_api/lib/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":4307,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"560503777","text":"# Game board current state\ngame = [[0, 0, 0],\n [0, 0, 0],\n [0, 0, 0]]\n\ndef check_for_win(current_game):\n\n def check_row(row, direction):\n # Check if all items in the row are a valid\n # player 1 or 2 and the same player\n if row[0] and row.count(row[0]) == len(row):\n print('Winner player %d [%s]' % (row[0], direction))\n return True\n return False\n\n\n # Check for horizontal match\n for row in current_game:\n if check_row(row, '-'):\n break\n\n else:\n n_elements = len(current_game)\n\n # Check for verticle match\n for column in range(n_elements):\n row = [row[column] for row in current_game]\n if check_row(row, '|'):\n break\n\n else:\n # Check dialog top left to bottom right\n row = [current_game[i][i] for i in range(n_elements)]\n \n # Check dialog top right to bottom left\n if not check_row(row, '\\\\'):\n row = [current_game[(n_elements-1) - i][i] for i in range(n_elements)]\n check_row(row, '/')\n\ndef game_board(current_game, player=0, row=0, column=0, display_only=False):\n if not display_only:\n current_game[row][column] = player\n # Redraw current game board state\n print(' a b c')\n for count, row in enumerate(current_game):\n print(count, row)\n check_for_win(game)\n\n# Start with a blank board\ngame_board(game, display_only=True)\ngame_board(game, player=1, row=0, column=0)\ngame_board(game, player=1, row=1, column=1)\ngame_board(game, player=1, row=2, column=2)\n","sub_path":"tictactoe/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1631,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"578550264","text":"import pandas as pd\ndef plot_2_animation(df_historical_data = pd.read_csv(\"Data/VisualizationData/Historical_CO2_Emissions_Data.csv\"),\\\n\tdf2 = pd.read_csv(\"Data/VisualizationData/2_Mitigation_Curves_Data.csv\")):\n\t'''\n\tThis function plots the historical CO2 Emissions from 1900-2019 &\n\tplots the mitigation curves from 2000-2026 to meet +2C degrees \n\tin an animation. 
The animation is also saved as a GIF.\n\t:param\n\t\tInput: df_historical_data --> The dataframe of historical CO2 emissions\n\t\t\t\tdf2 --> The dataframe of the mitigation rates for max +2C degrees\n\t\tOutput: Plot of the curves and animated GIF\n\t'''\n\timport pandas as pd\n\timport matplotlib.pyplot as plt\n\tfrom matplotlib.animation import FuncAnimation\n\tdegree = u'\\N{DEGREE SIGN}'\n\n\tdata_dict = {}\n\tfor i in range(2000,2027):\n\t\tdata_dict[i] = df2[['Year',str(i)]]\n\n\n\tfig, ax = plt.subplots()\n\tax = plt.axes(xlim=(1900, 2100), ylim=(-5, 45))\n\tline1, = ax.plot([], [],lw=2, label = 'Year')\n\tline2, = ax.plot([], [],lw=2, label = 'CO2 Emissions (Gigatonnes)')\n\tyear_text = ax.text(0.03, 0.905, '', fontsize = 14, transform=ax.transAxes)\n\n\n\tdef init():\n\t\tline1.set_data([], [])\n\t\tline2.set_data([], [])\n\t\tyear_text.set_text('')\n\t\treturn line1, line2, year_text\n\tdef animate(i):\n\t\tx = (data_dict[i+2000])['Year']\n\t\ty = (data_dict[i+2000])[str(i+2000)]\n\t\tline2.set_data(x,y)\n\t\tline1.set_data(df_historical_data['Year'], df_historical_data['Historical'])\n\t\tyear_text.set_text(str(i+2000))\n\n\n\t\treturn line1,line2, year_text\n\n\tanim = FuncAnimation(fig, animate, frames=len(data_dict), init_func=init, interval=200, blit=True)\n\n\tplt.xlabel('Year')\n\tplt.ylabel('CO2 Emissions (Gigatonnes)')\n\n\tplt.title('CO2 Emission Mitigation Curves to Limit Warming to 2'+ degree + 'C by 2100')\n\tplt.show()\n\tanim.save('Image/2 Degrees Mitigation.gif', writer='imagemagick', fps=4)\n\ndef plot_2_2020(df_historical_data = pd.read_csv(\"Data/VisualizationData/Historical_CO2_Emissions_Data.csv\"),\\\n\tdf2 = pd.read_csv(\"Data/VisualizationData/2_Mitigation_Curves_Data.csv\")):\n\t'''\n\tThis function plots the historical CO2 Emissions from 1900-2019 &\n\tplots the mitigation curve starting at the beginning of 2020 to \n\tmeet +2C degrees.\n\t:param\n\t\tInput: df_historical_data --> The dataframe of historical CO2 emissions\n\t\t\t\tdf2 --> The dataframe of the mitigation rates for max +2C degrees\n\t\tOutput: Plot of the curves on the same figure.\n\t'''\n\timport pandas as pd\n\timport matplotlib.pyplot as plt\n\tfrom matplotlib.animation import FuncAnimation\n\tdegree = u'\\N{DEGREE SIGN}'\n\n\tdata_dict = {}\n\tfor i in range(2000,2027):\n\t\tdata_dict[i] = df2[['Year',str(i)]]\n\tax = plt.gca()\n\tsize = (20,15)\n\tdf_historical_data.plot(kind='line',x='Year',y='Historical',ax=ax, color = [(0.3647058823,0.73725490196,0.82352941176)], legend = None, figsize = size,linewidth=2)\n\tdata_dict[2019].plot(kind='line',x='Year',y=str(2019),ax=ax, color = 'red', legend = None, figsize = size,linewidth=2)\n\tplt.xticks(range(1900,2120,20))\n\tplt.xlabel('Year')\n\tplt.ylabel('CO2 Emissions (Gigatonnes)')\n\tplt.title('CO2 Emission Mitigation Starting 2020 to Limit Warming to 2'+ degree + 'C by 2100')\n\tplt.figure(1)\n\tplt.show()\n\n","sub_path":"src/plots/Plots_2.py","file_name":"Plots_2.py","file_ext":"py","file_size_in_byte":3117,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"401629747","text":"import threading\nimport json\n\nfrom packets.packets import makeClientDataDict,PType\n\n\n#holds client data and thread locks\nclass ClientData:\n def __init__(self,sock,address,clientDict):\n self.lock=threading.Lock()\n\n self.lock.acquire()\n self.sock=sock\n\n self.dict = clientDict\n self.lock.release()\n\n def remove(self,clientList):\n self.lock.acquire()\n clientList.remove(self)\n 
self.sock.close()\n self.lock.release()\n \n def packageData(self):\n return self.dict\n\n\n","sub_path":"server/structures.py","file_name":"structures.py","file_ext":"py","file_size_in_byte":541,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"239158592","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.7 (62211)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build\\bdist.win32\\egg\\_textable\\widgets\\OWTextableExtractXML.py\n# Compiled at: 2016-11-22 03:34:26\n\"\"\"\nClass OWTextableExtractXML\nCopyright 2012-2016 LangTech Sarl (info@langtech.ch)\n-----------------------------------------------------------------------------\nThis file is part of the Orange-Textable package v2.0.\n\nOrange-Textable v2.0 is free software: you can redistribute it and/or modify\nit under the terms of the GNU General Public License as published by\nthe Free Software Foundation, either version 3 of the License, or\n(at your option) any later version.\n\nOrange-Textable v2.0 is distributed in the hope that it will be useful,\nbut WITHOUT ANY WARRANTY; without even the implied warranty of\nMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\nGNU General Public License for more details.\n\nYou should have received a copy of the GNU General Public License\nalong with Orange-Textable v2.0. If not, see <http://www.gnu.org/licenses/>.\n\"\"\"\n__version__ = '0.15.3'\nimport re, LTTL.Segmenter as Segmenter\nfrom LTTL.Segmentation import Segmentation\nfrom TextableUtils import *\nfrom Orange.OrangeWidgets.OWWidget import *\nimport OWGUI\n\nclass OWTextableExtractXML(OWWidget):\n \"\"\"Orange widget for xml markup extraction\"\"\"\n settingsList = [\n 'conditions',\n 'element',\n 'importElementAs',\n 'mergeDuplicates',\n 'preserveLeaves',\n 'deleteMarkup',\n 'importAnnotations',\n 'autoSend',\n 'autoNumber',\n 'autoNumberKey',\n 'displayAdvancedSettings',\n 'uuid']\n\n def __init__(self, parent=None, signalManager=None):\n OWWidget.__init__(self, parent, signalManager, wantMainArea=0, wantStateInfoWidget=0)\n self.inputs = [\n (\n 'Segmentation', Segmentation, self.inputData, Single)]\n self.outputs = [('Extracted data', Segmentation)]\n self.conditions = list()\n self.importAnnotations = True\n self.autoSend = True\n self.label = 'extracted_xml'\n self.autoNumber = False\n self.autoNumberKey = 'num'\n self.element = ''\n self.importElement = False\n self.importElementAs = ''\n self.mergeDuplicates = False\n self.preserveLeaves = False\n self.deleteMarkup = False\n self.displayAdvancedSettings = False\n self.uuid = None\n self.loadSettings()\n self.uuid = getWidgetUuid(self)\n self.inputSegmentation = None\n self.conditionsLabels = list()\n self.selectedConditionsLabels = list()\n self.newConditionAttribute = ''\n self.newConditionRegex = ''\n self.ignoreCase = False\n self.unicodeDependent = True\n self.multiline = False\n self.dotAll = False\n self.infoBox = InfoBox(widget=self.controlArea)\n self.sendButton = SendButton(widget=self.controlArea, master=self, callback=self.sendData, infoBoxAttribute='infoBox', sendIfPreCallback=self.updateGUI)\n self.advancedSettings = AdvancedSettings(widget=self.controlArea, master=self, callback=self.sendButton.settingsChanged)\n self.advancedSettings.draw()\n xmlExtractionBox = OWGUI.widgetBox(widget=self.controlArea, box='XML Extraction', orientation='vertical')\n OWGUI.lineEdit(widget=xmlExtractionBox, master=self, value='element', 
orientation='horizontal', label='XML element:', labelWidth=180, callback=self.sendButton.settingsChanged, tooltip='The XML element that will be extracted from the\\ninput segmentation.')\n OWGUI.separator(widget=xmlExtractionBox, height=3)\n xmlExtractionBoxLine2 = OWGUI.widgetBox(widget=xmlExtractionBox, box=False, orientation='horizontal', addSpace=True)\n OWGUI.checkBox(widget=xmlExtractionBoxLine2, master=self, value='importElement', label='Import element with key:', labelWidth=180, callback=self.sendButton.settingsChanged, tooltip='Associate each output segment with an annotation\\nwhose value is the above specified XML element.')\n self.importElementAsLineEdit = OWGUI.lineEdit(widget=xmlExtractionBoxLine2, master=self, value='importElementAs', orientation='horizontal', callback=self.sendButton.settingsChanged, tooltip='Annotation key for the XML element.')\n OWGUI.checkBox(widget=xmlExtractionBox, master=self, value='deleteMarkup', label='Remove markup', callback=self.sendButton.settingsChanged, tooltip='Check this box to remove all XML markup occurring\\nwithin the above specified XML element.')\n OWGUI.separator(widget=xmlExtractionBox, height=3)\n OWGUI.checkBox(widget=xmlExtractionBox, master=self, value='preserveLeaves', label='Prioritize shallow attributes', callback=self.sendButton.settingsChanged, tooltip='This box lets you indicate how you want to solve\\nconflicts that may occur in the case where two\\nor more occurrences of the above specified XML\\nelement are nested and have different values for\\nthe same attribute\\n\\nBy default, the attribute value associated with\\nthe deepest element will be used. Check this box\\nif you would rather use the value of the most\\nshallow element.')\n OWGUI.separator(widget=xmlExtractionBox, height=3)\n conditionsBox = OWGUI.widgetBox(widget=xmlExtractionBox, box='Conditions', orientation='vertical')\n xmlExtractionBoxLine4 = OWGUI.widgetBox(widget=conditionsBox, box=False, orientation='horizontal', addSpace=True)\n self.conditionsListbox = OWGUI.listBox(widget=xmlExtractionBoxLine4, master=self, value='selectedConditionsLabels', labels='conditionsLabels', callback=self.updateConditionsBoxButtons, tooltip='The list of conditions on attribute values that\\nwill be applied to in-/exclude each occurrence\\nof the above specified XML element in the output\\nsegmentation.\\n\\nNote that all conditions must be satisfied for an\\nelement occurrence to be included.\\n\\nColumn 1 shows the name of the attribute.\\nColumn 2 shows the corresponding regex pattern.\\nColumn 3 shows the associated flags.')\n font = QFont()\n font.setFamily('Courier')\n font.setStyleHint(QFont.Courier)\n font.setPixelSize(12)\n self.conditionsListbox.setFont(font)\n xmlExtractionBoxCol2 = OWGUI.widgetBox(widget=xmlExtractionBoxLine4, orientation='vertical')\n self.removeButton = OWGUI.button(widget=xmlExtractionBoxCol2, master=self, label='Remove', callback=self.remove, tooltip='Remove the selected condition from the list.')\n self.clearAllButton = OWGUI.button(widget=xmlExtractionBoxCol2, master=self, label='Clear All', callback=self.clearAll, tooltip='Remove all conditions from the list.')\n xmlExtractionBoxLine5 = OWGUI.widgetBox(widget=conditionsBox, box=False, orientation='vertical')\n addConditionBox = OWGUI.widgetBox(widget=xmlExtractionBoxLine5, box=False, orientation='vertical')\n OWGUI.lineEdit(widget=addConditionBox, master=self, value='newConditionAttribute', orientation='horizontal', label='Attribute:', labelWidth=131, callback=self.updateGUI, 
tooltip=\"The name of attribute in the condition that will\\nbe added to the list when button 'Add' is clicked.\")\n OWGUI.separator(widget=addConditionBox, height=3)\n OWGUI.lineEdit(widget=addConditionBox, master=self, value='newConditionRegex', orientation='horizontal', label='Regex:', labelWidth=131, callback=self.updateGUI, tooltip=\"The regex pattern associated with the condition\\nthat will be added to the list when button 'Add'\\nis clicked.\")\n OWGUI.separator(widget=addConditionBox, height=3)\n addConditionBoxLine3 = OWGUI.widgetBox(widget=addConditionBox, box=False, orientation='horizontal')\n OWGUI.checkBox(widget=addConditionBoxLine3, master=self, value='ignoreCase', label='Ignore case (i)', labelWidth=131, callback=self.updateGUI, tooltip='Regex pattern is case-insensitive.')\n OWGUI.checkBox(widget=addConditionBoxLine3, master=self, value='unicodeDependent', label='Unicode dependent (u)', callback=self.updateGUI, tooltip='Built-in character classes are Unicode-aware.')\n addConditionBoxLine4 = OWGUI.widgetBox(widget=addConditionBox, box=False, orientation='horizontal')\n OWGUI.checkBox(widget=addConditionBoxLine4, master=self, value='multiline', label='Multiline (m)', labelWidth=131, callback=self.updateGUI, tooltip=\"Anchors '^' and '$' match the beginning and\\nend of each line (rather than just the beginning\\nand end of each input segment).\")\n OWGUI.checkBox(widget=addConditionBoxLine4, master=self, value='dotAll', label='Dot matches all (s)', callback=self.updateGUI, tooltip=\"Meta-character '.' matches any character (rather\\nthan any character but newline).\")\n OWGUI.separator(widget=addConditionBox, height=3)\n self.addButton = OWGUI.button(widget=addConditionBox, master=self, label='Add', callback=self.add, tooltip='Add the current condition to the list.')\n self.advancedSettings.advancedWidgets.append(xmlExtractionBox)\n self.advancedSettings.advancedWidgetsAppendSeparator()\n optionsBox = OWGUI.widgetBox(widget=self.controlArea, box='Options', orientation='vertical')\n optionsBoxLine2 = OWGUI.widgetBox(widget=optionsBox, box=False, orientation='horizontal', addSpace=True)\n OWGUI.checkBox(widget=optionsBoxLine2, master=self, value='autoNumber', label='Auto-number with key:', labelWidth=180, callback=self.sendButton.settingsChanged, tooltip='Annotate output segments with increasing numeric\\nindices.')\n self.autoNumberKeyLineEdit = OWGUI.lineEdit(widget=optionsBoxLine2, master=self, value='autoNumberKey', orientation='horizontal', callback=self.sendButton.settingsChanged, tooltip='Annotation key for output segment auto-numbering.')\n OWGUI.checkBox(widget=optionsBox, master=self, value='importAnnotations', label='Import annotations', callback=self.sendButton.settingsChanged, tooltip='Add to each output segment the annotation keys\\nand values associated with the corresponding\\ninput segment.')\n OWGUI.separator(widget=optionsBox, height=3)\n OWGUI.checkBox(widget=optionsBox, master=self, value='mergeDuplicates', label='Fuse duplicates', callback=self.sendButton.settingsChanged, tooltip='Fuse segments that have the same address.\\n\\nThe annotation of fused segments will be fused\\nas well. 
In the case where fused segments have\\ndistinct values for the same annotation key, only\\nthe value of the last one will be kept.')\n OWGUI.separator(widget=optionsBox, height=2)\n self.advancedSettings.advancedWidgets.append(optionsBox)\n self.advancedSettings.advancedWidgetsAppendSeparator()\n basicXmlExtractionBox = OWGUI.widgetBox(widget=self.controlArea, box='XML Extraction', orientation='vertical')\n OWGUI.lineEdit(widget=basicXmlExtractionBox, master=self, value='element', orientation='horizontal', label='XML element:', labelWidth=180, callback=self.sendButton.settingsChanged, tooltip='The XML element that will be extracted from the\\ninput segmentation.')\n OWGUI.separator(widget=basicXmlExtractionBox, height=3)\n OWGUI.checkBox(widget=basicXmlExtractionBox, master=self, value='deleteMarkup', label='Remove markup', callback=self.sendButton.settingsChanged, tooltip='Check this box to remove all XML markup occurring\\nwithin the above specified XML element.')\n OWGUI.separator(widget=basicXmlExtractionBox, height=2)\n self.advancedSettings.basicWidgets.append(basicXmlExtractionBox)\n self.advancedSettings.basicWidgetsAppendSeparator()\n OWGUI.rubber(self.controlArea)\n self.sendButton.draw()\n self.infoBox.draw()\n self.sendButton.sendIf()\n self.adjustSizeWithTimer()\n return\n\n def sendData(self):\n \"\"\"(Have LTTL.Segmenter) perform the actual tokenization\"\"\"\n if not self.inputSegmentation:\n self.infoBox.setText('Widget needs input.', 'warning')\n self.send('Extracted data', None, self)\n return\n else:\n if not self.element:\n self.infoBox.setText('Please type an XML element', 'warning')\n self.send('Extracted data', None, self)\n return\n if self.displayAdvancedSettings and self.importElement:\n if self.importElementAs:\n importElementAs = self.importElementAs\n else:\n self.infoBox.setText('Please enter an annotation key for element import.', 'warning')\n self.send('Extracted data', None)\n return\n else:\n importElementAs = None\n if self.displayAdvancedSettings and self.autoNumber:\n if self.autoNumberKey:\n autoNumberKey = self.autoNumberKey\n num_iterations = 2 * len(self.inputSegmentation)\n else:\n self.infoBox.setText('Please enter an annotation key for auto-numbering.', 'warning')\n self.send('Extracted data', None, self)\n return\n else:\n autoNumberKey = None\n num_iterations = len(self.inputSegmentation)\n conditions = dict()\n if self.displayAdvancedSettings:\n for condition_idx in xrange(len(self.conditions)):\n condition = self.conditions[condition_idx]\n attribute = condition[0]\n regex_string = condition[1]\n if condition[2] or condition[3] or condition[4] or condition[5]:\n flags = ''\n if condition[2]:\n flags += 'i'\n if condition[3]:\n flags += 'u'\n if condition[4]:\n flags += 'm'\n if condition[5]:\n flags += 's'\n regex_string += '(?%s)' % flags\n try:\n conditions[attribute] = re.compile(regex_string)\n except re.error as re_error:\n message = 'Please enter a valid regex (error: %s' % re_error.message\n if len(self.conditions) > 1:\n message += ', condition #%i' % (condition_idx + 1)\n message += ').'\n self.infoBox.setText(message, 'error')\n self.send('Extracted data', None, self)\n return\n\n if self.displayAdvancedSettings:\n importAnnotations = self.importAnnotations\n preserveLeaves = self.preserveLeaves\n mergeDuplicates = self.mergeDuplicates\n if mergeDuplicates:\n num_iterations += len(self.inputSegmentation)\n else:\n importAnnotations = True\n mergeDuplicates = False\n preserveLeaves = False\n progressBar = OWGUI.ProgressBar(self, 
iterations=num_iterations)\n try:\n xml_extracted_data = Segmenter.import_xml(segmentation=self.inputSegmentation, element=self.element, conditions=conditions, import_element_as=importElementAs, label=self.captionTitle, import_annotations=importAnnotations, auto_number_as=autoNumberKey, remove_markup=self.deleteMarkup, merge_duplicates=mergeDuplicates, preserve_leaves=preserveLeaves, progress_callback=progressBar.advance)\n message = '%i segment@p sent to output.' % len(xml_extracted_data)\n message = pluralize(message, len(xml_extracted_data))\n self.infoBox.setText(message)\n self.send('Extracted data', xml_extracted_data, self)\n except ValueError:\n self.infoBox.setText(message='Please make sure that input is well-formed XML.', state='error')\n self.send('Extracted data', None, self)\n\n self.sendButton.resetSettingsChangedFlag()\n progressBar.finish()\n return\n\n def inputData(self, segmentation):\n \"\"\"Process incoming segmentation\"\"\"\n self.inputSegmentation = segmentation\n self.infoBox.inputChanged()\n self.sendButton.sendIf()\n\n def clearAll(self):\n \"\"\"Remove all conditions\"\"\"\n del self.conditions[:]\n del self.selectedConditionsLabels[:]\n self.sendButton.settingsChanged()\n\n def remove(self):\n \"\"\"Remove selected condition\"\"\"\n if self.selectedConditionsLabels:\n index = self.selectedConditionsLabels[0]\n self.conditions.pop(index)\n del self.selectedConditionsLabels[:]\n self.sendButton.settingsChanged()\n\n def add(self):\n \"\"\"Add condition\"\"\"\n self.conditions.append((\n self.newConditionAttribute,\n self.newConditionRegex,\n self.ignoreCase,\n self.unicodeDependent,\n self.multiline,\n self.dotAll))\n self.sendButton.settingsChanged()\n\n def updateGUI(self):\n \"\"\"Update GUI state\"\"\"\n if self.displayAdvancedSettings:\n if self.selectedConditionsLabels:\n cachedLabel = self.selectedConditionsLabels[0]\n else:\n cachedLabel = None\n del self.conditionsLabels[:]\n if len(self.conditions):\n attrs = [ c[0] for c in self.conditions ]\n regexes = [ c[1] for c in self.conditions ]\n maxAttrLen = max([ len(a) for a in attrs ])\n maxRegexLen = max([ len(r) for r in regexes ])\n for index in range(len(self.conditions)):\n format = '%-' + unicode(maxAttrLen + 2) + 's'\n label = format % attrs[index]\n format = '%-' + unicode(maxRegexLen + 2) + 's'\n label += format % regexes[index]\n flags = list()\n if self.conditions[index][2]:\n flags.append('i')\n if self.conditions[index][3]:\n flags.append('u')\n if self.conditions[index][4]:\n flags.append('m')\n if self.conditions[index][5]:\n flags.append('s')\n if len(flags):\n label += '[%s]' % (',').join(flags)\n self.conditionsLabels.append(label)\n\n self.conditionsLabels = self.conditionsLabels\n if cachedLabel is not None:\n self.sendButton.sendIfPreCallback = None\n self.selectedConditionsLabels.listBox.item(cachedLabel).setSelected(1)\n self.sendButton.sendIfPreCallback = self.updateGUI\n if self.newConditionAttribute and self.newConditionRegex:\n self.addButton.setDisabled(False)\n else:\n self.addButton.setDisabled(True)\n if self.importElement:\n self.importElementAsLineEdit.setDisabled(False)\n else:\n self.importElementAsLineEdit.setDisabled(True)\n if self.autoNumber:\n self.autoNumberKeyLineEdit.setDisabled(False)\n else:\n self.autoNumberKeyLineEdit.setDisabled(True)\n self.updateConditionsBoxButtons()\n self.advancedSettings.setVisible(True)\n else:\n self.advancedSettings.setVisible(False)\n self.adjustSizeWithTimer()\n return\n\n def updateConditionsBoxButtons(self):\n \"\"\"Update state 
of Conditions box buttons\"\"\"\n if self.selectedConditionsLabels:\n self.removeButton.setDisabled(False)\n else:\n self.removeButton.setDisabled(True)\n if self.conditions:\n self.clearAllButton.setDisabled(False)\n else:\n self.clearAllButton.setDisabled(True)\n\n def adjustSizeWithTimer(self):\n qApp.processEvents()\n QTimer.singleShot(50, self.adjustSize)\n\n def setCaption(self, title):\n if 'captionTitle' in dir(self) and title != 'Orange Widget':\n OWWidget.setCaption(self, title)\n self.sendButton.settingsChanged()\n else:\n OWWidget.setCaption(self, title)\n\n def getSettings(self, *args, **kwargs):\n settings = OWWidget.getSettings(self, *args, **kwargs)\n settings['settingsDataVersion'] = __version__.split('.')[:2]\n return settings\n\n def setSettings(self, settings):\n if settings.get('settingsDataVersion', None) == __version__.split('.')[:2]:\n settings = settings.copy()\n del settings['settingsDataVersion']\n OWWidget.setSettings(self, settings)\n return\n\n\nif __name__ == '__main__':\n appl = QApplication(sys.argv)\n ow = OWTextableExtractXML()\n ow.show()\n appl.exec_()\n ow.saveSettings()","sub_path":"pycfiles/Orange_Textable-2.0.1-py2.7/OWTextableExtractXML.py","file_name":"OWTextableExtractXML.py","file_ext":"py","file_size_in_byte":20965,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"493061351","text":"import json\nfrom bson import json_util\nfrom pymongo import MongoClient\n\nconnection = MongoClient('localhost', 27017)\ndb = connection['market']\ncollection = db['stocks']\n\n# Searches for documents based on passed criteria.\n# If documents are found, they are printed.\n# Documents are then deleted.\n# \n# @param documen\n# - Key,value pair used to identify documents to be deleted\n# \n# \n# @var result\n# - Cursor containing documents pending deletion\n# - If exception, result is set to False\n# - Otherwise, contents are printed then result is set to True\n# \n# @throws TypeError\n# - Thrown if document is incorrect format\n# \n# @return result\n# - True/False based on if documents were successfully deleted\n# - Undefined if deletion fails\n\ndef remove_document(document):\n try:\n collection.delete_one(document)\n result = \"True\"\n except TypeError as te:\n abort(400, str(te))\n \n return result\n\n \ndef main():\n \n myQuery = { \"Ticker\" : \"BRLI\" }\n\n \n deleteResult = collection.find_one_and_delete(myQuery)\n print(deleteResult)\n \nmain() ","sub_path":"delete.py","file_name":"delete.py","file_ext":"py","file_size_in_byte":1090,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"93445552","text":"#!/usr/bin/python\n\nimport os\nimport sys\nsys.path.append('/home/whatever/work/tanks_ai/src')\n\nimport sys\nimport pygame\nfrom GamePhysics import MAX_DISTANCE\nfrom Geometry import get_unit_corners, Vector\nfrom MyUtils import debug_dump_load\n\npygame.init()\n\n#create the screen\nwindow = pygame.display.set_mode((1280, 800))\n\n#draw a line - see http://www.pygame.org/docs/ref/draw.html for more\ndef draw_unit(unit):\n c1, c2, c3, c4 = get_unit_corners(unit, factor=1)\n pygame.draw.line(window, (255, 255, 255), (c1.x, c1.y), (c2.x, c2.y))\n pygame.draw.line(window, (255, 255, 255), (c2.x, c2.y), (c3.x, c3.y))\n pygame.draw.line(window, (255, 255, 255), (c3.x, c3.y), (c4.x, c4.y))\n pygame.draw.line(window, (255, 255, 255), (c4.x, c4.y), (c1.x, c1.y))\n\ndef draw_tank(tank):\n draw_unit(tank)\n\n b = tank.angle + tank.turret_relative_angle\n e = 
Vector(1, 0)\n q = e.rotate(b)\n\n tank_v = Vector(tank.x, tank.y)\n hit_v = tank_v + MAX_DISTANCE * q\n\n pygame.draw.line(window, (255, 0, 0), (tank.x, tank.y), (hit_v.x, hit_v.y))\n\ndata = debug_dump_load(sys.argv[1])\n\nfor unit in data[\"units\"]:\n draw_unit(unit)\n\nfor tank in data[\"tanks\"]:\n draw_tank(tank)\n\n#draw it to the screen\npygame.display.flip()\n\n#input handling (somewhat boilerplate code):\nwhile True:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n sys.exit(0)\n else:\n #print event\n pass","sub_path":"tools/dump_visualizer.py","file_name":"dump_visualizer.py","file_ext":"py","file_size_in_byte":1438,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"179824894","text":"\"\"\"The main class used for operatings STM32 is STM32Bridge.\n\"\"\"\nimport serial\nimport time\nimport json\nimport logging\nimport atcommands\nimport interperter\nimport context\nfrom Queue import Queue\n\nMAGIC_START_OF_FRAME= str('')\nMAGIC_END_OF_FRAME=str('')\nTIMEOUT_UART=0\n\n# Status for str types\nOK = 'OK'\nWAITING = 'WAITING'\nERROR = 'ERROR'\nTIMEOUT = 'TIMEOUT'\n\n# Default error status, for numeric types\nSTATUS_CODE_ERROR = -1\n\n\njoined = False\n\n\nclass Stm32ATBridge(object):\n \"\"\"\n Bridge which sends AT commands to the STM32 boards through UART.\n Typical usage include\n \n Initilizating and opening serial port to STM32\n bridge.initialize('path to json config file')\n\n Setting up the hardware parameter\n status = bridge.setupHW()\n\n Joining the network\n if not status:\n joined = bridge.join()\n \"\"\"\n def initialize(self, initJsonFilePath):\n \"\"\"Initializes the Bridge, pass the configuration file path here.\n Sets the serial connection to the STM32 microcontroller.\n \"\"\"\n fp = open(initJsonFilePath, 'r')\n self.config = json.load(fp, encoding='ascii')\n\n # queue for commands\n self.context = context.Context()\n \n # known configurations\n # timeout is a tricky variable to setup\n self.serialPort = serial.Serial(self.config['device'], int(self.config['baudrate']), timeout=TIMEOUT_UART)\n self.atcommands = atcommands.ATCommands(self.context)\n self.interperter = interperter.Interperter(self.context, self.serialPort)\n self.interperter.daemon = True # Thread should die on exit\n self.interperter.start()\n\n def joinThread(self):\n self.interperter.join()\n\n # Returns True if already joined, False if not joined\n def setupHW(self):\n \"\"\"Setup the lora board hardware parameters from the config.json file.\n If the boards if already joined to the network, skips and returns True.\n Else sets the parameters up and returns False.\n \"\"\"\n\n # TODO: Add error handling\n\n networkJoinMode = self.config['networkJoinMode']\n\n status = self.atcommands.getNJS()\n logging.info(status)\n\n if status == 0:\n ret = self.atcommands.resetMCU()\n logging.info(ret)\n\n ret = self.atcommands.getOK()\n logging.info(ret)\n\n ret = self.atcommands.setClass(str(self.config['class']))\n logging.info(ret)\n\n\n if networkJoinMode == 0: \n # ABP mode, requires device EUI, application EUI, device address, network session key and app session key\n ret = self.atcommands.setAPPEUI(str(self.config['applicationEUI']))\n logging.info(ret)\n\n ret = self.atcommands.setDADDR(str(self.config['deviceAddress']))\n logging.info(ret)\n\n ret = self.atcommands.setNWKSKEY(str(self.config['networkSessionKey']))\n logging.info(ret)\n\n ret = self.atcommands.setAPPSKEY(str(self.config['applicationSessionKey']))\n 
logging.info(ret)\n else:\n # OTAA, requires device EUI, application EUI and app key\n ret = self.atcommands.setAPPEUI(str(self.config['applicationEUI']))\n logging.info(ret)\n\n ret = self.atcommands.setAPPKEY(str(self.config['applicationKey']))\n logging.info(ret)\n\n if 'deviceAddress' in self.config: \n ret = self.atcommands.setDADDR(str(self.config['deviceAddress']))\n logging.info(ret)\n\n if 'adaptiveDataRate' in self.config:\n ret = self.atcommands.setADR(str(self.config['adaptiveDataRate']))\n logging.info(ret)\n\n if 'networkID' in self.config:\n ret = self.atcommands.setNWKID(str(self.config['networkID']))\n logging.info(ret)\n\n if 'confirmMode' in self.config:\n ret = self.atcommands.setCFM(self.config['confirmMode'])\n logging.info(ret)\n\n ret = self.atcommands.setNJM(self.config['networkJoinMode'])\n logging.info(ret)\n \n self.cfmStatus = int(self.atcommands.getCFM())\n logging.info(self.cfmStatus)\n\n\n return False # not joined\n \n self.cfmStatus = int(self.atcommands.getCFM())\n logging.info(self.cfmStatus)\n return True # already joined\n\n\n def join(self):\n \"\"\"Joins the loraWAN network. Tries to check if the join is successful for 10 tries.\n Returns False if it cannot join network, True for success.\n \"\"\"\n\n # if the network is already joined, return\n status = self.atcommands.getNJS()\n logging.info(status)\n if status == 1:\n return True\n ret = self.atcommands.JOIN()\n logging.info(ret)\n\n if ret == 'JOINED':\n return True\n\n return False # join unsuccessful\n\n def send(self, port, data, timeout = 20):\n \"\"\"Send text data.\n @port port number\n @data text data.\n Each data will have be as follows\n MAGIC_START_OF_FRAME\n data...\n ...\n ...\n ...\n (next lora frame)\n ...\n ...\n MAGIC_END_OF_FRAME\n \"\"\"\n\n maxFrameSize = self.config['maxFrameSize']\n # by convention one character is one byte\n # two hex makes one character and in one byte\n\n buffer = MAGIC_START_OF_FRAME + data + MAGIC_END_OF_FRAME\n bufferLen = len(buffer)\n bufferParts = [buffer[i : i + maxFrameSize] \n for i in range(0, bufferLen, maxFrameSize)]\n\n for buf in bufferParts:\n # sending part\n logging.info('Sending: %s' % buf)\n ret = self.atcommands.send(port, buf, self.cfmStatus, timeout)\n logging.debug(ret)\n\n if self.cfmStatus == 1:\n # If we enable confirmation, then we can skip the retiress\n continue\n\n while ret != 'OK':\n if ret == 'AT_PARAM_ERROR':\n raise Exception('Param error in send.')\n elif ret == 'AT_NO_NETWORK_JOINED':\n self.join()\n elif ret == 'AT_BUSY_ERROR':\n time.sleep(0.25) # If device is busy, sleep for 1 second\n ret = self.atcommands.send(port, buf, 0, timeout)\n logging.debug(ret)\n\n\n def _convertToHex(self, string):\n return ''.join(hex(ord(c))[2:] for c in string)\n\n def sendb(self, port, data, timeout = 20):\n \"\"\"Send binary data.\n @port port number\n @data text data.\n Each data will have be as follows\n MAGIC_START_OF_FRAME\n data...\n ...\n ...\n ...\n (next lora frame)\n ...\n ...\n MAGIC_END_OF_FRAME\n \"\"\"\n\n maxFrameSize = self.config['maxFrameSize'] * 2\n # by convention one character is one byte\n # two hex makes one character and in one byte\n\n buffer = self._convertToHex(MAGIC_START_OF_FRAME) + data + self._convertToHex(MAGIC_END_OF_FRAME)\n bufferLen = len(buffer)\n bufferParts = [buffer[i : i + maxFrameSize] \n for i in range(0, bufferLen, maxFrameSize)]\n\n for buf in bufferParts:\n # sending part\n ret = self.atcommands.sendb(port, buf, self.cfmStatus, timeout)\n logging.debug(ret)\n\n if self.cfmStatus == 1:\n # 
If we enable confirmation, then we can skip the retires\n continue\n\n while ret != 'OK':\n if ret == 'AT_PARAM_ERROR':\n raise Exception('Param error in send.')\n elif ret == 'AT_NO_NETWORK_JOINED':\n self.join()\n elif ret == 'AT_BUSY_ERROR':\n time.sleep(1) # If device is busy, sleep for 1 second\n ret = self.atcommands.sendb(port, buf, 0, timeout)\n logging.debug(ret)\n \n def close(self):\n self.atcommands.close()\n\n\n\n\n\n\n\n\n","sub_path":"modules/stm32ATbridge/stm32ATbridge/stm32ATbridge.py","file_name":"stm32ATbridge.py","file_ext":"py","file_size_in_byte":7213,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"638497417","text":"import pandas as pd\n\nfilePath = \"F:\\\\Study\\\\RobotFramework\\\\RF_Pilot\\\\TestAsset\\\\DataTables\\\\\"\n\ndef readRow(fileDetail, rowId):\n\tfileName = filePath + fileDetail.split('.')[0] + \".xlsx\"\n\tsheetName = fileDetail.split('.')[1]\n\tdataframe = pd.read_excel(fileName, sheetName)\n\t#df.set_index(\"TestCaseName\", inplace=True)\n\trowDict = dataframe.set_index('TestCaseName').T.to_dict('dict')[rowId]\n\treturn rowDict\n\n\n### Add validation to check if file present\n### Add validation to check sheet exists\n### Add validation to check if column exists\n\n\n","sub_path":"ExcelReader.py","file_name":"ExcelReader.py","file_ext":"py","file_size_in_byte":539,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"642220840","text":"# MIT License\n#\n# Copyright (c) 2018-2019 Red Hat, Inc.\n\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\n\"\"\"\nFunctional tests for the srpm command\n\"\"\"\nfrom pathlib import Path\nimport os\nimport flexmock\nfrom subprocess import check_output\nimport unittest\nfrom packit.api import PackitAPI\nfrom tests.integration.testbase import PackitUnittestOgr\n\n\nclass ProposeUpdate(PackitUnittestOgr):\n    def setUp(self):\n        super().setUp()\n        self.api = PackitAPI(\n            config=self.conf, package_config=self.pc, upstream_local_project=self.lp\n        )\n        self.api._up = self.upstream\n        self.api._dg = self.dg\n        # Do not upload the package, because no credentials are given in CI\n        flexmock(self.api).should_receive(\"_handle_sources\").and_return(None)\n        self.set_git_user()\n\n    @unittest.skip(\n        \"Issue in ogr causing that User is not stored in persistent yaml files for pagure\"\n    )\n    def test_propose_update(self):\n        # change the specfile a little so that there is something to commit\n        specfile_location = os.path.join(self.lp.working_dir, \"python-ogr.spec\")\n        with open(specfile_location, \"a\") as myfile:\n            myfile.write(\"# test text\")\n        check_output(\n            f\"cd {self.lp.working_dir}; git commit -m 'test change' python-ogr.spec\",\n            shell=True,\n        )\n        self.api.sync_release(\"master\")\n\n\ndef test_srpm(api_instance):\n    u, d, api = api_instance\n    api.create_srpm()\n    assert list(Path.cwd().glob(\"*.src.rpm\"))[0].exists()\n\n\ndef test_srpm_custom_path(api_instance):\n    u, d, api = api_instance\n    custom_path = \"sooooorc.rpm\"\n    api.create_srpm(output_file=custom_path)\n    assert Path.cwd().joinpath(custom_path).is_file()\n","sub_path":"tests/integration/test_api.py","file_name":"test_api.py","file_ext":"py","file_size_in_byte":2749,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"63859397","text":"import cmudict\nimport pickle\nimport os\n\nclass Evaluator():\n    def __init__(self):\n        self.cmudict = cmudict.dict()\n\n        self.title_bank = None\n        self.folder = os.path.dirname(os.path.realpath(__file__))\n\n        # Try reading content for the title_bank\n        try:\n            with open(os.path.join(self.folder, \"data\", \"titles.pickle\"), \"rb\") as f:\n                self.title_bank = pickle.load(f)\n        except FileNotFoundError:\n            from title_scrape import download_gutenberg, gutenberg_preprocess\n\n            download_gutenberg()\n            gutenberg_preprocess()\n\n            with open(os.path.join(self.folder, \"data\", \"titles.pickle\"), \"rb\") as f:\n                self.title_bank = pickle.load(f)\n\n    # Modified from https://www.python-course.eu/levenshtein_distance.php\n    def __iterative_levenshtein(self, s, t, weights=(1, 1, 1)):\n        \"\"\"\n        iterative_levenshtein(s, t) -> ldist\n        ldist is the Levenshtein distance between the strings s and t.\n        For all i and j, dist[i,j] will contain the Levenshtein\n        distance between the first i characters of s and the\n        first j characters of t.\n\n        weights: the costs for the operations, in order: delete, insert,\n        substitute; each cost defaults to 1\n        \"\"\"\n        rows = len(s)+1\n        cols = len(t)+1\n\n        dist = [[0 for x in range(cols)] for x in range(rows)]\n        # source prefixes can be transformed into empty strings\n        # by deletions:\n        for row in range(1, rows):\n            dist[row][0] = dist[row-1][0] + weights[0]\n        # target prefixes can be created from an empty source string\n        # by inserting the characters\n        for col in range(1, cols):\n            dist[0][col] = dist[0][col-1] + weights[1]\n\n        for col in range(1, cols):\n            for row in range(1, rows):\n                deletes = weights[0]\n                inserts = weights[1]\n                # substitution is free when the characters match\n                subs = 0 if s[row-1] == t[col-1] else weights[2]\n                dist[row][col] = min(dist[row-1][col] + deletes,\n                                     dist[row][col-1] + inserts,\n                                     dist[row-1][col-1] + subs)\n\n        # the bottom-right cell holds the distance between the full strings\n        return dist[rows-1][cols-1]\n\n    def editDistance(self, phenotype, weights=(1, 1, 1)):\n        \"\"\"\n        Calculate the shortest Levenshtein distance between phenotype and the known titles.\n\n        Args:\n            phenotype (str) : Candidate phenotype.\n            title_bank (dict) : Known titles; needs to have dictionaries as values, and those dictionaries need to have a\n                'title' key.\n            weights (tuple of floats) : Weights for the different operations. In order: Delete, Insert, Substitute\n\n        Returns:\n            int : Shortest edit distance.\n        \"\"\"\n\n        # Checking for an exact match in the dictionary is fast\n        if phenotype in self.title_bank:\n            return 0\n\n        closest = 1000\n\n        for _, b_info in self.title_bank.items():\n            # Skip candidates using a lower bound of the Levenshtein distance.\n            # Does not take the weights into account.\n            if abs(len(phenotype.strip()) - len(b_info[\"title\"].strip())) > closest:\n                continue\n\n            levenshtein = self.__iterative_levenshtein(phenotype.strip(), b_info[\"title\"].strip(), weights)\n            closest = min(closest, levenshtein)\n\n        return closest\n\n    def evaluate(self, title):\n        \"\"\"Runs the different evaluation schemes, which return values between 0 and 1, and returns an average over them.\n\n        Args:\n            title (list) : list of words forming the title.\n\n        Returns:\n            float : Weighted average of the different evaluations.\n        \"\"\"\n        val = 0\n        val += self.eval_novelty(\" \".join(title))\n        val += self.eval_alliteration(title)\n        return val / 2.0\n\n    def eval_novelty(self, title):\n        if self.title_bank is None:\n            return 0.8\n        else:\n            dist = self.editDistance(title, (1, 1, 1))\n            # Scale with the title length.\n            # Can be higher than 1 if the weights are not all 1.\n            dist = min(1.0, (dist/len(title))*(len(title)//5))\n            return dist\n\n    def eval_alliteration(self, title):\n        unique_phonemes = []\n        title_length = 1\n        for word in title:\n            try:\n                phonemes = self.cmudict[word][0]\n                title_length += len(phonemes)\n                for phoneme in phonemes:\n                    if phoneme not in unique_phonemes:\n                        unique_phonemes.append(phoneme)\n            except (KeyError, IndexError):\n                # word was not in the dict\n                continue\n        return len(unique_phonemes) / title_length\n\n    def get_alliteration_score(self, ratio):\n        \"\"\"A function that has its maximum = 1 when ratio is 1/2, meaning half of the phonemes in the\n        title are non-unique; otherwise it decays towards 0\n        \"\"\"\n        # square before negating so the parabola opens downward, as the docstring describes\n        return max(0.0, 1 - (4 * (ratio - 0.5)) ** 2)\n","sub_path":"tittles/evaluator.py","file_name":"evaluator.py","file_ext":"py","file_size_in_byte":5220,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"195425069","text":"from flask import Flask, request, redirect, url_for, render_template\n\napp = Flask(__name__)\napp.config['TEMPLATES_AUTO_RELOAD'] = True  # Flask config keys must be upper-case\n\n# @app.route('/')\n# def hello_world():\n#     a = 2/0\n#     return 'Hello World!'\n\n# @app.route('/<any(article,blog):url_path>/<id>/')\n# def item(url_path, id):\n#     parm = request.args.get('name')\n#     print(\"姓名是什么==%s\" % parm)\n#     return url_path\n@app.route('/list/<int:page>/')\ndef ceshi(page):\n    books = [\n        {\n            \"name\": \"三国演义\",\n            'author': \"罗贯中\"\n        },\n        {\n            \"name\": \"红楼梦\",\n            'author': \"曹雪芹\"\n        },\n        {\n            \"name\": \"西游记\",\n            'author': \"吴承恩\"\n        },\n        {\n
\"name\": \"水浒传\",\n 'author': \"施耐庵\"\n }\n ]\n\n return render_template(\"index.html\",books=books)\n\n@app.route('/')\ndef index():\n return render_template('son.html')\n\nif __name__ == '__main__':\n app.run(debug=True)\n","sub_path":"知了课堂/知了课堂.py","file_name":"知了课堂.py","file_ext":"py","file_size_in_byte":975,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"192604557","text":"from plotting.app import PlotterWindow\nfrom plotting import graph, domain, widget\nfrom prak import numerical\nfrom functools import partial\nimport numpy\nLOGISTISCH_MAP = \"\"\"\nuniform float r;\nvec4 f(vec4 x) {\n return vec4(x.x, r*x.x*(1-x.x), 0, 0.5);\n}\n\"\"\"\n\nGERADE = \"\"\"\nvec4 f(vec4 x) {\n return vec4(x.x, x.x, 1, 1);\n}\n\"\"\"\n\nwindow = PlotterWindow(axis=(1.0,1.0), origin=(-0.0,0.0),\n bg_color=[.9,.9,.9,1])\n\nuniforms = window.plotter.get_uniform_manager()\nuniforms.set_global('r', 2.75, 0.001)\nwindow.add_widget('test', widget.Uniforms(uniforms, font_color=[.0, .0, .0, 1]))\n\ncdomain = domain.Axis(50)\nwindow.plotter.add_graph('bifurkation', graph.Line2d(cdomain, LOGISTISCH_MAP))\nwindow.plotter.get_graph('bifurkation').set_colors(color_min=[.0,0.0,.0,1], color_max=[0.0,.0,.0,1])\n\nwindow.plotter.add_graph('gerade', graph.Line2d(cdomain, GERADE))\nwindow.plotter.get_graph('gerade').set_colors(color_min=[.0,0.0,.0,1], color_max=[0.0,.0,.0,1])\n\nlog_fnc = lambda x: uniforms.get_global('r')*x*(1-x)\npydomain = domain.PythonCodeDomain(600)\npydomain.calculata_domain = partial(numerical.iteration_attractor_quadruple_opt1, log_fnc, 300)\nwindow.plotter.add_graph('iteration', graph.Line2d(pydomain))\nwindow.plotter.get_graph('iteration').set_colors(color_min=[.0,0.0,.0,1], color_max=[0.0,.0,.0,1])\n\nwindow.run()\n","sub_path":"prak/logistisch-opt-behavior.py","file_name":"logistisch-opt-behavior.py","file_ext":"py","file_size_in_byte":1317,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"399605137","text":"from TAU_Israel.modules import translate, synonymous_codons\n\n# --------------------------------------------------------------\n\ndef optimize_sequence(target_gene, high_expression_organisms, low_expression_organisms, local_maximum, tuning_param=0.5, n_initiation_codons=12):\n \"\"\"\n\n :param target_gene: Gene object, which is to be optimized\n :param high_expression_organisms: list of Organism objects. The organisms where we want to express the target gene in\n :param low_expression_organisms: list of Organism objects. The organisms where we go not want the expression\n :param tuning_param: a number from 0 to 1, which describes the priority in optimization,\n 0 - optimization only for no-expression organisms, 1 - only for expression organisms\n\n :param n_initiation_codons: number of codons of the sequence which need to be optimized due to the initiation rules\n :return: Optimised gene sequence according to the organisms' features\n\n The function calculates the difference between the features of each codon. 
Each feature has its own weight (ratio)\n \"\"\"\n\n\n optimized_sequence = ''\n\n optimal_codons = find_optimal_codons(high_expression_organisms, low_expression_organisms, tuning_param, local_maximum) # optimal codons->dict(AA:codon)\n\n target_protein = translate(target_gene)\n\n # optimize the initiation\n\n optimized_sequence += optimize_initiation(target_gene[:n_initiation_codons*3])\n\n for aa in target_protein[n_initiation_codons:]:\n optimized_sequence += optimal_codons[aa]\n\n return optimized_sequence\n\n# --------------------------------------------------------------\ndef calc_diff(expression_organisms, low_expression_organisms, codons):\n \"\"\"\n\n :param expression_organisms: list of expression organisms\n :param low_expression_organisms: list of no_expression organisms\n :param codons: list of codons to calculate the difference for\n :return: dict (codon:score).\n\n The function finds the difference between the features for each codon in codons list.\n Each feature has ratio, which is actually a weight of the feature in the optimization.\n\n\n\n The function works only for one expression organism and one no-expression organism\n \"\"\"\n\n expression_organisms_features = expression_organisms[0].features\n no_expression_organisms_features = low_expression_organisms[0].features\n\n diff = {}\n for codon in codons:\n diff[codon] = 0\n for i in range(len(expression_organisms_features)):\n if no_expression_organisms_features[i].weights[codon] == 0:\n diff[codon] += 1000\n continue\n\n diff[codon] += expression_organisms_features[i].ratio * \\\n (expression_organisms_features[i].weights[codon] / no_expression_organisms_features[i].weights[codon])\n\n # we need to turn the values upside down, because we are looking for minimal value in find_optimal_codons_function\n\n diff[codon] = 1/diff[codon]\n\n return diff\n\n# --------------------------------------------------------------\ndef loss_function(high_expression_organisms, low_expression_organisms, codons, tuning_param, local_maximum):\n \"\"\"\n\n :param high_expression_organisms: list of expression organisms\n :param low_expression_organisms: list of no_expression organisms\n :param codons: list of codons to calculate the loss for\n :param tuning_param:\n :return: loss - dict (codon:score)\n\n The function iterates through each feature in each organism, and sums up loss for each codon\n \"\"\"\n\n loss = {}\n if local_maximum: #high_expression is optimized, low expression is deoptimized\n for high_expression_organism in high_expression_organisms:\n loss = iterate_through_feature([high_expression_organism], codons, loss, tuning_param, high_expression=True)\n\n for low_expression_organism in low_expression_organisms:\n loss = iterate_through_feature([low_expression_organism], codons, loss, tuning_param, high_expression=False)\n else:\n loss = iterate_through_feature(high_expression_organisms, codons, loss, tuning_param, high_expression=True)\n loss = iterate_through_feature(low_expression_organisms, codons, loss, tuning_param, high_expression=False)\n\n return loss\n\n\n# --------------------------------------------------------------\ndef iterate_through_feature(organisms, codons, loss, tuning_param, high_expression):\n \"\"\"\n\n :param organisms: List of organism objects for which the sequence is optimized\n :param codons: list of codons to choose from\n :param loss: loss(dict) taken from a previous iteration\n :param high_expression: Whether current organism is being optimized for\n high expression or low expression\n :return: 
updated loss dictionary\n\n    The function calculates the loss for each codon for the organism\n    and adds it to the loss from the previously calculated organisms.\n    \"\"\"\n\n    for feature_name in [feature.index_name for feature in organisms[0].features]:\n\n        max_value = find_max_value_per_feature(organisms, feature_name, codons)\n\n        for organism in organisms:\n\n            feature = [feature for feature in organism.features if feature.index_name == feature_name]\n            f = feature[0]\n\n            for codon in codons:\n                # keep contributions from previously processed organisms instead of resetting\n                loss.setdefault(codon, 0)\n                try:  # todo: temporary change. When the synonymous codons dict is done, erase 'try-except'\n                    # optimized organisms should have a small loss\n                    if high_expression:\n                        loss[codon] += (tuning_param * f.ratio * ((f.weights[codon] / max_value - 1) ** 2))\n                    else:\n                        loss[codon] += (1 - tuning_param) * f.ratio * ((f.weights[codon] / max_value) ** 2)\n                except KeyError:\n                    continue\n\n    return loss\n\n# --------------------------------------------------------------\ndef find_max_value_per_feature(organisms, feature_name, codons):\n    \"\"\"Return the largest weight any of the given organisms assigns to the feature\n    over the given codons; a small epsilon replaces 0 to avoid division by zero.\"\"\"\n    values = []\n    for organism in organisms:\n        for feature in organism.features:\n            if feature.index_name == feature_name:\n                try:  # todo: temporary change. When the synonymous codons dict is done, erase 'try-except'\n                    values.extend([feature.weights[codon] for codon in codons])\n                except KeyError:\n                    values.append(0)\n    max_value = max(values)\n\n    if max_value == 0:\n        max_value = 0.000001\n\n    return max_value\n\n# --------------------------------------------------------------\ndef find_optimal_codons(high_expression_organisms, low_expression_organisms, tuning_param, local_maximum, evaluation_function=loss_function):\n    \"\"\"\n\n    :param high_expression_organisms: list of Organism objects. The organisms where we want to express the target gene\n    :param low_expression_organisms: list of Organism objects. The organisms where we do not want the expression\n    :param tuning_param: a number from 0 to 1 describing the priority in optimization\n    :param local_maximum: whether each organism is scored against its own per-feature maximum\n    :param evaluation_function: function which evaluates the loss\n    :return: Dictionary in the format Amino Acid: Optimal codon.\n    \"\"\"\n\n    optimal_codons = {}\n\n    for aa, codons in synonymous_codons.items():\n        loss = evaluation_function(high_expression_organisms, low_expression_organisms, codons, tuning_param, local_maximum=local_maximum)\n        optimal_codons[aa] = min(loss, key=loss.get)\n\n    return optimal_codons\n\n# --------------------------------------------------------------\ndef optimize_initiation(seq):\n    \"\"\"\n\n    :param seq: Seq object (only the initiation part)\n    :return: optimized initiation sequence\n\n    For now, the function returns the same sequence as in the input; to be done.\n    \"\"\"\n    return seq\n","sub_path":"modules/ORF/optimization.py","file_name":"optimization.py","file_ext":"py","file_size_in_byte":7774,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"557709480","text":"\"\"\"\n    System State for vote\n    Default values are defined as module variables\n\"\"\"\nfrom django.core.exceptions import ObjectDoesNotExist\n\nfrom django.db import models\nfrom datetime import date\nfrom apps.enrollment.courses.models.semester import Semester\n\nDEFAULT_YEAR = date.today().year - 2\nDEFAULT_MAX_POINTS = 50\nDEFAULT_MAX_VOTE = 3\nDEFAULT_DAY_BEG = 1\nDEFAULT_DAY_END = 31   # These values will have to be set\nDEFAULT_MONTH_BEG = 1  # so that they are realistic. For now this\nDEFAULT_MONTH_END = 7  # is convenient, if only for testing\nDEFAULT_VOTE_BEG = date(DEFAULT_YEAR, 6, 10)\nDEFAULT_VOTE_END = date(DEFAULT_YEAR, 7, 10)\nDEFAULT_CORRECTION_BEG = date(DEFAULT_YEAR, DEFAULT_MONTH_BEG, DEFAULT_DAY_BEG)\nDEFAULT_CORRECTION_END = date(DEFAULT_YEAR, DEFAULT_MONTH_END, DEFAULT_DAY_END)\n\n\nclass SystemState(models.Model):\n    \"\"\"\n    System state for vote\n    \"\"\"\n\n    semester_winter = models.ForeignKey(Semester, on_delete=models.CASCADE,\n                                        verbose_name='Semestr zimowy',\n                                        related_name='winter_votes',\n                                        null=True, blank=True)\n\n    semester_summer = models.ForeignKey(Semester,\n                                        on_delete=models.CASCADE,\n                                        verbose_name='Semestr letni',\n                                        related_name='summer_votes',\n                                        null=True, blank=True)\n\n    year = models.IntegerField(\n        verbose_name='Rok akademicki',\n        default=date.today().year)\n\n    max_points = models.IntegerField(\n        verbose_name='Maksimum punktów na przedmioty',\n        default=DEFAULT_MAX_POINTS)\n\n    max_vote = models.IntegerField(\n        verbose_name='Maksymalna wartość głosu',\n        default=DEFAULT_MAX_VOTE)\n\n    vote_beg = models.DateField(\n        verbose_name='Początek głosowania',\n        default=DEFAULT_VOTE_BEG)\n\n    vote_end = models.DateField(\n        verbose_name='Koniec głosowania',\n        default=DEFAULT_VOTE_END)\n\n    winter_correction_beg = models.DateField(\n        verbose_name='Początek korekty zimowej',\n        default=DEFAULT_CORRECTION_BEG)\n\n    winter_correction_end = models.DateField(\n        verbose_name='Koniec korekty zimowej',\n        default=DEFAULT_CORRECTION_END)\n\n    summer_correction_beg = models.DateField(\n        verbose_name='Początek korekty letniej',\n        default=DEFAULT_CORRECTION_BEG)\n\n    summer_correction_end = models.DateField(\n        verbose_name='Koniec korekty letniej',\n        default=DEFAULT_CORRECTION_END)\n\n    class Meta:\n        verbose_name = 'ustawienia głosowania'\n        verbose_name_plural = 'ustawienia głosowań'\n        app_label = 'vote'\n\n    def __str__(self):\n        return \"Ustawienia systemu na rok \" + str(self.year)\n\n    @staticmethod\n    def get_state(year=None):\n        \"\"\"\n        Gets the current system state from the database;\n        creates one if necessary\n        \"\"\"\n        if not year:\n            year = date.today().year\n        try:\n            return SystemState.objects.get(year=year)\n        except ObjectDoesNotExist:\n            return SystemState.create_default_state(year)\n\n    @staticmethod\n    def create_default_state(year=None):\n        \"\"\"\n        Creates a system state from the default variables\n        \"\"\"\n        if not year:\n            year = date.today().year\n        new_state = SystemState()\n        new_state.year = year\n        new_state.max_points = DEFAULT_MAX_POINTS\n        new_state.vote_beg = date(year, 6, 10)\n        new_state.vote_end = date(year, 7, 10)\n        new_state.save()\n        return new_state\n\n    def is_system_active(self):\n        return self.is_vote_active() or self.is_correction_active()\n\n    def is_vote_active(self):\n        \"\"\"\n        Checks if the vote is active\n        \"\"\"\n        today = date.today()\n\n        return self.vote_beg <= today <= self.vote_end\n\n    def is_correction_active(self):\n        \"\"\"\n        Checks if a correction is active\n        \"\"\"\n\n        return self.is_winter_correction_active() or self.is_summer_correction_active()\n\n    def is_winter_correction_active(self):\n        today = date.today()\n\n        return self.winter_correction_beg <= today <= self.winter_correction_end\n\n    def is_summer_correction_active(self):\n        today = date.today()\n\n        return self.summer_correction_beg <= today <= self.summer_correction_end\n","sub_path":"zapisy/apps/offer/vote/models/system_state.py","file_name":"system_state.py","file_ext":"py","file_size_in_byte":4480,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
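The date-window checks at the end of the system_state.py record above (vote and correction periods) all reduce to one inclusive `beg <= today <= end` comparison. A minimal standalone sketch of that pattern, standard library only (the names here are illustrative, not taken from the record):

    from datetime import date

    def in_window(day: date, beg: date, end: date) -> bool:
        # Inclusive on both ends, mirroring the model's beg <= today <= end checks.
        return beg <= day <= end

    # e.g. a vote window running June 10 through July 10:
    assert in_window(date(2020, 6, 15), date(2020, 6, 10), date(2020, 7, 10))
    assert not in_window(date(2020, 7, 11), date(2020, 6, 10), date(2020, 7, 10))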
+{"seq_id":"76602844","text":"import asyncio\nfrom signal import SIGINT, SIGTERM\n\n\nasync def app1():\n try:\n while True:\n print(\"App 1 is running\")\n await asyncio.sleep(1)\n except asyncio.CancelledError:\n print(\"App 1 is shutting down\")\n\n\nasync def app2():\n try:\n while True:\n print(\"App 2 is running\")\n await asyncio.sleep(1)\n except asyncio.CancelledError:\n print(\"App 2 is shutting down\")\n\n\ndef signal_handler(sig):\n loop = asyncio.get_running_loop()\n for task in asyncio.all_tasks(loop=loop):\n task.cancel()\n print(f\"Got signal: {sig!s}, shutting down.\")\n loop.remove_signal_handler(SIGTERM)\n loop.add_signal_handler(SIGINT, lambda: None)\n\n\nasync def main():\n loop = asyncio.get_running_loop()\n for sig in (SIGTERM, SIGINT):\n loop.add_signal_handler(sig, signal_handler, sig)\n\n try:\n await asyncio.gather(app1(), app2())\n except asyncio.CancelledError:\n print(\"All apps are shutting down.\")\n\n\nif __name__ == \"__main__\":\n asyncio.run(main())\n","sub_path":"parallel.py","file_name":"parallel.py","file_ext":"py","file_size_in_byte":1055,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"648900162","text":"import cv2\nimport numpy as np\nfolder_path = 'D:/GitRepos/Uni/Thesis/Simulation/PythonCode/Output/'\n\nimg_a = cv2.imread('img_99.png')\nimg_b = cv2.imread('img_100.png')\n\nimg_a = cv2.copyMakeBorder(img_a,10,10,10,5,cv2.BORDER_CONSTANT)\nimg_b = cv2.copyMakeBorder(img_b,10,10,5,10,cv2.BORDER_CONSTANT)\nimg_combine = np.hstack((img_a, img_b))\n\ncv2.imwrite(folder_path + \"combined_image.png\", img_combine)","sub_path":"Simulation/PythonCode/image_tiler.py","file_name":"image_tiler.py","file_ext":"py","file_size_in_byte":399,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"253368153","text":"#\n# Copyright 2019 The FATE Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nimport random\n\nfrom federatedml.param.intersect_param import IntersectParam\nfrom federatedml.secureprotol import gmpy_math\nfrom federatedml.secureprotol.encrypt import RsaEncrypt\nfrom federatedml.secureprotol.hash.hash_factory import Hash\nfrom federatedml.util import consts\nfrom federatedml.util import LOGGER\nfrom federatedml.transfer_variable.transfer_class.raw_intersect_transfer_variable import RawIntersectTransferVariable\nfrom federatedml.transfer_variable.transfer_class.rsa_intersect_transfer_variable import RsaIntersectTransferVariable\n\n\nclass Intersect(object):\n def __init__(self):\n super().__init__()\n self.model_param = IntersectParam()\n self.transfer_variable = None\n\n self._guest_id = None\n self._host_id = None\n self._host_id_list = None\n\n def load_params(self, param):\n raise NotImplementedError(\"this method should not be called here\")\n\n @property\n def guest_party_id(self):\n return self._guest_id\n\n @guest_party_id.setter\n def guest_party_id(self, guest_id):\n if not isinstance(guest_id, int):\n raise ValueError(\"party id should be integer, but get {}\".format(guest_id))\n self._guest_id = guest_id\n\n @property\n def host_party_id(self):\n return self._host_id\n\n @host_party_id.setter\n def host_party_id(self, host_id):\n if not isinstance(host_id, int):\n raise ValueError(\"party id should be integer, but get {}\".format(host_id))\n self._host_id = host_id\n\n @property\n def host_party_id_list(self):\n return self._host_id_list\n\n @host_party_id_list.setter\n def host_party_id_list(self, host_id_list):\n if not isinstance(host_id_list, list):\n raise ValueError(\n \"type host_party_id should be list, but get {} with {}\".format(type(host_id_list), host_id_list))\n self._host_id_list = host_id_list\n\n def run_intersect(self, data_instances):\n raise NotImplementedError(\"method init must be define\")\n\n def set_flowid(self, flowid=0):\n if self.transfer_variable is not None:\n self.transfer_variable.set_flowid(flowid)\n\n def _get_value_from_data(self, intersect_ids, data_instances):\n if intersect_ids is not None:\n intersect_ids = intersect_ids.join(data_instances, lambda i, d: d)\n intersect_ids.schema = data_instances.schema\n LOGGER.info(\"obtain intersect data_instances!\")\n\n return intersect_ids\n\n @staticmethod\n def get_common_intersection(intersect_ids_list: list):\n if len(intersect_ids_list) == 1:\n return intersect_ids_list[0]\n\n intersect_ids = None\n for i, value in enumerate(intersect_ids_list):\n if intersect_ids is None:\n intersect_ids = value\n continue\n intersect_ids = intersect_ids.join(value, lambda id, v: \"id\")\n\n return intersect_ids\n\n @staticmethod\n def hash(value, hash_operator, salt=''):\n h_value = hash_operator.compute(value, postfit_salt=salt)\n return h_value\n\n\nclass RsaIntersect(Intersect):\n def __init__(self):\n super().__init__()\n # self.intersect_cache_param = intersect_params.intersect_cache_param\n self.rcv_e = None\n 
self.rcv_n = None\n self.e = None\n self.d = None\n self.n = None\n # self.r = None\n self.transfer_variable = RsaIntersectTransferVariable()\n self.role = None\n\n def load_params(self, param):\n self.only_output_key = param.only_output_key\n self.sync_intersect_ids = param.sync_intersect_ids\n self.random_bit = param.random_bit\n self.rsa_params = param.rsa_params\n self.split_calculation = self.rsa_params.split_calculation\n self.random_base_fraction = self.rsa_params.random_base_fraction\n self.first_hash_operator = Hash(self.rsa_params.hash_method, False)\n self.final_hash_operator = Hash(self.rsa_params.final_hash_method, False)\n self.salt = self.rsa_params.salt\n\n @staticmethod\n def extend_pair(v1, v2):\n return v1 + v2\n\n @staticmethod\n def pubkey_id_process(data, fraction, random_bit, rsa_e, rsa_n, hash_operator=None, salt=''):\n if fraction and fraction <= consts.MAX_BASE_FRACTION:\n LOGGER.debug(f\"fraction value: {fraction} provided, use fraction in pubkey id process\")\n count = max(round(data.count() * max(fraction, consts.MIN_BASE_FRACTION)), 1)\n\n def group_kv(kv_iterator):\n res = []\n for k, v in kv_iterator:\n if hash_operator is not None:\n v = (k, v)\n k = int(Intersect.hash(k, hash_operator, salt), 16)\n res.append((k % count, [(k, v)]))\n return res\n\n reduced_pair_group = data.mapReducePartitions(group_kv, RsaIntersect.extend_pair)\n\n def pubkey_id_generate(k, pair):\n r = random.SystemRandom().getrandbits(random_bit)\n r_e = gmpy_math.powmod(r, rsa_e, rsa_n)\n for hash_sid, v in pair:\n processed_id = r_e * hash_sid % rsa_n\n yield processed_id, (v[0], r)\n\n return reduced_pair_group.flatMap(pubkey_id_generate)\n else:\n LOGGER.debug(f\"fraction not provided or invalid, fraction value: {fraction}.\")\n return data.map(lambda k, v: RsaIntersect.pubkey_id_process_per(k, v, random_bit, rsa_e, rsa_n,\n hash_operator, salt))\n\n @staticmethod\n def generate_rsa_key(rsa_bit=1024):\n LOGGER.info(f\"Generated {rsa_bit}-bit RSA key.\")\n encrypt_operator = RsaEncrypt()\n encrypt_operator.generate_key(rsa_bit)\n return encrypt_operator.get_key_pair()\n\n def generate_protocol_key(self):\n if self.role == consts.HOST:\n e, d, n = self.generate_rsa_key(self.rsa_params.key_length)\n else:\n e, d, n = [], [], []\n for i in range(len(self.host_party_id_list)):\n e_i, d_i, n_i = self.generate_rsa_key(self.rsa_params.key_length)\n e.append(e_i)\n d.append(d_i)\n n.append(n_i)\n return e, d, n\n\n @staticmethod\n def pubkey_id_process_per(hash_sid, v, random_bit, rsa_e, rsa_n, hash_operator=None, salt=''):\n r = random.SystemRandom().getrandbits(random_bit)\n if hash_operator:\n processed_id = gmpy_math.powmod(r, rsa_e, rsa_n) * int(Intersect.hash(hash_sid, hash_operator, salt), 16) % rsa_n\n return processed_id, (hash_sid, r)\n else:\n processed_id = gmpy_math.powmod(r, rsa_e, rsa_n) * hash_sid % rsa_n\n return processed_id, (v[0], r)\n\n @staticmethod\n def prvkey_id_process(hash_sid, v, rsa_d, rsa_n, final_hash_operator, salt, first_hash_operator=None):\n if first_hash_operator:\n processed_id = Intersect.hash(gmpy_math.powmod(int(Intersect.hash(hash_sid, first_hash_operator, salt), 16),\n rsa_d,\n rsa_n),\n final_hash_operator,\n salt)\n return processed_id, hash_sid\n else:\n processed_id = Intersect.hash(gmpy_math.powmod(hash_sid, rsa_d, rsa_n),\n final_hash_operator,\n salt)\n return processed_id, v[0]\n\n def cal_prvkey_ids_process_pair(self, data_instances, d, n, first_hash_operator=None):\n return data_instances.map(\n lambda k, v: self.prvkey_id_process(k, v, d, 
n,\n self.final_hash_operator,\n self.rsa_params.salt,\n first_hash_operator)\n )\n\n @staticmethod\n def sign_id(hash_sid, rsa_d, rsa_n):\n return gmpy_math.powmod(hash_sid, rsa_d, rsa_n)\n\n @staticmethod\n def map_raw_id_to_encrypt_id(raw_id_data, encrypt_id_data):\n encrypt_id_data_exchange_kv = encrypt_id_data.map(lambda k, v: (v, k))\n encrypt_raw_id = raw_id_data.join(encrypt_id_data_exchange_kv, lambda r, e: e)\n encrypt_common_id = encrypt_raw_id.map(lambda k, v: (v, \"id\"))\n\n return encrypt_common_id\n\n def filter_intersect_ids(self, encrypt_intersect_ids):\n if len(encrypt_intersect_ids) > 1:\n raw_intersect_ids = [e.map(lambda k, v: (v, 1)) for e in encrypt_intersect_ids]\n intersect_ids = self.get_common_intersection(raw_intersect_ids)\n else:\n intersect_ids = encrypt_intersect_ids[0]\n intersect_ids = intersect_ids.map(lambda k, v: (v, 1))\n return intersect_ids\n\n @staticmethod\n def extract_intersect_ids(intersect_ids, all_ids):\n intersect_ids = intersect_ids.join(all_ids, lambda e, h: h)\n return intersect_ids\n\n def split_calculation_process(self, data_instances):\n raise NotImplementedError(\"This method should not be called here\")\n\n def unified_calculation_process(self, data_instances):\n raise NotImplementedError(\"This method should not be called here\")\n\n def run_intersect(self, data_instances):\n LOGGER.info(\"Start RSA Intersection\")\n if self.split_calculation:\n # H(k), (k, v)\n hash_data_instances = data_instances.map(\n lambda k, v: (int(Intersect.hash(k, self.first_hash_operator, self.salt), 16), (k, v)))\n intersect_ids = self.split_calculation_process(hash_data_instances)\n else:\n intersect_ids = self.unified_calculation_process(data_instances)\n if not self.only_output_key:\n intersect_ids = self._get_value_from_data(intersect_ids, data_instances)\n return intersect_ids\n\n\nclass RawIntersect(Intersect):\n def __init__(self):\n super().__init__()\n self.role = None\n self.transfer_variable = RawIntersectTransferVariable()\n self.task_version_id = None\n self.tracker = None\n\n def load_params(self, param):\n self.only_output_key = param.only_output_key\n self.sync_intersect_ids = param.sync_intersect_ids\n self.with_encode = param.with_encode\n self.encode_params = param.encode_params\n self.join_role = param.join_role\n self.hash_operator = Hash(param.encode_params.encode_method, param.encode_params.base64)\n self.salt = self.encode_params.salt\n\n def intersect_send_id(self, data_instances):\n sid_hash_pair = None\n if self.with_encode and self.encode_params.encode_method != \"none\":\n if Hash.is_support(self.encode_params.encode_method):\n # hash_operator = Hash(self.encode_params.encode_method, self.encode_params.base64)\n sid_hash_pair = data_instances.map(\n lambda k, v: (Intersect.hash(k, self.hash_operator, self.salt), k))\n data_sid = sid_hash_pair.mapValues(lambda v: 1)\n else:\n raise ValueError(\"Unknown encode_method, please check the configuration of encode_param\")\n else:\n data_sid = data_instances.mapValues(lambda v: 1)\n\n LOGGER.info(\"Send id role is {}\".format(self.role))\n\n if self.role == consts.GUEST:\n send_ids_federation = self.transfer_variable.send_ids_guest\n recv_role = consts.HOST\n elif self.role == consts.HOST:\n send_ids_federation = self.transfer_variable.send_ids_host\n recv_role = consts.GUEST\n else:\n raise ValueError(\"Unknown intersect role, please check the code\")\n\n send_ids_federation.remote(data_sid,\n role=recv_role,\n idx=-1)\n\n LOGGER.info(\"Remote data_sid to role-join\")\n 
intersect_ids = None\n if self.sync_intersect_ids:\n if self.role == consts.HOST:\n intersect_ids_federation = self.transfer_variable.intersect_ids_guest\n elif self.role == consts.GUEST:\n intersect_ids_federation = self.transfer_variable.intersect_ids_host\n else:\n raise ValueError(\"Unknown intersect role, please check the code\")\n\n recv_intersect_ids_list = intersect_ids_federation.get(idx=-1)\n LOGGER.info(\"Get intersect ids from role-join!\")\n\n ids_list_size = len(recv_intersect_ids_list)\n LOGGER.info(\"recv_intersect_ids_list's size is {}\".format(ids_list_size))\n\n recv_intersect_ids = self.get_common_intersection(recv_intersect_ids_list)\n\n if self.role == consts.GUEST and len(self.host_party_id_list) > 1:\n LOGGER.info(\n \"raw intersect send role is guest, and has {} hosts, remote the final intersect_ids to hosts\".format(\n len(self.host_party_id_list)))\n self.transfer_variable.sync_intersect_ids_multi_hosts.remote(recv_intersect_ids,\n role=consts.HOST,\n idx=-1)\n\n if sid_hash_pair and recv_intersect_ids is not None:\n hash_intersect_ids_map = recv_intersect_ids.join(sid_hash_pair, lambda r, s: s)\n intersect_ids = hash_intersect_ids_map.map(lambda k, v: (v, 'intersect_id'))\n else:\n intersect_ids = recv_intersect_ids\n else:\n LOGGER.info(\"Not Get intersect ids from role-join!\")\n\n if not self.only_output_key:\n intersect_ids = self._get_value_from_data(intersect_ids, data_instances)\n\n return intersect_ids\n\n def intersect_join_id(self, data_instances):\n LOGGER.info(\"Join id role is {}\".format(self.role))\n\n sid_hash_pair = None\n if self.with_encode and self.encode_params.encode_method != \"none\":\n if Hash.is_support(self.encode_params.encode_method):\n hash_operator = Hash(self.encode_params.encode_method, self.encode_params.base64)\n sid_hash_pair = data_instances.map(\n lambda k, v: (hash_operator.compute(k, postfit_salt=self.encode_params.salt), k))\n data_sid = sid_hash_pair.mapValues(lambda v: 1)\n else:\n raise ValueError(\"Unknown encode_method, please check the configure of hash_param\")\n else:\n data_sid = data_instances.mapValues(lambda v: 1)\n\n if self.role == consts.HOST:\n send_ids_federation = self.transfer_variable.send_ids_guest\n elif self.role == consts.GUEST:\n send_ids_federation = self.transfer_variable.send_ids_host\n else:\n raise ValueError(\"Unknown intersect role, please check the code\")\n\n recv_ids_list = send_ids_federation.get(idx=-1)\n\n ids_list_size = len(recv_ids_list)\n LOGGER.info(\"Get ids_list from role-send, ids_list size is {}\".format(len(recv_ids_list)))\n\n if ids_list_size == 1:\n hash_intersect_ids = recv_ids_list[0].join(data_sid, lambda i, d: \"intersect_id\")\n elif ids_list_size > 1:\n hash_intersect_ids_list = []\n for ids in recv_ids_list:\n hash_intersect_ids_list.append(ids.join(data_sid, lambda i, d: \"intersect_id\"))\n hash_intersect_ids = self.get_common_intersection(hash_intersect_ids_list)\n else:\n hash_intersect_ids = None\n LOGGER.info(\"Finish intersect_ids computing\")\n\n if self.sync_intersect_ids:\n if self.role == consts.GUEST:\n intersect_ids_federation = self.transfer_variable.intersect_ids_guest\n send_role = consts.HOST\n elif self.role == consts.HOST:\n intersect_ids_federation = self.transfer_variable.intersect_ids_host\n send_role = consts.GUEST\n else:\n raise ValueError(\"Unknown intersect role, please check the code\")\n\n intersect_ids_federation.remote(hash_intersect_ids,\n role=send_role,\n idx=-1)\n LOGGER.info(\"Remote intersect ids to role-send\")\n\n if self.role 
== consts.HOST and len(self.host_party_id_list) > 1:\n LOGGER.info(\n \"raw intersect join role is host, and has {} hosts, get the final intersect_ids from guest\".format(\n len(self.host_party_id_list)))\n hash_intersect_ids = self.transfer_variable.sync_intersect_ids_multi_hosts.get(idx=0)\n\n if sid_hash_pair:\n hash_intersect_ids_map = hash_intersect_ids.join(sid_hash_pair, lambda r, s: s)\n intersect_ids = hash_intersect_ids_map.map(lambda k, v: (v, 'intersect_id'))\n else:\n intersect_ids = hash_intersect_ids\n\n if not self.only_output_key:\n intersect_ids = self._get_value_from_data(intersect_ids, data_instances)\n\n if self.task_version_id is not None:\n namespace = \"#\".join([str(self.guest_party_id), str(self.host_party_id), \"mountain\"])\n for k, v in enumerate(recv_ids_list):\n table_name = '_'.join([self.task_version_id, str(k)])\n self.tracker.job_tracker.save_as_table(v, table_name, namespace)\n LOGGER.info(\"save guest_{}'s id in name:{}, namespace:{}\".format(k, table_name, namespace))\n\n return intersect_ids\n","sub_path":"python/federatedml/statistic/intersect/intersect.py","file_name":"intersect.py","file_ext":"py","file_size_in_byte":18398,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"49143730","text":"import glob\nimport copy\n\nimport PIL\nimport cv2\nimport numpy as np\nimport scipy.ndimage as ndimage\nimport matplotlib.pyplot as plt\n\n\nclass record:\n def __init__(self,**kw): self.__dict__.update(kw)\n\n\ndef disp_img(img, title, h, w):\n cv2.namedWindow(title, flags=cv2.WINDOW_NORMAL)\n cv2.imshow(title, img)\n cv2.resizeWindow(title, (w, h))\n\n\ndef isintarray(a):\n return a.dtype in [np.dtype('B'),np.dtype('int16'),np.dtype('int32'),np.dtype('int64'),\n np.dtype('uint16'),np.dtype('uint32'),np.dtype('uint64')]\n\ndef isintegerarray(a):\n return a.dtype in [np.dtype('int32'),np.dtype('int64'),np.dtype('uint32'),np.dtype('uint64')]\n\n\ndef dim0(s):\n \"\"\"Dimension of the slice list for dimension 0.\"\"\"\n return s[0].stop-s[0].start\n\n\ndef dim1(s):\n \"\"\"Dimension of the slice list for dimension 1.\"\"\"\n return s[1].stop-s[1].start\n\n\ndef norm_max(a):\n return a/np.amax(a)\n\n\ndef width(s):\n return s[1].stop-s[1].start\n\n\ndef area(a):\n \"\"\"Return the area of the slice list (ignores anything past a[:2].\"\"\"\n return np.prod([max(x.stop - x.start, 0) for x in a[:2]])\n\ndef center(s):\n ycenter = np.mean([s[0].stop,s[0].start])\n xcenter = np.mean([s[1].stop,s[1].start])\n return (ycenter, xcenter)\n\n\ndef r_dilation(image,size,origin=0):\n \"\"\"Dilation with rectangular structuring element using maximum_filter\"\"\"\n return ndimage.maximum_filter(image,size,origin=origin)\n\n\ndef r_erosion(image,size,origin=0):\n \"\"\"Erosion with rectangular structuring element using maximum_filter\"\"\"\n return ndimage.minimum_filter(image,size,origin=origin)\n\n\ndef rb_dilation(image,size,origin=0):\n \"\"\"Binary dilation using linear filters.\"\"\"\n output = np.zeros(image.shape,'f')\n ndimage.uniform_filter(image,size,output=output,origin=origin,mode='constant',cval=0)\n return np.array(output>0,'i')\n\n\ndef rb_erosion(image,size,origin=0):\n \"\"\"Binary erosion using linear filters.\"\"\"\n output = np.zeros(image.shape,'f')\n ndimage.uniform_filter(image,size,output=output,origin=origin,mode='constant',cval=1)\n return np.array(output==1,'i')\n\n\ndef rb_opening(image,size,origin=0):\n \"\"\"Binary opening using linear filters.\"\"\"\n image = 
rb_erosion(image,size,origin=origin)\n return rb_dilation(image,size,origin=origin)\n\n\ndef select_regions(binary,f,min=0,nbest=100000):\n \"\"\"Given a scoring function f over slice tuples (as returned by\n find_objects), keeps at most nbest regions whose scores is higher\n than min.\"\"\"\n labels,n = ndimage.label(binary)\n objects = ndimage.find_objects(labels)\n scores = [f(o) for o in objects]\n best = np.argsort(scores)\n keep = np.zeros(len(objects)+1,'i')\n if nbest > 0:\n for i in best[-nbest:]:\n if scores[i]<=min: continue\n keep[i+1] = 1\n return keep[labels]\n\n\ndef check_binary(image):\n assert image.dtype=='B' or image.dtype=='i' or image.dtype==np.dtype('bool'),\\\n \"array should be binary, is %s %s\"%(image.dtype,image.shape)\n assert np.amin(image)>=0 and np.amax(image)<=1,\\\n \"array should be binary, has values %g to %g\"%(np.amin(image),np.amax(image))\n\n\ndef read_image_binary(fname, dtype='i'):\n \"\"\"Read an image from disk and return it as a binary image\n of the given dtype.\"\"\"\n pil = PIL.Image.open(fname)\n a = pil2array(pil)\n if a.ndim == 3: a = np.amax(a, axis=2)\n return np.array(a > 0.5 * (np.amin(a) + np.amax(a)), dtype)\n\n\ndef pil2array(im, alpha=0):\n if im.mode == \"L\":\n a = np.fromstring(im.tobytes(), 'B')\n a.shape = im.size[1], im.size[0]\n return a\n if im.mode == \"RGB\":\n a = np.fromstring(im.tobytes(), 'B')\n a.shape = im.size[1], im.size[0], 3\n return a\n if im.mode == \"RGBA\":\n a = np.fromstring(im.tobytes(), 'B')\n a.shape = im.size[1], im.size[0], 4\n if not alpha: a = a[:, :, :3]\n return a\n return pil2array(im.convert(\"L\"))\n\n\ndef correspondences(labels1,labels2):\n \"\"\"Given two labeled images, compute an array giving the correspondences\n between labels in the two images.\"\"\"\n q = 100000\n assert np.amin(labels1)>=0 and np.amin(labels2)>=0\n assert np.amax(labels2)<q\n combo = labels1*q+labels2\n result = np.unique(combo)\n result = np.array([result//q,result%q])\n return result\n\n\ndef find(condition):\n \"Return the indices where ravel(condition) is true\"\n res, = np.nonzero(np.ravel(condition))\n return res\n\n\ndef propagate_labels(image,labels,conflict=0):\n \"\"\"Given an image and a set of labels, apply the labels\n to all the regions in the image that overlap a label.\n Assign the value `conflict` to any labels that have a conflict.\"\"\"\n rlabels,_ = ndimage.label(image)\n cors = correspondences(rlabels,labels)\n outputs = np.zeros(np.amax(rlabels)+1,'i')\n oops = -(1<<30)\n for o,i in cors.T:\n if outputs[o]!=0: outputs[o] = oops\n else: outputs[o] = i\n outputs[outputs==oops] = conflict\n outputs[0] = 0\n return outputs[rlabels]\n\n\ndef spread_labels(labels,maxdist=9999999):\n \"\"\"Spread the given labels to the background\"\"\"\n distances,features = ndimage.distance_transform_edt(labels==0,return_distances=True,return_indices=True)\n indexes = features[0]*labels.shape[1]+features[1]\n spread = labels.ravel()[indexes.ravel()].reshape(*labels.shape)\n spread *= (distances<maxdist)\n return spread\n\n\ndef compute_lines(segmentation, minscale):\n \"\"\"Given a line segmentation map, computes a list\n of tuples consisting of 2D slices and masked images.\"\"\"\n lobjects = ndimage.find_objects(segmentation)\n lines = []\n for i,o in enumerate(lobjects):\n if o is None: continue\n if dim0(o) < minscale: continue\n mask = (segmentation[o]==i+1)\n if np.amax(mask)==0: continue\n result = record()\n result.label = i+1\n result.bounds = o\n result.mask = mask\n lines.append(result)\n return 
lines\n\n\ndef reading_order(lines, highlight=None):\n \"\"\"Given the list of lines (a list of 2D slices), computes\n the partial reading order. The output is a binary 2D array\n such that order[i,j] is true if line i comes before line j\n in reading order.\"\"\"\n order = np.zeros((len(lines),len(lines)),'B')\n def x_overlaps(u,v):\n return u[1].start<v[1].stop and u[1].stop>v[1].start\n def above(u,v):\n return u[0].start<v[0].start\n def left_of(u,v):\n return u[1].stop<v[1].start\n def separates(w,u,v):\n if w[0].stop<min(u[0].start,v[0].start): return 0\n if w[0].start>max(u[0].stop,v[0].stop): return 0\n if w[1].start<u[1].stop and w[1].stop>v[1].start: return 1\n for i,u in enumerate(lines):\n for j,v in enumerate(lines):\n if x_overlaps(u,v):\n if above(u,v):\n order[i,j] = 1\n else:\n if [w for w in lines if separates(w,u,v)]==[]:\n if left_of(u,v): order[i,j] = 1\n if j==highlight and order[i,j]:\n print((i, j), end=' ')\n y0,x0 = center(lines[i])\n y1,x1 = center(lines[j])\n plt.plot([x0,x1+200],[y0,y1])\n return order\n\n\ndef topsort(order):\n \"\"\"Given a binary array defining a partial order (o[i,j]==True means i<j),\n compute a topological sort. This is a quick and dirty implementation\n that works for up to a few thousand elements.\"\"\"\n n = len(order)\n visited = np.zeros(n)\n L = []\n def visit(k):\n if visited[k]: return\n visited[k] = 1\n for l in find(order[:,k]):\n visit(l)\n L.append(k)\n for k in range(n):\n visit(k)\n return L\n\n\ndef array2pil(a):\n if a.dtype==np.dtype(\"B\"):\n if a.ndim==2:\n return PIL.Image.frombytes(\"L\",(a.shape[1],a.shape[0]),a.tostring())\n elif a.ndim==3:\n return PIL.Image.frombytes(\"RGB\",(a.shape[1],a.shape[0]),a.tostring())\n else:\n raise TypeError(\"bad image rank\")\n elif a.dtype==np.dtype('float32'):\n return PIL.Image.fromstring(\"F\",(a.shape[1],a.shape[0]),a.tostring())\n else:\n raise TypeError(\"unknown image type\")\n\n\ndef int2rgb(image):\n \"\"\"Converts a rank 3 array with RGB values stored in the\n last axis into a rank 2 array containing 32 bit RGB values.\"\"\"\n assert image.ndim==2\n assert isintarray(image)\n a = np.zeros(list(image.shape)+[3],'B')\n a[:,:,0] = (image>>16)\n a[:,:,1] = (image>>8)\n a[:,:,2] = image\n return a\n\n\ndef make_seg_white(image):\n assert isintegerarray(image),\"%s: wrong type for segmentation\"%image.dtype\n image = image.copy()\n image[image==0] = 0xffffff\n return image\n\n\ndef midrange(image,frac=0.5):\n \"\"\"Computes the center of the range of image values\n (for quick thresholding).\"\"\"\n return frac*(np.amin(image)+np.amax(image))\n\n\ndef write_page_segmentation(fname,image):\n \"\"\"Writes a page segmentation, that is an RGB image whose values\n encode the segmentation of a page.\"\"\"\n assert image.ndim==2\n assert image.dtype in [np.dtype('int32'),np.dtype('int64')]\n a = int2rgb(make_seg_white(image))\n im = array2pil(a)\n im.save(fname)\n\n\ndef write_image_binary(fname,image,verbose=0):\n \"\"\"Write a binary image to disk. This verifies first that the given image\n is, in fact, binary. 
The image may be of any type, but must consist of only\n two values.\"\"\"\n if verbose: print(\"# writing\", fname)\n assert image.ndim==2\n image = np.array(255*(image>midrange(image)),'B')\n im = array2pil(image)\n im.save(fname)\n\n\ndef remove_noise(line,minsize=8):\n \"\"\"Remove small pixels from an image.\"\"\"\n if minsize==0: return line\n bin = (line>0.5*np.amax(line))\n labels,n = ndimage.label(bin)\n sums = ndimage.sum(bin,labels,range(n+1))\n sums = sums[labels]\n good = np.minimum(bin,1-(sums>0)*(sums<minsize))\n return good\n\n\ndef pad_image(image,d,cval=np.inf):\n result = np.ones(np.array(image.shape)+2*d)\n result[:,:] = np.amax(image) if cval==np.inf else cval\n result[d:-d,d:-d] = image\n return result\n\n\ndef extract(image,y0,x0,y1,x1,mode='nearest',cval=0):\n h,w = image.shape\n ch,cw = y1-y0,x1-x0\n y,x = np.clip(y0,0,max(h-ch,0)),np.clip(x0,0,max(w-cw, 0))\n sub = image[y:y+ch,x:x+cw]\n try:\n r = ndimage.shift(sub,(y-y0,x-x0),mode=mode,cval=cval,order=0)\n if cw > w or ch > h:\n pady0, padx0 = max(-y0, 0), max(-x0, 0)\n r = ndimage.affine_transform(r, np.eye(2), offset=(pady0, padx0), cval=1, output_shape=(ch, cw))\n return r\n\n except RuntimeError:\n # workaround for platform differences between 32bit and 64bit\n # scipy.ndimage\n dtype = sub.dtype\n sub = np.array(sub,dtype='float64')\n sub = ndimage.shift(sub,(y-y0,x-x0),mode=mode,cval=cval,order=0)\n sub = np.array(sub,dtype=dtype)\n return sub\n\n\ndef extract_masked(image,linedesc,pad=5,expand=0):\n \"\"\"Extract a subimage from the image using the line descriptor.\n A line descriptor consists of bounds and a mask.\"\"\"\n y0,x0,y1,x1 = [int(x) for x in [linedesc.bounds[0].start,linedesc.bounds[1].start, \\\n linedesc.bounds[0].stop,linedesc.bounds[1].stop]]\n if pad>0:\n mask = pad_image(linedesc.mask,pad,cval=0)\n else:\n mask = linedesc.mask\n line = extract(image,y0-pad,x0-pad,y1+pad,x1+pad)\n if expand>0:\n mask = ndimage.maximum_filter(mask,(expand,expand))\n line = np.where(mask,line,np.amax(line))\n return line\n\n\ndef glob_all(args):\n \"\"\"Given a list of command line arguments, expand all of them with glob.\"\"\"\n result = []\n for arg in args:\n if arg[0]==\"@\":\n with open(arg[1:],\"r\") as stream:\n expanded = stream.read().split(\"\\n\")\n expanded = [s for s in expanded if s!=\"\"]\n else:\n expanded = sorted(glob.glob(arg))\n if len(expanded)<1:\n raise FileNotFoundError(\"%s: expansion did not yield any files\"%arg)\n result += expanded\n return result\n\n\ndef erode_hlines_and_vlines(binary, scale, args):\n\n # generate the kernels\n min_width = int(args.hline_perc * binary.shape[1])\n min_height = int(args.vline_perc * binary.shape[0])\n hkernel = np.ones((1, min_width), dtype='uint8')\n vkernel = np.ones((min_height, 1), dtype='uint8')\n bin_copy = copy.deepcopy(binary)\n\n # remove horizontal lines\n _binary = cv2.erode(binary, hkernel, iterations=1, borderValue=0, borderType=cv2.BORDER_CONSTANT)\n labels, _ = ndimage.label(_binary)\n objects = ndimage.find_objects(labels)\n extr = 1\n for i, b in enumerate(objects):\n # extend the found lines half the padding kernel size\n y_slc = slice(max(b[0].start-extr,0), min(b[0].stop+extr, binary.shape[0]))\n x_slc = slice(max(b[1].start-(min_width//2),0), min(b[1].stop+(min_width//2), binary.shape[1]))\n bin_copy[y_slc, x_slc] = 0\n\n # remove vertical lines\n _binary = cv2.erode(binary, vkernel, iterations=1, borderValue=0, borderType=cv2.BORDER_CONSTANT)\n labels, _ = ndimage.label(_binary)\n objects = ndimage.find_objects(labels)\n 
extr = 1\n for i, b in enumerate(objects):\n # extend the found lines half the padding kernel size\n y_slc = slice(max(b[0].start-(min_height//2),0), min(b[0].stop+(min_height//2), binary.shape[0]))\n x_slc = slice(max(b[1].start-extr,0), min(b[1].stop+extr, binary.shape[1]))\n binary[y_slc, x_slc] = 0\n\n # merge the results\n binary = np.array((bin_copy + binary == 2), dtype='uint8')\n\n return binary\n\n\ndef remove_hlines_and_vlines(binary, scale, args):\n min_width = int(args.hline_perc * binary.shape[1])\n min_height = int(args.vline_perc * binary.shape[0])\n labels, _ = ndimage.label(binary)\n objects = ndimage.find_objects(labels)\n\n for i, b in enumerate(objects):\n if width(b) > min_width:\n section = binary[b]\n for rw in range(section.shape[0]):\n run_values, run_starts, run_lengths = find_runs(section[rw, :])\n run_lengths = run_lengths[run_values == 1]\n run_starts = run_starts[run_values == 1]\n run_starts = run_starts[run_lengths > min_width]\n run_lengths = run_lengths[run_lengths > min_width]\n y_slc = slice(b[0].start + rw, b[0].start + rw + 1)\n for i, rn in enumerate(run_starts):\n x_slc = slice(b[1].start + rn, b[1].start + rn + run_lengths[i])\n binary[y_slc, x_slc] = 0\n\n if dim0(b) > min_height:\n section = binary[b]\n for cl in range(section.shape[1]):\n run_values, run_starts, run_lengths = find_runs(section[:, cl])\n run_lengths = run_lengths[run_values == 1]\n run_starts = run_starts[run_values == 1]\n run_starts = run_starts[run_lengths > min_height]\n run_lengths = run_lengths[run_lengths > min_height]\n x_slc = slice(b[1].start + cl, b[1].start + cl + 1)\n for i, rn in enumerate(run_starts):\n y_slc = slice(b[0].start + rn, b[0].start + rn + run_lengths[i])\n binary[y_slc, x_slc] = 0\n\n binary = rb_opening(binary, 2)\n if args.extra_erode:\n binary = rb_erosion(binary, 2)\n return binary\n\n\ndef binary_objects(binary):\n labels, n = ndimage.label(binary)\n objects = ndimage.find_objects(labels)\n return objects\n\n\ndef estimate_scale(binary):\n objects = binary_objects(binary)\n bysize = sorted(objects, key=area)\n scalemap = np.zeros(binary.shape)\n for o in bysize:\n if np.amax(scalemap[o]) > 0: continue\n scalemap[o] = area(o) ** 0.5\n scale = np.median(scalemap[(scalemap > 3) & (scalemap < 100)])\n return scale\n\n\ndef compute_boxmap(binary, scale, threshold=(.5, 4), dtype='i'):\n objects = binary_objects(binary)\n bysize = sorted(objects, key=area)\n boxmap = np.zeros(binary.shape, dtype)\n for o in bysize:\n if area(o) ** .5 < threshold[0] * scale: continue\n if area(o) ** .5 > threshold[1] * scale: continue\n boxmap[o] = 1\n return boxmap\n\n\ndef find_runs(x):\n \"\"\"Find runs of consecutive items in an array.\"\"\"\n\n # ensure array\n x = np.asanyarray(x)\n if x.ndim != 1:\n raise ValueError('only 1D array supported')\n n = x.shape[0]\n\n # handle empty array\n if n == 0:\n return np.array([]), np.array([]), np.array([])\n\n else:\n # find run starts\n loc_run_start = np.empty(n, dtype=bool)\n loc_run_start[0] = True\n np.not_equal(x[:-1], x[1:], out=loc_run_start[1:])\n run_starts = np.nonzero(loc_run_start)[0]\n # find run values\n run_values = x[loc_run_start]\n # find run lengths\n run_lengths = np.diff(np.append(run_starts, n))\n return run_values, run_starts, run_lengths","sub_path":"ocropy_segmenter/misc_components.py","file_name":"misc_components.py","file_ext":"py","file_size_in_byte":16928,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"514718700","text":"# Copyright 
2011-2012 Canonical Ltd.\n#\n# This file is part of u1db.\n#\n# u1db is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Lesser General Public License version 3\n# as published by the Free Software Foundation.\n#\n# u1db is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Lesser General Public License for more details.\n#\n# You should have received a copy of the GNU Lesser General Public License\n# along with u1db. If not, see <http://www.gnu.org/licenses/>.\n\n\"\"\"Tests for the remote sync targets\"\"\"\n\nimport cStringIO\n\nfrom u1db import (\n errors,\n)\n\nfrom leap.soledad.tests import u1db_tests as tests\n\nfrom u1db.remote import (\n http_app,\n http_target,\n oauth_middleware,\n)\n\n\nclass TestHTTPSyncTargetBasics(tests.TestCase):\n\n def test_parse_url(self):\n remote_target = http_target.HTTPSyncTarget('http://127.0.0.1:12345/')\n self.assertEqual('http', remote_target._url.scheme)\n self.assertEqual('127.0.0.1', remote_target._url.hostname)\n self.assertEqual(12345, remote_target._url.port)\n self.assertEqual('/', remote_target._url.path)\n\n\nclass TestParsingSyncStream(tests.TestCase):\n\n def test_wrong_start(self):\n tgt = http_target.HTTPSyncTarget(\"http://foo/foo\")\n\n self.assertRaises(errors.BrokenSyncStream,\n tgt._parse_sync_stream, \"{}\\r\\n]\", None)\n\n self.assertRaises(errors.BrokenSyncStream,\n tgt._parse_sync_stream, \"\\r\\n{}\\r\\n]\", None)\n\n self.assertRaises(errors.BrokenSyncStream,\n tgt._parse_sync_stream, \"\", None)\n\n def test_wrong_end(self):\n tgt = http_target.HTTPSyncTarget(\"http://foo/foo\")\n\n self.assertRaises(errors.BrokenSyncStream,\n tgt._parse_sync_stream, \"[\\r\\n{}\", None)\n\n self.assertRaises(errors.BrokenSyncStream,\n tgt._parse_sync_stream, \"[\\r\\n\", None)\n\n def test_missing_comma(self):\n tgt = http_target.HTTPSyncTarget(\"http://foo/foo\")\n\n self.assertRaises(errors.BrokenSyncStream,\n tgt._parse_sync_stream,\n '[\\r\\n{}\\r\\n{\"id\": \"i\", \"rev\": \"r\", '\n '\"content\": \"c\", \"gen\": 3}\\r\\n]', None)\n\n def test_no_entries(self):\n tgt = http_target.HTTPSyncTarget(\"http://foo/foo\")\n\n self.assertRaises(errors.BrokenSyncStream,\n tgt._parse_sync_stream, \"[\\r\\n]\", None)\n\n def test_extra_comma(self):\n tgt = http_target.HTTPSyncTarget(\"http://foo/foo\")\n\n self.assertRaises(errors.BrokenSyncStream,\n tgt._parse_sync_stream, \"[\\r\\n{},\\r\\n]\", None)\n\n self.assertRaises(errors.BrokenSyncStream,\n tgt._parse_sync_stream,\n '[\\r\\n{},\\r\\n{\"id\": \"i\", \"rev\": \"r\", '\n '\"content\": \"{}\", \"gen\": 3, \"trans_id\": \"T-sid\"}'\n ',\\r\\n]',\n lambda doc, gen, trans_id: None)\n\n def test_error_in_stream(self):\n tgt = http_target.HTTPSyncTarget(\"http://foo/foo\")\n\n self.assertRaises(errors.Unavailable,\n tgt._parse_sync_stream,\n '[\\r\\n{\"new_generation\": 0},'\n '\\r\\n{\"error\": \"unavailable\"}\\r\\n', None)\n\n self.assertRaises(errors.Unavailable,\n tgt._parse_sync_stream,\n '[\\r\\n{\"error\": \"unavailable\"}\\r\\n', None)\n\n self.assertRaises(errors.BrokenSyncStream,\n tgt._parse_sync_stream,\n '[\\r\\n{\"error\": \"?\"}\\r\\n', None)\n\n\ndef make_http_app(state):\n return http_app.HTTPApp(state)\n\n\ndef http_sync_target(test, path):\n return http_target.HTTPSyncTarget(test.getURL(path))\n\n\ndef make_oauth_http_app(state):\n app = http_app.HTTPApp(state)\n application = 
oauth_middleware.OAuthMiddleware(app, None, prefix='/~/')\n application.get_oauth_data_store = lambda: tests.testingOAuthStore\n return application\n\n\ndef oauth_http_sync_target(test, path):\n st = http_sync_target(test, '~/' + path)\n st.set_oauth_credentials(tests.consumer1.key, tests.consumer1.secret,\n tests.token1.key, tests.token1.secret)\n return st\n\n\nclass TestRemoteSyncTargets(tests.TestCaseWithServer):\n\n scenarios = [\n ('http', {'make_app_with_state': make_http_app,\n 'make_document_for_test': tests.make_document_for_test,\n 'sync_target': http_sync_target}),\n ('oauth_http', {'make_app_with_state': make_oauth_http_app,\n 'make_document_for_test': tests.make_document_for_test,\n 'sync_target': oauth_http_sync_target}),\n ]\n\n def getSyncTarget(self, path=None):\n if self.server is None:\n self.startServer()\n return self.sync_target(self, path)\n\n def test_get_sync_info(self):\n self.startServer()\n db = self.request_state._create_database('test')\n db._set_replica_gen_and_trans_id('other-id', 1, 'T-transid')\n remote_target = self.getSyncTarget('test')\n self.assertEqual(('test', 0, '', 1, 'T-transid'),\n remote_target.get_sync_info('other-id'))\n\n def test_record_sync_info(self):\n self.startServer()\n db = self.request_state._create_database('test')\n remote_target = self.getSyncTarget('test')\n remote_target.record_sync_info('other-id', 2, 'T-transid')\n self.assertEqual(\n (2, 'T-transid'), db._get_replica_gen_and_trans_id('other-id'))\n\n def test_sync_exchange_send(self):\n self.startServer()\n db = self.request_state._create_database('test')\n remote_target = self.getSyncTarget('test')\n other_docs = []\n\n def receive_doc(doc):\n other_docs.append((doc.doc_id, doc.rev, doc.get_json()))\n\n doc = self.make_document('doc-here', 'replica:1', '{\"value\": \"here\"}')\n new_gen, trans_id = remote_target.sync_exchange(\n [(doc, 10, 'T-sid')], 'replica', last_known_generation=0,\n last_known_trans_id=None, return_doc_cb=receive_doc)\n self.assertEqual(1, new_gen)\n self.assertGetDoc(\n db, 'doc-here', 'replica:1', '{\"value\": \"here\"}', False)\n\n def test_sync_exchange_send_failure_and_retry_scenario(self):\n self.startServer()\n\n def blackhole_getstderr(inst):\n return cStringIO.StringIO()\n\n self.patch(self.server.RequestHandlerClass, 'get_stderr',\n blackhole_getstderr)\n db = self.request_state._create_database('test')\n _put_doc_if_newer = db._put_doc_if_newer\n trigger_ids = ['doc-here2']\n\n def bomb_put_doc_if_newer(doc, save_conflict,\n replica_uid=None, replica_gen=None,\n replica_trans_id=None):\n if doc.doc_id in trigger_ids:\n raise Exception\n return _put_doc_if_newer(doc, save_conflict=save_conflict,\n replica_uid=replica_uid,\n replica_gen=replica_gen,\n replica_trans_id=replica_trans_id)\n self.patch(db, '_put_doc_if_newer', bomb_put_doc_if_newer)\n remote_target = self.getSyncTarget('test')\n other_changes = []\n\n def receive_doc(doc, gen, trans_id):\n other_changes.append(\n (doc.doc_id, doc.rev, doc.get_json(), gen, trans_id))\n\n doc1 = self.make_document('doc-here', 'replica:1', '{\"value\": \"here\"}')\n doc2 = self.make_document('doc-here2', 'replica:1',\n '{\"value\": \"here2\"}')\n self.assertRaises(\n errors.HTTPError,\n remote_target.sync_exchange,\n [(doc1, 10, 'T-sid'), (doc2, 11, 'T-sud')],\n 'replica', last_known_generation=0, last_known_trans_id=None,\n return_doc_cb=receive_doc)\n self.assertGetDoc(db, 'doc-here', 'replica:1', '{\"value\": \"here\"}',\n False)\n self.assertEqual(\n (10, 'T-sid'), 
db._get_replica_gen_and_trans_id('replica'))\n self.assertEqual([], other_changes)\n # retry\n trigger_ids = []\n new_gen, trans_id = remote_target.sync_exchange(\n [(doc2, 11, 'T-sud')], 'replica', last_known_generation=0,\n last_known_trans_id=None, return_doc_cb=receive_doc)\n self.assertGetDoc(db, 'doc-here2', 'replica:1', '{\"value\": \"here2\"}',\n False)\n self.assertEqual(\n (11, 'T-sud'), db._get_replica_gen_and_trans_id('replica'))\n self.assertEqual(2, new_gen)\n # bounced back to us\n self.assertEqual(\n ('doc-here', 'replica:1', '{\"value\": \"here\"}', 1),\n other_changes[0][:-1])\n\n def test_sync_exchange_in_stream_error(self):\n self.startServer()\n\n def blackhole_getstderr(inst):\n return cStringIO.StringIO()\n\n self.patch(self.server.RequestHandlerClass, 'get_stderr',\n blackhole_getstderr)\n db = self.request_state._create_database('test')\n doc = db.create_doc_from_json('{\"value\": \"there\"}')\n\n def bomb_get_docs(doc_ids, check_for_conflicts=None,\n include_deleted=False):\n yield doc\n # delayed failure case\n raise errors.Unavailable\n\n self.patch(db, 'get_docs', bomb_get_docs)\n remote_target = self.getSyncTarget('test')\n other_changes = []\n\n def receive_doc(doc, gen, trans_id):\n other_changes.append(\n (doc.doc_id, doc.rev, doc.get_json(), gen, trans_id))\n\n self.assertRaises(\n errors.Unavailable, remote_target.sync_exchange, [], 'replica',\n last_known_generation=0, last_known_trans_id=None,\n return_doc_cb=receive_doc)\n self.assertEqual(\n (doc.doc_id, doc.rev, '{\"value\": \"there\"}', 1),\n other_changes[0][:-1])\n\n def test_sync_exchange_receive(self):\n self.startServer()\n db = self.request_state._create_database('test')\n doc = db.create_doc_from_json('{\"value\": \"there\"}')\n remote_target = self.getSyncTarget('test')\n other_changes = []\n\n def receive_doc(doc, gen, trans_id):\n other_changes.append(\n (doc.doc_id, doc.rev, doc.get_json(), gen, trans_id))\n\n new_gen, trans_id = remote_target.sync_exchange(\n [], 'replica', last_known_generation=0, last_known_trans_id=None,\n return_doc_cb=receive_doc)\n self.assertEqual(1, new_gen)\n self.assertEqual(\n (doc.doc_id, doc.rev, '{\"value\": \"there\"}', 1),\n other_changes[0][:-1])\n\n def test_sync_exchange_send_ensure_callback(self):\n self.startServer()\n remote_target = self.getSyncTarget('test')\n other_docs = []\n replica_uid_box = []\n\n def receive_doc(doc):\n other_docs.append((doc.doc_id, doc.rev, doc.get_json()))\n\n def ensure_cb(replica_uid):\n replica_uid_box.append(replica_uid)\n\n doc = self.make_document('doc-here', 'replica:1', '{\"value\": \"here\"}')\n new_gen, trans_id = remote_target.sync_exchange(\n [(doc, 10, 'T-sid')], 'replica', last_known_generation=0,\n last_known_trans_id=None, return_doc_cb=receive_doc,\n ensure_callback=ensure_cb)\n self.assertEqual(1, new_gen)\n db = self.request_state.open_database('test')\n self.assertEqual(1, len(replica_uid_box))\n self.assertEqual(db._replica_uid, replica_uid_box[0])\n self.assertGetDoc(\n db, 'doc-here', 'replica:1', '{\"value\": \"here\"}', False)\n\n\nload_tests = tests.load_with_scenarios\n","sub_path":"src/leap/soledad/tests/u1db_tests/test_remote_sync_target.py","file_name":"test_remote_sync_target.py","file_ext":"py","file_size_in_byte":12042,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"335940704","text":"# -*- coding: utf-8 -*-\n\"\"\"\nVästtrafik API\n\"\"\"\n\nimport base64\nimport json\nimport requests\nfrom datetime import datetime\nfrom datetime 
import timedelta\nfrom .boards import ArrivalBoard, DepartureBoard\n\nTOKEN_URL = 'https://api.vasttrafik.se/token'\nAPI_BASE_URL = 'https://api.vasttrafik.se/bin/rest.exe/v2'\nDATE_FORMAT = '%Y-%m-%d'\nTIME_FORMAT = '%H:%M'\n\n\nclass Error(Exception):\n pass\n\n\ndef _get_node(response, *ancestors):\n \"\"\" Traverse tree to node \"\"\"\n document = response\n for ancestor in ancestors:\n if ancestor not in document:\n return {}\n else:\n document = document[ancestor]\n return document\n\n\nclass JourneyPlanner:\n \"\"\" Journey planner class\"\"\"\n\n def __init__(self, key, secret, expiry=59):\n self._key = key\n self._secret = secret\n self._expiry = expiry\n self._token = None\n self._token_expire_date = None\n self.update_token()\n\n def update_token(self):\n \"\"\" Get token from key and secret \"\"\"\n headers = {\n 'Content-Type': 'application/x-www-form-urlencoded',\n 'Authorization': 'Basic ' + base64.b64encode(\n (self._key + ':' + self._secret).encode()).decode()\n }\n data = {'grant_type': 'client_credentials'}\n\n response = requests.post(TOKEN_URL, data=data, headers=headers)\n obj = json.loads(response.content.decode('UTF-8'))\n self._token = obj['access_token']\n self._token_expire_date = (\n datetime.now() +\n timedelta(minutes=self._expiry))\n\n # LOCATION\n\n def get_all_stops(self):\n \"\"\" location.allstops \"\"\"\n response = self._request(\n 'location.allstops')\n return _get_node(response, 'LocationList', 'StopLocation')\n\n def get_stops_by_nearby_stops(self, origin_coord_lat, origin_coord_long):\n \"\"\" location.nearbystops \"\"\"\n response = self._request(\n 'location.nearbystops',\n originCoordLat=origin_coord_lat,\n originCoordLong=origin_coord_long)\n return _get_node(response, 'LocationList', 'StopLocation')\n\n def get_stops_by_nearby_address(self, origin_coord_lat, origin_coord_long):\n \"\"\" location.nearbyaddress \"\"\"\n response = self._request(\n 'location.nearbyaddress',\n originCoordLat=origin_coord_lat,\n originCoordLong=origin_coord_long)\n return _get_node(response, 'LocationList', 'CoordLocation')\n\n def get_stops_by_name(self, name):\n \"\"\" location.name \"\"\"\n response = self._request(\n 'location.name',\n input=name)\n return _get_node(response, 'LocationList', 'StopLocation')\n\n # ARRIVAL BOARD\n\n def get_arrival_board_at_stop(self, stop_id, date=None, direction=None):\n \"\"\" arrivalBoard \"\"\"\n date = date if date else datetime.now()\n request_parameters = {\n 'id': stop_id,\n 'date': date.strftime(DATE_FORMAT),\n 'time': date.strftime(TIME_FORMAT)\n }\n if direction:\n request_parameters['direction'] = direction\n response = self._request(\n 'arrivalBoard',\n **request_parameters)\n return ArrivalBoard(_get_node(response, 'ArrivalBoard', 'Arrival'), date.strftime(TIME_FORMAT))\n\n # DEPARTURE BOARD\n\n def get_departure_board_at_stop(self, stop_id, date=None, direction=None):\n \"\"\" departureBoard \"\"\"\n date = date if date else datetime.now()\n request_parameters = {\n 'id': stop_id,\n 'date': date.strftime(DATE_FORMAT),\n 'time': date.strftime(TIME_FORMAT)\n }\n if direction:\n request_parameters['direction'] = direction\n response = self._request(\n 'departureBoard',\n **request_parameters)\n return DepartureBoard(_get_node(response, 'DepartureBoard', 'Departure'), date.strftime(TIME_FORMAT))\n\n def get_arrival_board_from_stop_name(self, stop_name, date=None, direction=None):\n return self.__get_board_from_stop_name('arrival', stop_name, date, direction)\n\n def get_departure_board_from_stop_name(self, stop_name, 
date=None, direction=None):\n return self.__get_board_from_stop_name('departure', stop_name, date, direction)\n\n def __get_board_from_stop_name(self, board_type, stop_name, date=None, direction=None):\n first_matched_stop = self.get_stops_by_name(stop_name)[0]\n assert stop_name.casefold() in first_matched_stop['name'].casefold(), \\\n 'Stop match {} does not match found stop {}'.format(stop_name, first_matched_stop['name'])\n if board_type == 'arrival':\n return self.get_arrival_board_at_stop(first_matched_stop[\"id\"], date, direction)\n elif board_type == 'departure':\n return self.get_departure_board_at_stop(first_matched_stop[\"id\"], date, direction)\n else:\n raise Exception\n\n # TRIP\n\n def get_trip_from_origin_dest_id(self, origin_id, dest_id, date=None):\n \"\"\" trip \"\"\"\n date = date if date else datetime.now()\n response = self._request(\n 'trip',\n originId=origin_id,\n destId=dest_id,\n date=date.strftime(DATE_FORMAT),\n time=date.strftime(TIME_FORMAT))\n return _get_node(response, 'TripList', 'Trip')\n\n def _request(self, service, **parameters):\n \"\"\" request builder \"\"\"\n urlformat = \"{baseurl}/{service}?{parameters}&format=json\"\n url = urlformat.format(\n baseurl=API_BASE_URL,\n service=service,\n parameters=\"&\".join([\n \"{}={}\".format(key, value) for key, value in parameters.items()\n ]))\n if datetime.now() > self._token_expire_date:\n self.update_token()\n headers = {'Authorization': 'Bearer ' + self._token}\n res = requests.get(url, headers=headers)\n if res.status_code == 200:\n return json.loads(res.content.decode('UTF-8'))\n else:\n raise Error('Error: ' + str(res.status_code) +\n str(res.content))\n","sub_path":"vasttrafik/journey_planner.py","file_name":"journey_planner.py","file_ext":"py","file_size_in_byte":6150,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"150831827","text":"# This file is Copyright (c) 2020 Florent Kermarrec <florent@enjoy-digital.fr>\n# This file is Copyright (c) 2020 Dolu1990 <charles.papon.90@gmail.com>\n# License: BSD\n\nimport os\n\nfrom migen import *\n\nfrom litex.soc.interconnect import wishbone\nfrom litex.soc.interconnect.csr import *\nfrom litex.soc.cores.cpu import CPU, CPU_GCC_TRIPLE_RISCV32\n\nfrom litedram.common import LiteDRAMNativePort\n\n\nCPU_VARIANTS = {\n \"1c\": \"VexRiscv\",\n \"2c\": \"VexRiscv\",\n \"4c\": \"VexRiscv\",\n \"8c\": \"VexRiscv\",\n \"mp1c\": \"VexRiscv\",\n \"mp2c\": \"VexRiscv\",\n \"mp4c\": \"VexRiscv\",\n \"mp8c\": \"VexRiscv\",\n}\n\n\nGCC_FLAGS = {\n # /-------- Base ISA\n # |/------- Hardware Multiply + Divide\n # ||/----- Atomics\n # |||/---- Compressed ISA\n # ||||/--- Single-Precision Floating-Point\n # |||||/-- Double-Precision Floating-Point\n # imacfd\n \"1c\": \"-march=rv32ima -mabi=ilp32\",\n \"2c\": \"-march=rv32ima -mabi=ilp32\",\n \"4c\": \"-march=rv32ima -mabi=ilp32\",\n \"8c\": \"-march=rv32ima -mabi=ilp32\",\n \"mp1c\": \"-march=rv32ima -mabi=ilp32\",\n \"mp2c\": \"-march=rv32ima -mabi=ilp32\",\n \"mp4c\": \"-march=rv32ima -mabi=ilp32\",\n \"mp8c\": \"-march=rv32ima -mabi=ilp32\",\n}\n\nclass Open(Signal): pass\n\nclass VexRiscvSMP(CPU):\n name = \"vexriscv\"\n human_name = \"VexRiscv SMP\"\n variants = CPU_VARIANTS\n data_width = 32\n endianness = \"little\"\n gcc_triple = CPU_GCC_TRIPLE_RISCV32\n linker_output_format = \"elf32-littleriscv\"\n nop = \"nop\"\n io_regions = {0x80000000: 0x80000000} # origin, length\n\n @property\n def mem_map(self):\n return {\n \"rom\": 0x00000000,\n \"sram\": 
0x10000000,\n \"main_ram\": 0x40000000,\n \"csr\": 0xf0000000,\n \"clint\": 0xf0010000,\n }\n\n @property\n def gcc_flags(self):\n flags = GCC_FLAGS[self.variant]\n flags += \" -D__vexriscv__\"\n flags += \" -DUART_POLLING\"\n return flags\n\n def __init__(self, platform, variant):\n variant = \"2c\" if variant == \"standard\" else variant\n assert variant in CPU_VARIANTS, \"Unsupported variant %s\" % variant\n self.platform = platform\n self.variant = variant\n self.human_name = self.human_name + \"-\" + variant.upper()\n self.cluster_name = \"VexRiscvLitexSmp{mp}Cluster_{n}c\".format(mp=\"Mp\" if \"mp\" in variant else \"\", n=variant[-2]) # FIXME\n self.reset = Signal()\n self.jtag_clk = Signal()\n self.jtag_enable = Signal()\n self.jtag_capture = Signal()\n self.jtag_shift = Signal()\n self.jtag_update = Signal()\n self.jtag_reset = Signal()\n self.jtag_tdo = Signal()\n self.jtag_tdi = Signal()\n self.interrupt = Signal(32)\n self.pbus = pbus = wishbone.Interface()\n self.cbus = cbus = wishbone.Interface()\n self.plicbus = plicbus = wishbone.Interface()\n\n self.periph_buses = [pbus]\n self.memory_buses = [] # Added dynamically\n\n os.system(\"cp images/{}.dtb images/dtb\".format(variant)) # FIXME: generate dts/dtb dynamically\n\n # # #\n\n self.cpu_params = dict(\n # Clk / Rst\n i_clk = ClockSignal(),\n i_reset = ResetSignal() | self.reset,\n i_debugResetIn = ResetSignal() | self.reset,\n o_io_debugReset = Open(),\n\n # Interrupts\n i_io_interrupts = self.interrupt,\n\n # JTAG\n i_jtag_clk = self.jtag_clk,\n i_io_jtagInstruction_enable = self.jtag_enable,\n i_io_jtagInstruction_capture = self.jtag_capture,\n i_io_jtagInstruction_shift = self.jtag_shift,\n i_io_jtagInstruction_update = self.jtag_update,\n i_io_jtagInstruction_reset = self.jtag_reset,\n i_io_jtagInstruction_tdi = self.jtag_tdi,\n o_io_jtagInstruction_tdo = self.jtag_tdo,\n\n # Peripheral Bus (Master)\n o_io_peripheral_CYC = pbus.cyc,\n o_io_peripheral_STB = pbus.stb,\n i_io_peripheral_ACK = pbus.ack,\n o_io_peripheral_WE = pbus.we,\n o_io_peripheral_ADR = pbus.adr,\n i_io_peripheral_DAT_MISO = pbus.dat_r,\n o_io_peripheral_DAT_MOSI = pbus.dat_w,\n o_io_peripheral_SEL = pbus.sel,\n i_io_peripheral_ERR = pbus.err,\n o_io_peripheral_CTI = pbus.cti,\n o_io_peripheral_BTE = pbus.bte,\n\n # CLINT Bus (Slave)\n i_io_clint_CYC = cbus.cyc,\n i_io_clint_STB = cbus.stb,\n o_io_clint_ACK = cbus.ack,\n i_io_clint_WE = cbus.we,\n i_io_clint_ADR = cbus.adr,\n o_io_clint_DAT_MISO = cbus.dat_r,\n i_io_clint_DAT_MOSI = cbus.dat_w,\n\n # PLIC Bus (Slave)\n i_io_plic_CYC = plicbus.cyc,\n i_io_plic_STB = plicbus.stb,\n o_io_plic_ACK = plicbus.ack,\n i_io_plic_WE = plicbus.we,\n i_io_plic_ADR = plicbus.adr,\n o_io_plic_DAT_MISO = plicbus.dat_r,\n i_io_plic_DAT_MOSI = plicbus.dat_w,\n\n )\n if \"mp\" in variant:\n ncpus = int(variant[-2]) # FIXME\n for n in range(ncpus):\n ibus = LiteDRAMNativePort(mode=\"both\", address_width=32, data_width=128)\n dbus = LiteDRAMNativePort(mode=\"both\", address_width=32, data_width=128)\n self.memory_buses.append(ibus)\n self.memory_buses.append(dbus)\n self.cpu_params.update({\n # Instruction Memory Bus (Master)\n \"o_io_iMem_{}_cmd_valid\".format(n) : ibus.cmd.valid,\n \"i_io_iMem_{}_cmd_ready\".format(n) : ibus.cmd.ready,\n \"o_io_iMem_{}_cmd_payload_we\".format(n) : ibus.cmd.we,\n \"o_io_iMem_{}_cmd_payload_addr\".format(n) : ibus.cmd.addr,\n \"o_io_iMem_{}_wdata_valid\".format(n) : ibus.wdata.valid,\n \"i_io_iMem_{}_wdata_ready\".format(n) : ibus.wdata.ready,\n 
\"o_io_iMem_{}_wdata_payload_data\".format(n) : ibus.wdata.data,\n \"o_io_iMem_{}_wdata_payload_we\".format(n) : ibus.wdata.we,\n \"i_io_iMem_{}_rdata_valid\".format(n) : ibus.rdata.valid,\n \"o_io_iMem_{}_rdata_ready\".format(n) : ibus.rdata.ready,\n \"i_io_iMem_{}_rdata_payload_data\".format(n) : ibus.rdata.data,\n\n # Data Memory Bus (Master)\n \"o_io_dMem_{}_cmd_valid\".format(n) : dbus.cmd.valid,\n \"i_io_dMem_{}_cmd_ready\".format(n) : dbus.cmd.ready,\n \"o_io_dMem_{}_cmd_payload_we\".format(n) : dbus.cmd.we,\n \"o_io_dMem_{}_cmd_payload_addr\".format(n) : dbus.cmd.addr,\n \"o_io_dMem_{}_wdata_valid\".format(n) : dbus.wdata.valid,\n \"i_io_dMem_{}_wdata_ready\".format(n) : dbus.wdata.ready,\n \"o_io_dMem_{}_wdata_payload_data\".format(n) : dbus.wdata.data,\n \"o_io_dMem_{}_wdata_payload_we\".format(n) : dbus.wdata.we,\n \"i_io_dMem_{}_rdata_valid\".format(n) : dbus.rdata.valid,\n \"o_io_dMem_{}_rdata_ready\".format(n) : dbus.rdata.ready,\n \"i_io_dMem_{}_rdata_payload_data\".format(n) : dbus.rdata.data,\n })\n else:\n ibus = LiteDRAMNativePort(mode=\"both\", address_width=32, data_width=128)\n dbus = LiteDRAMNativePort(mode=\"both\", address_width=32, data_width=128)\n self.memory_buses.append(ibus)\n self.memory_buses.append(dbus)\n self.cpu_params.update(\n # Instruction Memory Bus (Master)\n o_io_iMem_cmd_valid = ibus.cmd.valid,\n i_io_iMem_cmd_ready = ibus.cmd.ready,\n o_io_iMem_cmd_payload_we = ibus.cmd.we,\n o_io_iMem_cmd_payload_addr = ibus.cmd.addr,\n o_io_iMem_wdata_valid = ibus.wdata.valid,\n i_io_iMem_wdata_ready = ibus.wdata.ready,\n o_io_iMem_wdata_payload_data = ibus.wdata.data,\n o_io_iMem_wdata_payload_we = ibus.wdata.we,\n i_io_iMem_rdata_valid = ibus.rdata.valid,\n o_io_iMem_rdata_ready = ibus.rdata.ready,\n i_io_iMem_rdata_payload_data = ibus.rdata.data,\n\n # Data Memory Bus (Master)\n o_io_dMem_cmd_valid = dbus.cmd.valid,\n i_io_dMem_cmd_ready = dbus.cmd.ready,\n o_io_dMem_cmd_payload_we = dbus.cmd.we,\n o_io_dMem_cmd_payload_addr = dbus.cmd.addr,\n o_io_dMem_wdata_valid = dbus.wdata.valid,\n i_io_dMem_wdata_ready = dbus.wdata.ready,\n o_io_dMem_wdata_payload_data = dbus.wdata.data,\n o_io_dMem_wdata_payload_we = dbus.wdata.we,\n i_io_dMem_rdata_valid = dbus.rdata.valid,\n o_io_dMem_rdata_ready = dbus.rdata.ready,\n i_io_dMem_rdata_payload_data = dbus.rdata.data,\n )\n\n # Add verilog sources\n self.add_sources(platform, variant)\n\n def set_reset_address(self, reset_address):\n assert not hasattr(self, \"reset_address\")\n self.reset_address = reset_address\n assert reset_address == 0x00000000\n\n def add_sources(self, platform, variant):\n platform.add_source(os.path.join(os.path.dirname(__file__), \"..\", \"verilog\", \"RamXilinx.v\"))\n platform.add_source(os.path.join(os.path.dirname(__file__), \"..\", \"verilog\", self.cluster_name + \".v\"))\n\n def do_finalize(self):\n assert hasattr(self, \"reset_address\")\n self.specials += Instance(self.cluster_name, **self.cpu_params)\n","sub_path":"vexriscv_smp/core.py","file_name":"core.py","file_ext":"py","file_size_in_byte":10562,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"414727195","text":"from __future__ import print_function\nimport jsonpickle\nimport httplib\nimport os\nimport filescan\nfrom CurrencyFeed import cf_definitions\nfrom . import fs_definitions\nfrom . 
import percentmap\nfrom datetime import datetime\nimport shutil\n\n\ndef scantask():\n\tprint(\"starting scantask\")\n\n\tpercentMap = percentmap.createPercentMap()\n\t\n\tunionMap = diffMaps(percentMap)\n\n\tfullJson = jsonpickle.encode(percentMap, unpicklable=False)\n\twith open(fs_definitions.mapFile, \"w\") as fh:\n\t\tfh.write(fullJson)\n\n\tunionJson = jsonpickle.encode(unionMap, unpicklable=False)\n\n\theaders = { \"Content-type\" : \"application/json\" }\n\n\tconn = httplib.HTTPConnection(fs_definitions.uploadServer)\n\tconn.request(\"POST\", fs_definitions.uploadPath, unionJson, headers)\n\tresponse = conn.getresponse()\n\n\tif (response.status != 200):\n\t\traise Exception(\"Status code: \" + str(response.status))\n\n\tmsg = response.read()\n\tif (msg != \"OK\"):\n\t\traise Exception(\"Unexpected response: \" + msg)\n\n\ndef diffMaps(percentMap):\n\t\n\ttry:\n\t\twith open(fs_definitions.mapFile, \"r\") as oldMapFh:\n\t\t\toldMapJson = oldMapFh.read()\n\t\t\toldMap = jsonpickle.decode(oldMapJson)\n\texcept Exception:\n\t\treturn percentMap\n\t\n\tunionMap = {}\n\tfor key, value in oldMap.iteritems():\n\t\tif percentMap[key] != value:\n\t\t\tunionMap[key] = value\n\n\treturn unionMap\n","sub_path":"filescan/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":1278,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} {"seq_id":"375100985","text":"'''\r\nCreated on Jan 18, 2010\r\n\r\n@author: Paul\r\n'''\r\nfrom SQLEng import SQLEng\r\n\r\nclass pduSender(object):\r\n '''\r\n classdocs\r\n This class is designed for Gammu-smsd\r\n Inserting a record into MySQL\r\n Gammu-smsd will send the record\r\n Using command line will cause smsd stop for a while\r\n '''\r\n def getMesg(self,byteArray):\r\n mesg=\"\"\r\n for byte in byteArray:\r\n if byte < 16 :\r\n val=hex(byte)\r\n if val==\"0x0\" : val=\"00\"\r\n else :\r\n val=val.lstrip(\"0x\")\r\n val=\"{0}{1}\".format('0',val)\r\n else :\r\n val=hex(byte)\r\n val=val.lstrip(\"0x\")\r\n mesg+=val\r\n return mesg\r\n def send(self,to,byteArray):\r\n sEng=SQLEng()\r\n sEng.exeSQL(sEng.getInsetSentBox(to, self.getMesg(byteArray)))\r\n def __init__(self):\r\n '''\r\n Constructor\r\n '''\r\n pass\r\n ","sub_path":"django_projects/care_reminder/src/gammuSender.py","file_name":"gammuSender.py","file_ext":"py","file_size_in_byte":968,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} {"seq_id":"250383347","text":"#! 
/usr/bin/python\n\nimport numpy as np\n\nfrom sklearn.ensemble import RandomForestClassifier\nfrom numpy import genfromtxt, savetxt\n\ndef main():\n #create the training & test sets, skipping the header row with [1:]\n dataset = genfromtxt(open('./input/train.csv','r'), delimiter=',', dtype='f8')[1:] \n target = [x[0] for x in dataset]\n train = [x[1:] for x in dataset]\n test = genfromtxt(open('./input/test.csv','r'), delimiter=',', dtype='f8')[1:]\n \n #create and train the random forest\n rf = RandomForestClassifier(n_estimators=100, n_jobs=80)\n rf.fit(train, target)\n\n #savetxt('Data/submission2.csv', rf.predict(test), delimiter=',', fmt='%f')\n savetxt('submissions/randomForest.csv', np.c_[range(1,len(test)+1),rf.predict(test)], delimiter=',', header = 'ImageId,Label', comments = '', fmt='%d')\n\nif __name__==\"__main__\":\n main()\n\n\n","sub_path":"kaggle/digitrecognizer/rf.py","file_name":"rf.py","file_ext":"py","file_size_in_byte":871,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} {"seq_id":"129142090","text":"#!/usr/bin/python\n\n# Import\nimport RPi.GPIO as GPIO\nimport time\nimport datetime\nimport os\nimport subprocess\nimport sys\nprint(\"BEWEGUNGSMELDER\")\nprint(\"\")\n\n# Board mode: pins are addressed by their physical pin number\nGPIO.setmode(GPIO.BOARD)\n\n# Define the GPIO pin for the data input from the sensor\nPIR_GPIO = 23\nGPIO.setup(PIR_GPIO, GPIO.IN)\n\n\n\n#io.setmode(io.BCM)\nSHUTOFF_DELAY = 120 # in seconds, how long the monitor will be on until next button press or PIR detection\n#PIR_PIN = 23 # 15 on the board (this needn't be a PIR. Can be a button also)\n\n\ndef main():\n subprocess.call(\"sh /home/pi/magic_mirror.sh\", shell = True)\n turned_off = True\n turn_off()\n\n last_motion_time = time.time()\n\n while True:\n read = GPIO.input(PIR_GPIO)\n \n if read == 1:\n print(\"ALARM %s: Bewegung erkannt!\" % datetime.datetime.now())\n last_motion_time = time.time()\n if turned_off:\n turned_off = False\n turn_on()\n else:\n if not turned_off and time.time() > (last_motion_time +\n SHUTOFF_DELAY):\n turned_off = True\n sys.stdout.flush()\n turn_off()\n\n\ndef turn_on():\n subprocess.call(\"sh /home/pi/monitor_on.sh\", shell=True)\n\n\ndef turn_off():\n subprocess.call(\"sh /home/pi/monitor_off.sh\", shell=True)\n\n\nif __name__ == '__main__':\n try:\n main()\n except KeyboardInterrupt:\n GPIO.cleanup()\n","sub_path":"motiondetector.py","file_name":"motiondetector.py","file_ext":"py","file_size_in_byte":1474,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} {"seq_id":"276467540","text":"#-*- coding: utf-8 -*-\n\nimport os\nimport sys\n\nfrom setuptools import setup, find_packages\n\n__author__ = 'Jayme Tosi Neto'\n\nextra = {}\nif sys.version_info >= (3,):\n pass\n\nhere = os.path.abspath(os.path.dirname(__file__))\nREADME = open(os.path.join(here, '../README.txt')).read()\nCHANGES = open(os.path.join(here, '../CHANGES.txt')).read()\n\nrequires = [\n 'colander',\n 'deform',\n 'dogpile.cache',\n 'dogpile.core',\n 'Flask',\n 'flask-admin',\n 'Flask-Babel',\n 'Flask-Classy',\n 'flask-login',\n 'flask_mongoengine',\n 'Flask-Script',\n 'Flask-Session',\n 'Jinja2',\n 'mongoengine',\n 'nose',\n 'Unidecode',\n 'MarkupSafe',\n 'wtforms',\n 'Werkzeug',\n 'itsdangerous',\n #'wsgiref',\n]\n\nsetup(\n name='mosca',\n version='0.1.2.1',\n description='mosca',\n long_description=README + '\\n\\n' + CHANGES,\n classifiers=[\n #https://pypi.python.org/pypi?%3Aaction=list_classifiers\n \"Programming Language :: 
Python\",\n \"Framework :: Pyramid\",\n 'Development Status :: 1 - Planning',\n\n \"Topic :: Internet :: WWW/HTTP\",\n \"Topic :: Internet :: WWW/HTTP :: WSGI :: Application\",\n 'Topic :: Software Development :: Libraries',\n 'Environment :: Web Environment',\n 'Intended Audience :: Developers',\n 'Operating System :: OS Independent'],\n author='Jayme Tosi Neto;',\n author_email='kalkehcoisa@gmail.com;',\n url='',\n keywords='web wsgi bfg flask mysql',\n packages=find_packages(),\n include_package_data=True,\n zip_safe=False,\n test_suite='mosca',\n install_requires=requires,\n dependency_links=[\n ],\n entry_points=\"\"\"\\\n [paste.app_factory]\n main = mosca:main\n [console_scripts]\n initialize_db = mosca.scripts.initializedb:main\n run_dev = mosca.scripts.run_dev:main\n \"\"\",\n)\n","sub_path":"etc/mosca/mosca/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1835,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"15674226","text":"import socket\nimport time\n\ndef main():\n REC_IP = \"127.0.0.1\" #receiving machines IP \n REC_PORT = 5000 #port receiving machine listens on \n SERVER_ADDR = (REC_IP,REC_PORT)\n #create the socket \n sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n data = raw_input(\"Enter the number you wish to plot\")\n while True:\n sock.sendto(data, SERVER_ADDR)\n time.sleep(1)\n data = raw_input(\"Enter the number you wish to plot:\\n\")\nif __name__ == '__main__':\n main()\n","sub_path":"HostPC_Code_Base/simdata.py","file_name":"simdata.py","file_ext":"py","file_size_in_byte":608,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"16128471","text":"\n# CITS3002 2021 (Semester 1) Assignment - Sockets.\n# Jakub Wysocki [22716248]\n# Code Tested on: Windows 10\n\n\"\"\"\nThis server aims to create a connection between clients, and allow multiple clients to \nplay a game of tiles with one another.\n\nThis version of the server implements all FOUR Tiers, as outlined in the project outline.\nThis functionality includes, but is not limited to:\n\n- Being able to join in a currently in-progress game, and be \nsend the neccesary data to be in time with the rest of the connected clients.\nThese clients will act as spectators, recieving all game-state data, and are able\nto participate, potentially, in the next game.\n- Client's are able to disconnect and be eliminted from the game,\n without disrupting game progress.\n- If enough clients are connected then a new game is started; clients are given some time to reflect\nafter each game, specified in WON_DELAY_S.\n- The server detects AFK players after AFK_TIMER seconds of inactivity and chooses a random move\nto be played for them.\n\nTo start the server (on windows!) you can use the start_server.bat file.\nTo start a game client (again, windows!) 
you can use the start_game_client.bat file.\nOtherwise, you are able to use ~ python server.py to start the server.\n\n * Additional Information will be outlined in the Project's Report, bundled with this file during submission.\n\n\"\"\"\n\n\n# IMPORTS\nimport socket\nimport sys\nimport tiles\nimport selectors\nimport types\nimport random\nimport time\n\n# CONSTANTS\nMAX_PLAYERS = tiles.PLAYER_LIMIT\nWON_DELAY_S = 5 #seconds\nAFK_TIMER = 10 #seconds\nSTART_WAIT = 10 #seconds\n\n# GLOBALS\nsel = selectors.DefaultSelector()\nboard = tiles.Board()\ncurrentTurn = 0\ntime_spent_afk = 0\nwait_timer = 0\nfirst_start = True\nfirst_turn = True\nfirst_timer = True\nforce_start = False\nstarted_idnums = []\nlive_idnums = []\nclient_connections = []\njoined_msgs = []\neliminated_clients = []\ndisconnected_clients = []\nmessages_sent = []\nplayer_hand_dict = {}\nafk_dict = {}\nid_game_state = {}\n# client crash problem\n\ndef client_handler(key, mask):\n \"\"\"\n Once clients are accepted, this is where they will be handled.\n This function contains the majority of the server logic, and this\n is looped over all connected ID's constantly.\n\n It accepts the key and mask parameters, which contain all that is needed\n to utilize the current socket belonging to the client which is being handled.\n\n This function does not return any meaningful objects; rather, return is used to\n break execution of this function to stop processing the current client.\n \"\"\"\n\n # - SET UP DATA -\n data = key.data\n host, port = data.addr\n name = '{}:{}'.format(host, port)\n idnum = data.idnum\n \n\n # - GLOBALS -\n global currentTurn\n global first_start\n global joined_msgs\n global started_idnums\n global eliminated_clients\n global player_hand_dict\n global afk_dict\n global time_spent_afk\n global first_turn\n global id_game_state\n global wait_timer\n global force_start\n global first_timer\n\n msg = receive_msg_client(key, mask, idnum)\n\n # -- NEW PLAYER JOINS --\n if idnum not in live_idnums:\n if idnum not in disconnected_clients:\n new_player_joined(name, idnum)\n # reset waiting timer\n first_timer = True\n\n # -- START FIRST GAME --\n if len(live_idnums) >= MAX_PLAYERS or force_start:\n if first_start:\n #reset globals\n first_start = False\n force_start = False\n first_timer = True\n wait_timer = 0\n #start game\n start_new_game()\n else:\n # - CANNOT CONTINUE UNLESS SUFFICIENT PLAYERS CONNECTED -\n if first_start:\n # START GAME IF NO NEW PLAYERS JOIN WITHIN START_WAIT SECONDS (THE GIVEN TESTER NEEDED THIS)\n if first_timer:\n wait_timer = time.perf_counter()\n first_timer = False\n else:\n if (time.perf_counter() - wait_timer) >= START_WAIT:\n if len(live_idnums) > 1:\n force_start = True\n return\n \n # -- SKIP DISCONNECTED PLAYERS --\n if started_idnums[currentTurn] in disconnected_clients:\n currentTurn += 1\n if currentTurn > len(started_idnums) - 1:\n currentTurn = 0\n print(\"Player {1} ({0}) eliminated, skipping turn...\".format(name, idnum))\n # --- SEND NEXT TURN MESSAGE ---\n send_msg_all_clients(tiles.MessagePlayerTurn(started_idnums[currentTurn]).pack())\n\n # -- SKIP CURRENT IDNUM -- IF -> (1) It isn't their turn -> (2) They are a spectator --\n if idnum != started_idnums[currentTurn]:\n return\n\n # -- GAME COMPLETE -> START A NEW GAME --\n if len(eliminated_clients) >= (len(started_idnums)-1):\n time.sleep(WON_DELAY_S)\n if len(live_idnums) < MAX_PLAYERS:\n started_idnums = []\n first_start = True\n return\n else:\n start_new_game()\n return\n\n # -- SKIP ELIMINATED PLAYERS --\n if 
idnum in eliminated_clients:\n currentTurn += 1\n if currentTurn > len(started_idnums) - 1:\n currentTurn = 0\n print(\"Player {1} ({0}) eliminated, skipping turn...\".format(name, idnum))\n # --- SEND NEXT TURN MESSAGE ---\n send_msg_all_clients(tiles.MessagePlayerTurn(started_idnums[currentTurn]).pack())\n return\n\n # -- -- START AFK TIMER -- --\n if first_turn:\n first_turn = False\n afk_dict[started_idnums[currentTurn]] = time.perf_counter()\n\n if not msg:\n if started_idnums[currentTurn] not in afk_dict:\n return\n time_spent_afk += time.perf_counter() - afk_dict[started_idnums[currentTurn]]\n afk_dict[started_idnums[currentTurn]] = time.perf_counter()\n\n # AN AFK PLAYER IS MANAGED HERE\n if time_spent_afk >= AFK_TIMER:\n print(\"Player {}, ({}) AFK: Random Move.\".format(idnum, name))\n if not first_start: # Check if game has started\n time_spent_afk = 0\n first_turn = True\n # MAKE A VALID, RANDOM MOVE FOR THE PLAYER\n make_valid_move(started_idnums[currentTurn], None, True)\n return\n else:\n time_spent_afk = 0\n if started_idnums[currentTurn] in afk_dict:\n afk_dict.pop(started_idnums[currentTurn])\n\n # AN AFK PLAYER SHOULDN'T GET HERE\n first_turn = True\n\n # -- -- END AFK TIMER -- --\n\n # MAKE A VALID MOVE, BASED ON CLIENT MESSAGE.\n make_valid_move(started_idnums[currentTurn], msg, False)\n\n\ndef accept_new_client(socket, times_connected):\n \"\"\"\n This is where new sockets are accepted and set up to be used be the client handler.\n This method uses the selector library to manage multiple connections.\n \"\"\"\n global client_connections\n connection, address = socket.accept() # READ, SET IN SELECTOR BELOW\n client_connections.append([connection, times_connected])\n print(\"New connection accepted from {}, connection id: {}\".format(address, times_connected))\n connection.setblocking(False) # NEED THIS OR SERVER HANGS\n data = types.SimpleNamespace(addr=address, idnum=times_connected)\n events = selectors.EVENT_READ | selectors.EVENT_WRITE\n sel.register(connection, events, data=data)\n\ndef send_msg_all_clients(msg):\n \"\"\"\n This function sends out one message, msg, to all currently active connections.\n \"\"\"\n global client_connections\n global messages_sent\n messages_sent.append(msg)\n for client in client_connections:\n client[0].send(msg)\n\ndef msg_specific_client(msg, idnum):\n \"\"\"\n This function sends a message, msg, to a specific client, based on their idnum.\n \"\"\"\n global client_connections\n for client in client_connections:\n if client[1] == idnum:\n client[0].send(msg)\n\ndef new_player_joined(name, idnum):\n \"\"\"\n This function sets up a new client connecting, and joining a game.\n All clients join as effective \"spectators\", until a new game is begun.\n This function handles the delivery of the Welcome Message,\n and the delivery of the Player Joined Messages\n \"\"\"\n global live_idnums\n global messages_sent\n global client_connections\n if idnum not in live_idnums:\n live_idnums.append(idnum)\n # --- SEND A WELCOME MESSAGE ---\n msg_specific_client(tiles.MessageWelcome(idnum).pack(), idnum)\n # --- SEND PLAYER JOINED (ALL CLIENTS, but this one) ---\n if joined_msgs:\n for msg in joined_msgs:\n msg_specific_client(msg, idnum)\n # To preserve Me! 
Message, send to all but this idnum\n for client in client_connections:\n if client[1] != idnum:\n client[0].send(tiles.MessagePlayerJoined(name, idnum).pack())\n messages_sent.append(tiles.MessagePlayerJoined(name, idnum).pack())\n joined_msgs.append(tiles.MessagePlayerJoined(name, idnum).pack())\n # --- SEND ALL MOVES UP TO THIS POINT ---\n if started_idnums:\n # Adding below fixes tester for fifth test, manually it works fine\n # The tester doesn't seem to recognise the new game start for spectators.\n # See the below comment.\n for msg in messages_sent:\n #test_msg, consumed = tiles.read_message_from_bytearray(msg)\n #if not isinstance(test_msg, tiles.MessageGameStart):\n msg_specific_client(msg, idnum)\n\n\ndef start_new_game():\n \"\"\"\n This void, parameterless function starts a new game for a randomly chosen selection of\n currently connected clients. It is responsible for setting up global data and distributing \n tiles to players.\n \"\"\"\n\n global currentTurn\n global eliminated_clients\n global started_idnums\n global disconnected_count\n global messages_sent\n global board\n global player_hand_dict\n global afk_dict\n global id_game_state\n\n # reset globals\n disconnected_count = 0\n currentTurn = 0\n eliminated_clients = []\n messages_sent = []\n afk_dict = {}\n player_hand_dict = {}\n id_game_state = {}\n board.reset()\n\n # --- SEND NEW GAME MESSAGE (ALL CLIENTS) ---\n send_msg_all_clients(tiles.MessageGameStart().pack())\n \n # this, by definition, chooses random clients to play, and a random turn order.\n num_players = 4\n if len(live_idnums) < 4:\n num_players = len(live_idnums)\n started_idnums = random.sample(live_idnums, num_players)\n\n\n # --- SET ID GAME STATE ---\n for idnum in started_idnums:\n id_game_state[idnum] = [\"first\", []] \n # [1] is last tile placement (x and y position)\n # [0] is defined as follows\n # \"first\" turn is a unique event.\n # \"second\" turn is a unique event.\n # \"normal\" (3 -> n) turn is a unique event.\n\n\n # --- SEND TILES TO (ACTIVE) CLIENTS ---\n for idnum in started_idnums:\n player_hand_dict[idnum] = []\n for _ in range(tiles.HAND_SIZE):\n tileid = tiles.get_random_tileid()\n player_hand_dict[idnum].append(tileid)\n msg_specific_client(tiles.MessageAddTileToHand(tileid).pack(), idnum)\n \n # --- SEND NEW TURN MESSAGE (ALL CLIENTS) ---\n send_msg_all_clients(tiles.MessagePlayerTurn(started_idnums[currentTurn]).pack())\n\n\n#-------------------------------------------------- NOTE: a client leaving, then joining, then leaving again might cause issues\ndef receive_msg_client(key, mask, idnum):\n \"\"\"\n This function is used to read information sent by a client, using their\n established socket with the server. This function is called on every iteration\n of the client handler. 
\n\n Players who disconnect are also managed in this function, and their relevant \n information is changed here.\n\n When there is no data available to read from the connection, this function will return False;\n otherwise, it will return the received message.\n \"\"\"\n global live_idnums\n global client_connections\n global disconnected_count\n\n buffer = bytearray()\n connection = key.fileobj\n data = key.data\n\n if mask & selectors.EVENT_READ:\n try:\n chunk = connection.recv(4096)\n except:\n print(\"Connection to {} lost, closing connection.\".format(data.addr))\n chunk = b''\n # can move turn/eliminated checks here to not save moves made by players\n # Eliminate if disconnected.\n if not chunk:\n print('Closing connection to', data.addr)\n sel.unregister(connection)\n connection.close()\n\n # DISCONNECT SPECIAL CONDITIONS\n\n # REMOVE FROM LIVEID LIST\n live_idnums.remove(idnum)\n # REMOVE FROM CONNECTIONS\n for client in client_connections:\n if client[1] == idnum:\n client_connections.remove(client)\n # ELIMINATE FROM GAME\n if idnum in started_idnums:\n if idnum not in eliminated_clients:\n eliminated_clients.append(idnum)\n # DO NOT ELIMINATE BEFORE FIRST TURN???!!!\n send_msg_all_clients(tiles.MessagePlayerEliminated(idnum).pack())\n disconnected_clients.append(idnum)\n #Remove from AFK DICT\n if idnum in afk_dict:\n afk_dict.pop(idnum)\n\n buffer.extend(chunk)\n msg, consumed = tiles.read_message_from_bytearray(buffer)\n print('received message {}'.format(msg))\n buffer = buffer[consumed:]\n return msg\n else:\n return False\n\ndef make_valid_move(idnum, msg, is_random):\n \"\"\"In this function we will attempt to make a valid move for the given idnum,\n regardless of whether it is a tile move or a token move. There is also an option\n to generate a random move, without using the given message field.\n\n The random move is used for players who have been AFK for over\n AFK_TIMER seconds. For this use case, use None for a msg value.\n For a random move, is_random should be set to True, False otherwise.\n \"\"\"\n global started_idnums\n global eliminated_clients\n global player_hand_dict\n global live_idnums\n global currentTurn\n global id_game_state\n \n move_found = False\n random_tile = False\n random_token = False\n\n # - FOR RANDOM TILE MOVEMENT\n # all current tileids in hand\n hand = player_hand_dict[started_idnums[currentTurn]].copy()\n # all possible rotation values\n r_val = list(range(0, 4))\n # all possible x values\n x_pos = list(range(0, tiles.BOARD_WIDTH))\n # all possible y values\n y_pos = list(range(0, tiles.BOARD_HEIGHT))\n # Ensure that moves are random.\n random.shuffle(x_pos)\n random.shuffle(y_pos)\n random.shuffle(r_val)\n random.shuffle(hand)\n\n # - FOR RANDOM TOKEN MOVEMENT\n # all possible token locations, randomized\n t_pos = list(range(0, 8))\n random.shuffle(t_pos)\n \n # This uses a brute force method to find a valid tile, will run slower on\n # larger boards. There is a possibility of using the inbuilt id_game_state to make\n # smarter decisions if required, so the extra code has been left in place. 
\n # Currently a != \"second\" would suffice.\n if is_random:\n ### FOR FIRST TURN ###\n if id_game_state[started_idnums[currentTurn]][0] == \"first\":\n # chooses a random spot on edge of board to play.\n for x in x_pos:\n for y in y_pos:\n for r in r_val:\n for tileid in hand:\n if not move_found:\n if board.set_tile(x, y, tileid, r, idnum):\n msg = tiles.MessagePlaceTile(idnum, tileid, r, x, y)\n move_found = True\n random_tile = True\n\n ### FOR TOKEN MOVEMENTS ###\n if id_game_state[started_idnums[currentTurn]][0] == \"second\":\n # choose a random token position to play.\n for x in x_pos:\n for y in y_pos:\n for t in t_pos:\n if not move_found:\n if board.set_player_start_position(idnum, x, y, t):\n msg = tiles.MessageMoveToken(idnum, x, y, t)\n move_found = True\n random_token = True\n\n ### FOR NORMAL TILE MOVMENT ###\n if id_game_state[started_idnums[currentTurn]][0] == \"normal\":\n # chooses a random spot on board to play.\n for x in x_pos:\n for y in y_pos:\n for r in r_val:\n for tileid in hand:\n if not move_found:\n if board.set_tile(x, y, tileid, r, idnum):\n msg = tiles.MessagePlaceTile(idnum, tileid, r, x, y)\n move_found = True\n random_tile = True\n\n # ---- NEXT TURN BEGINS HERE ----\n # sent by the player to put a tile onto the board (in all turns except\n # their second)\n if isinstance(msg, tiles.MessagePlaceTile):\n #This checks if the player has this tile in their hand -- FAIR PLAY, CHECK HAND.\n if msg.tileid not in player_hand_dict[started_idnums[currentTurn]]:\n print(\"Player {0} attempted to play a tile, {1}, not in their hand: {2}.\".format(idnum, msg.tileid, player_hand_dict[started_idnums[currentTurn]]))\n return\n if random_tile or board.set_tile(msg.x, msg.y, msg.tileid, msg.rotation, msg.idnum):\n # notify client that placement was successful\n send_msg_all_clients(msg.pack())\n\n # check for token movement\n check_ids = []\n for started_idnum in started_idnums:\n if started_idnum not in eliminated_clients:\n check_ids.append(started_idnum)\n\n positionupdates, eliminated = board.do_player_movement(check_ids)\n\n # pickup a new tile, update hand dictionary\n player_hand_dict[started_idnums[currentTurn]].remove(msg.tileid)\n tileid = tiles.get_random_tileid()\n msg_specific_client(tiles.MessageAddTileToHand(tileid).pack(), started_idnums[currentTurn])\n player_hand_dict[started_idnums[currentTurn]].append(tileid)\n\n #update current id's game state\n if id_game_state[started_idnums[currentTurn]][0] == \"first\":\n id_game_state[started_idnums[currentTurn]][0] = \"second\"\n\n for msg in positionupdates: # - --------------------------------------------------------------------------------------------- eliminated tokens move, fix maybe???\n send_msg_all_clients(msg.pack())\n \n for idnum in eliminated:\n if idnum not in eliminated_clients:\n print(\"player {} was eliminated!\".format(idnum))\n eliminated_clients.append(idnum)\n send_msg_all_clients(tiles.MessagePlayerEliminated(idnum).pack())\n\n # start next turn\n currentTurn += 1\n if currentTurn > len(started_idnums) - 1:\n currentTurn = 0\n send_msg_all_clients(tiles.MessagePlayerTurn(started_idnums[currentTurn]).pack())\n\n # sent by the player in the second turn, to choose their token's\n # starting path\n elif isinstance(msg, tiles.MessageMoveToken):\n if not board.have_player_position(msg.idnum) or random_token:\n if random_token or board.set_player_start_position(msg.idnum, msg.x, msg.y, msg.position):\n\n # check for token movement\n check_ids = []\n for started_idnum in started_idnums:\n if 
started_idnum not in eliminated_clients:\n check_ids.append(started_idnum)\n positionupdates, eliminated = board.do_player_movement(check_ids)\n\n if id_game_state[started_idnums[currentTurn]][0] == \"second\":\n id_game_state[started_idnums[currentTurn]][0] = \"normal\" \n\n for msg in positionupdates:\n send_msg_all_clients(msg.pack())\n \n for idnum in eliminated:\n if idnum not in eliminated_clients:\n print(\"player {} was eliminated!\".format(idnum))\n eliminated_clients.append(idnum)\n send_msg_all_clients(tiles.MessagePlayerEliminated(idnum).pack())\n \n # start next turn\n currentTurn += 1\n if currentTurn > len(started_idnums) - 1:\n currentTurn = 0\n send_msg_all_clients(tiles.MessagePlayerTurn(started_idnums[currentTurn]).pack())\n return\n\n\n# #\n# This is where the programs \"execution\" begins. #\n# #\n\n# A list of tuples outlining the connections to the server\ntimes_connected = 0\n\n# create a TCP/IP socket\nlisten_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\n# listen on all network interfaces\nHOSTNAME = ''\nPORT_NUM = 30020\nserver_address = (HOSTNAME, PORT_NUM)\nlisten_sock.bind(server_address)\n\nprint('listening on {}'.format(listen_sock.getsockname()))\n\n# leave as system default\nlisten_sock.listen()\n# this will not \"hang\" the server\nlisten_sock.setblocking(False)\n\n# selector usage\n# this connects the selector with the listening socket\n# data=None identifes the listening socket\nsel.register(listen_sock, selectors.EVENT_READ, data=None)\n\nwhile True:\n events = sel.select(timeout=None)\n for key, mask in events:\n if key.data is None: # Listening socket selector returns None, new client needs accepting.\n accept_new_client(key.fileobj, times_connected)\n times_connected += 1\n else:\n client_handler(key, mask) # A client socket which has already been accepted.","sub_path":"project1files/my_server.py","file_name":"my_server.py","file_ext":"py","file_size_in_byte":20447,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"81608578","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Feb 17 14:56:40 2021\r\n\r\n@author: kdutta01\r\n\"\"\"\r\n\r\nimport tensorflow as tf\r\nimport os\r\nimport numpy as np\r\nimport skimage.transform as trans\r\nfrom keras.models import Model\r\nfrom keras.layers import Conv3D, Conv3DTranspose, MaxPooling3D, concatenate, Input, Dropout\r\nfrom keras.optimizers import Adam\r\n#from keras.utils import plot_model\r\nfrom tensorflow.keras import backend as K\r\n\r\nK.set_image_data_format('channels_last')\r\n\r\n#Defining Loss Functions and Accuracy\r\n\r\nsmooth = 1\r\ndef dice_coef(y_true, y_pred):\r\n y_true_f = K.flatten(y_true)\r\n y_pred_f = K.flatten(y_pred)\r\n intersection = K.sum(y_true_f * y_pred_f)\r\n return (2. * intersection + smooth) / (K.sum(y_true_f) + K.sum(y_pred_f) + smooth)\r\n\r\ndef dice_loss(y_true, y_pred):\r\n y_true_f = K.flatten(y_true)\r\n y_pred_f = K.flatten(y_pred)\r\n intersection = K.sum(y_true_f * y_pred_f)\r\n return (1 -(2. 
* intersection + smooth) / (K.sum(y_true_f) + K.sum(y_pred_f) + smooth))\r\n\r\ndef common_layer(filters, layer):\r\n layer = Conv3D(filters, \r\n kernel_size = (3,3,3), \r\n padding = 'same',\r\n activation = 'relu',\r\n kernel_initializer = 'glorot_normal')(layer)\r\n return layer\r\n\r\ndef rec_layer(filters,layer):\r\n reconv1 = Conv3D(filters, kernel_size = (3,3,3), padding = 'same', activation='relu')(layer)\r\n reconc1 = concatenate([layer, reconv1], axis=4)\r\n drop_inter = Dropout(0.3)(reconc1)\r\n reconv2 = Conv3D(filters, kernel_size = (3, 3, 3), activation='relu', padding='same')(drop_inter)\r\n reconc2 = concatenate([layer, reconv2], axis=4)\r\n return reconc2\r\n\r\nimage_depth = 16\r\nimage_row = 128\r\nimage_col = 128\r\n\r\ndef r2udense3Dnet(pretrained_weights = None):\r\n inputs = Input((image_depth, image_row, image_col, 1))\r\n conv1 = rec_layer(32, inputs)\r\n conv1 = rec_layer(32,conv1)\r\n conc1 = concatenate([inputs, conv1], axis = 4)\r\n pool1 = MaxPooling3D(pool_size = (2,2,2))(conc1)\r\n \r\n conv2 = rec_layer(64, pool1)\r\n conv2 = rec_layer(64, conv2)\r\n conc2 = concatenate([pool1, conv2], axis = 4)\r\n pool2 = MaxPooling3D(pool_size = (2,2,2))(conc2)\r\n \r\n conv3 = rec_layer(128, pool2)\r\n conv3 = rec_layer(128, conv3)\r\n conc3 = concatenate([pool2, conv3], axis = 4)\r\n pool3 = MaxPooling3D(pool_size = (2,2,2))(conc3)\r\n \r\n conv4 = rec_layer(256, pool3)\r\n conv4 = rec_layer(256, conv4)\r\n conc4 = concatenate([pool3, conv4], axis = 4)\r\n pool4 = MaxPooling3D(pool_size = (2,2,2))(conc4)\r\n \r\n conv5 = rec_layer(512, pool4)\r\n conv5 = rec_layer(512, conv5)\r\n conc5 = concatenate([pool4, conv5], axis = 4)\r\n drop5 = Dropout(0.5)(conc5)\r\n \r\n \r\n up2 = Conv3DTranspose(256, 2, strides = (2,2,2), padding = 'same')(conv5)\r\n merge2 = concatenate([up2,conv4], axis = 4)\r\n deconv2 = rec_layer(256, merge2)\r\n deconv2 = rec_layer(256, deconv2)\r\n deconc2 = concatenate([merge2,deconv2], axis = 4)\r\n \r\n \r\n up3 = Conv3DTranspose(128, 2, strides = (2,2,2), padding = 'same')(deconc2)\r\n merge3 = concatenate([up3,conv3], axis = 4)\r\n deconv3 = rec_layer(128, merge3)\r\n deconv3 = rec_layer(128, deconv3)\r\n deconc3 = concatenate([merge3,deconv3], axis = 4)\r\n \r\n up4 = Conv3DTranspose(64, 2, strides = (2,2,2), padding = 'same')(deconc3)\r\n merge4 = concatenate([up4,conv2], axis=4)\r\n deconv4 = common_layer(64, merge4)\r\n deconv4 = common_layer(64, deconv4)\r\n deconc4 = concatenate([merge4,deconv4], axis = 4)\r\n \r\n up5 = Conv3DTranspose(32, 2, strides = (2,2,2), padding = 'same')(deconc4)\r\n merge5 = concatenate([up5,conv1], axis = 4)\r\n deconv5 = rec_layer(32,merge5)\r\n deconv5 = rec_layer(32,deconv5)\r\n deconc5 = concatenate([merge5,deconv5], axis = 4)\r\n \r\n deconv_final = Conv3D(1,(1,1,1), activation = 'sigmoid')(deconc5)\r\n \r\n model = Model(inputs = [inputs], outputs = [deconv_final])\r\n \r\n model.compile(optimizer = Adam(learning_rate = 1e-5, beta_1 = 0.9, beta_2 = 0.999), loss = 'binary_crossentropy', metrics = [dice_coef])\r\n \r\n if(pretrained_weights):\r\n \tmodel.load_weights(pretrained_weights) \r\n \r\n return model\r\n","sub_path":"r2udensenet3D.py","file_name":"r2udensenet3D.py","file_ext":"py","file_size_in_byte":4194,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"225681180","text":"import json\n\nfrom aiohttp import web\n\nfrom app.utility.base_world import BaseWorld\n\n\nclass Contact(BaseWorld):\n\n def __init__(self, services):\n self.name = 'http'\n 
self.description = 'Accept beacons through a REST API endpoint'\n self.app_svc = services.get('app_svc')\n self.contact_svc = services.get('contact_svc')\n self.log = self.create_logger('contact_http')\n\n async def start(self):\n self.app_svc.application.router.add_route('POST', '/beacon', self._beacon)\n\n async def _beacon(self, request):\n try:\n profile = json.loads(self.contact_svc.decode_bytes(await request.read()))\n profile['paw'] = profile.get('paw')\n profile['contact'] = profile.get('contact', self.name)\n agent, instructions = await self.contact_svc.handle_heartbeat(**profile)\n response = dict(paw=agent.paw,\n sleep=await agent.calculate_sleep(),\n watchdog=agent.watchdog,\n instructions=json.dumps([json.dumps(i.display) for i in instructions]))\n if agent.pending_contact != agent.contact:\n response['new_contact'] = agent.pending_contact\n self.log.debug('Sending agent instructions to switch from C2 channel %s to %s' % (agent.contact, agent.pending_contact))\n if agent.executor_change_to_assign:\n response['executor_change'] = agent.assign_pending_executor_change()\n self.log.debug('Asking agent to update executor: %s', response.get('executor_change'))\n return web.Response(text=self.contact_svc.encode_string(json.dumps(response)))\n except Exception as e:\n self.log.error('Malformed beacon: %s' % e)\n","sub_path":"app/contacts/contact_http.py","file_name":"contact_http.py","file_ext":"py","file_size_in_byte":1780,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"252518351","text":"import os\nimport re\nfrom urlparse import urlparse\n\ndef get_env(src):\n ret = {}\n for l in src.splitlines():\n mat = re.match('export\\s+([^=]+)=\"?([^\"]+)\"?', l)\n if mat:\n groups = mat.groups()\n ret[groups[0]] = groups[1]\n return ret\n\n\ndef test_celery_not_running(host):\n with host.sudo():\n assert not host.supervisor('celery').is_running\n\n\ndef test_gunicorn_not_running(host):\n with host.sudo():\n assert not host.supervisor('firecares').is_running\n","sub_path":"test-backup-web-infrastructure.py","file_name":"test-backup-web-infrastructure.py","file_ext":"py","file_size_in_byte":510,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"494946057","text":"import sys\nsys.stdin = open('장훈이_input.txt')\n\ndef powerset(n, k, current_sum):\n\n # if current_sum > B:\n # compare.append(current_sum)\n\n if n == k: # Basis Part\n printSet(n, current_sum)\n\n else: # Inductive Part\n A[k] = 1\n powerset(n, k+1, current_sum + height[k])\n A[k] = 0\n powerset(n, k+1, current_sum)\n\ndef printSet(n, current_sum):\n result = 0\n for i in range(n):\n if A[i] == 1:\n result += height[i]\n\n if result >= B:\n compare.append(current_sum)\n\n\n\nT = int(input())\n\nfor tc in range(1, T+1):\n N, B = map(int, input().split()) # N명, 높이 B\n A = [0 for _ in range(N)]\n height = list(map(int, input().split()))\n compare = []\n # print(N, B)\n # print(height)\n\n powerset(N, 0, 0)\n # print(compare)\n min = 0xffffff # 16진수로 큰값\n for i in range(len(compare)):\n if compare[i] - B <= min:\n min = compare[i] - B\n\n print('#{} {}'.format(tc, min))","sub_path":"work/sub_lect/adv/0902장훈이.py","file_name":"0902장훈이.py","file_ext":"py","file_size_in_byte":1016,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"268650938","text":"# Definition for singly-linked list.\r\n# class ListNode(object):\r\n# def __init__(self, val=0, next=None):\r\n# self.val = 
val\r\n# self.next = next\r\nclass Solution(object):\r\n def removeZeroSumSublists(self, head):\r\n \"\"\"\r\n :type head: ListNode\r\n :rtype: ListNode\r\n \"\"\"\r\n dummy = ListNode(next=head)\r\n curr = dummy\r\n total = 0\r\n mydict = {}\r\n while curr:\r\n total = total + curr.val\r\n if total in mydict.keys():\r\n to_remove = mydict[total].next\r\n temp_total = total\r\n # Remove the elements from the dict\r\n while to_remove != curr:\r\n temp_total += to_remove.val\r\n del mydict[temp_total]\r\n to_remove = to_remove.next\r\n # Update the pointer\r\n mydict[total].next = curr.next\r\n else:\r\n mydict[total] = curr\r\n curr = curr.next\r\n return dummy.next","sub_path":"Linkedlists/del_nodes_with_zero_sum.py","file_name":"del_nodes_with_zero_sum.py","file_ext":"py","file_size_in_byte":1031,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"460429326","text":"\nimport datetime\nimport json\nimport pandas as pd\nimport sqlite3\nfrom urllib.parse import urlparse\nfrom shutil import copy2\nimport os\nimport re\nfrom sys import platform\nimport psycopg2\n\nuser1 = os.environ['PGCONNECT_USER']\npassword1 = os.environ['PGCONNECT_PASSWORD']\nhost1 = os.environ['PGCONNECT_HOST']\nport1 = os.environ['PGCONNECT_PORT']\ndatabase1 = os.environ['PGCONNECT_DBNAME']\n\npostgconnect = 'dbname='+ database1 +' user=' + user1 + ' host=' + host1 + ' password='+ password1 + ' port=' + port1\n\n\ndef get_history(filters,days_ago):\n ## copies database with browsing data (browser databases are often not accessible while browser is in use)\n if platform == \"darwin\":\n user = os.environ['USER']\n if not os.path.exists('chrome_history'):\n os.makedirs('chrome_history')\n copy2(r\"/Users/\" + user + \"/Library/Application Support/Google/Chrome/Default/History\",\"chrome_history\")\n db = sqlite3.connect('chrome_history/History')\n ###\n elif platform == \"win32\":\n user = os.environ['USER']\n data_path = os.path.expanduser(\"~\") + r\"\\AppData\\Local\\Google\\Chrome\\User Data\\Default\"\n files = os.listdir(data_path)\n history_db = os.path.join(data_path, 'history')\n if not os.path.exists('chrome_history'):\n os.makedirs('chrome_history')\n copy2(history_db,\"chrome_history\")\n db = sqlite3.connect('chrome_history/history')\n ###\n cursor = db.cursor()\n ## sql query pulls date,url,title,visit count from database\n cursor.execute('''\n select datetime(last_visit_time/1000000-11644473600,'unixepoch') ,url,title,visit_count from urls order by last_visit_time desc\n ''')\n all_rows = cursor.fetchall()\n ## pandas dataframe of browsing history\n df_hist = pd.DataFrame(all_rows,columns=['datetime', 'url','title','visits'])\n ## filtes out irrevelvant sites from a list of strings - any partial match with string in list removes item\n df_hist_clean = df_hist[-df_hist['url'].str.contains('|'.join(filters))]\n today = datetime.date.today()\n ## produces df of links from the past num of days sumbitted - days_ago = 2 - all links browsed in the past 2 days\n state_date = today - datetime.timedelta(days=days_ago)\n time_frame_df = df_hist_clean[df_hist_clean['datetime']>str(state_date)]\n withpath = [url for url in time_frame_df['url'] if len(urlparse(url).path) > 4]\n return withpath\n\ndef upload_history(uid,hist_cur):\n now = datetime.datetime.now()\n conn = psycopg2.connect(postgconnect)\n cur = conn.cursor()\n hist_links = [(uid,link,now) for link in hist_cur]\n QueryData = b','.join(cur.mogrify(b'(%s,%s,%s)', row) for row in hist_links)\n 
cur.execute(b'INSERT INTO user_links (uid, link, date) VALUES' + QueryData)\n    conn.commit()\n    conn.close()\n\ndef main():\n    filters2 = [\n    'duolingo','file://','instagram','twitter','localhost','google','collegeofthedesert','youtube','starbucks','sbux-portal','toyota','quicklaunchsso','github','bankofamerica','plex','hbogo','showtime','netflix','thepiratebay','facebook','tvguide','customwebauth','t.co',\n    'aws','azure','127.0.0.1:5000','paperspace','floydhub','feedly','chrome-extension','downgradepc','.pdf','chrome'\n    ]\n    days = 2\n    uid = '3736ENQJEUavLjKX8ufPf5zfKl62'\n\n    hist_cur = get_history(filters2,days)\n\n    upload_history(uid,hist_cur)\n    \nif __name__ == '__main__':\n    main()\n","sub_path":"upload_history.py","file_name":"upload_history.py","file_ext":"py","file_size_in_byte":3421,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"647684114","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nimport django.core.validators\n\n\nclass Migration(migrations.Migration):\n\n    dependencies = [\n    ]\n\n    operations = [\n        migrations.CreateModel(\n            name='Course',\n            fields=[\n                ('id', models.AutoField(primary_key=True, auto_created=True, serialize=False, verbose_name='ID')),\n                ('name', models.CharField(max_length=128)),\n                ('description', models.TextField()),\n                ('semester', models.CharField(max_length=5)),\n                ('start_date', models.DateField()),\n                ('end_date', models.DateField()),\n                ('enrolment_fee', models.DecimalField(max_digits=7, decimal_places=2, blank=True, null=True)),\n            ],\n            options={\n            },\n            bases=(models.Model,),\n        ),\n        migrations.CreateModel(\n            name='Person',\n            fields=[\n                ('id', models.AutoField(primary_key=True, auto_created=True, serialize=False, verbose_name='ID')),\n                ('first_name', models.CharField(max_length=128)),\n                ('last_name', models.CharField(max_length=128)),\n                ('birth_date', models.DateField()),\n                ('gender', models.CharField(max_length=1, choices=[('M', 'Male'), ('F', 'Female')])),\n                ('email', models.EmailField(max_length=75)),\n            ],\n            options={\n            },\n            bases=(models.Model,),\n        ),\n        migrations.CreateModel(\n            name='Student',\n            fields=[\n                ('person_ptr', models.OneToOneField(auto_created=True, serialize=False, parent_link=True, to='course_management.Person', primary_key=True)),\n            ],\n            options={\n            },\n            bases=('course_management.person',),\n        ),\n        migrations.CreateModel(\n            name='Teacher',\n            fields=[\n                ('person_ptr', models.OneToOneField(auto_created=True, serialize=False, parent_link=True, to='course_management.Person', primary_key=True)),\n                ('degree', models.CharField(max_length=24)),\n                ('phone', models.CharField(max_length=16, validators=[django.core.validators.RegexValidator(message=\"Phone number must be entered in the format: '+999999999'. 
Up to 15 digits allowed.\", regex='^\\\\+?1?\\\\d{9,15}$')], blank=True, null=True)),\n ],\n options={\n },\n bases=('course_management.person',),\n ),\n ]\n","sub_path":"course_management/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":2568,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"42231625","text":"# -*- coding: utf-8 -*-\n\"\"\"Module for data plugin to represent a pseudo potential in UPF format.\"\"\"\nimport pathlib\nimport re\nimport typing\n\nfrom .pseudo import PseudoPotentialData\n\n__all__ = ('UpfData',)\n\nREGEX_ELEMENT_V1 = re.compile(r\"\"\"(?P<element>[a-zA-Z]{1,2})\\s+Element\"\"\")\nREGEX_ELEMENT_V2 = re.compile(r\"\"\"\\s*element\\s*=\\s*['\"]\\s*(?P<element>[a-zA-Z]{1,2})\\s*['\"].*\"\"\")\n\nPATTERN_FLOAT = r'[+-]?(\\d+(\\.\\d*)?|\\.\\d+)([eE][+-]?\\d+)?'\nREGEX_Z_VALENCE_V1 = re.compile(r\"\"\"(?P<z_valence>\"\"\" + PATTERN_FLOAT + r\"\"\")\\s+Z valence\"\"\")\nREGEX_Z_VALENCE_V2 = re.compile(r\"\"\"\\s*z_valence\\s*=\\s*['\"]\\s*(?P<z_valence>\"\"\" + PATTERN_FLOAT + r\"\"\")\\s*['\"].*\"\"\")\n\n\ndef parse_element(content: str):\n \"\"\"Parse the content of the UPF file to determine the element.\n\n :param stream: a filelike object with the binary content of the file.\n :return: the symbol of the element following the IUPAC naming standard.\n \"\"\"\n for regex in [REGEX_ELEMENT_V2, REGEX_ELEMENT_V1]:\n\n match = regex.search(content)\n\n if match:\n return match.group('element')\n\n raise ValueError(f'could not parse the element from the UPF content: {content}')\n\n\ndef parse_z_valence(content: str) -> int:\n \"\"\"Parse the content of the UPF file to determine the Z valence.\n\n :param stream: a filelike object with the binary content of the file.\n :return: the Z valence.\n \"\"\"\n for regex in [REGEX_Z_VALENCE_V2, REGEX_Z_VALENCE_V1]:\n\n match = regex.search(content)\n\n if match:\n z_valence = match.group('z_valence')\n\n try:\n z_valence = float(z_valence)\n except ValueError as exception:\n raise ValueError(f'parsed value for the Z valence `{z_valence}` is not a valid number.') from exception\n\n if int(z_valence) != z_valence:\n raise ValueError(f'parsed value for the Z valence `{z_valence}` is not an integer.')\n\n return int(z_valence)\n\n raise ValueError(f'could not parse the Z valence from the UPF content: {content}')\n\n\nclass UpfData(PseudoPotentialData):\n \"\"\"Data plugin to represent a pseudo potential in UPF format.\"\"\"\n\n _key_z_valence = 'z_valence'\n\n def set_file(self, source: typing.Union[str, pathlib.Path, typing.BinaryIO], filename: str = None, **kwargs): # pylint: disable=arguments-differ\n \"\"\"Set the file content and parse other optional attributes from the content.\n\n .. note:: this method will first analyse the type of the ``source`` and if it is a filepath will convert it\n to a binary stream of the content located at that filepath, which is then passed on to the superclass. This\n needs to be done first, because it will properly set the file and filename attributes that are expected by\n other methods. Straight after the superclass call, the source seeker needs to be reset to zero if it needs\n to be read again, because the superclass most likely will have read the stream to the end. Finally it is\n important that the ``prepare_source`` is called here before the superclass invocation, because this way the\n conversion from filepath to byte stream will be performed only once. 
Otherwise, each subclass would perform\n the conversion over and over again.\n\n :param source: the source pseudopotential content, either a binary stream, or a ``str`` or ``Path`` to the path\n of the file on disk, which can be relative or absolute.\n :param filename: optional explicit filename to give to the file stored in the repository.\n :raises TypeError: if the source is not a ``str``, ``pathlib.Path`` instance or binary stream.\n :raises FileNotFoundError: if the source is a filepath but does not exist.\n :raises ValueError: if the element symbol is invalid.\n \"\"\"\n source = self.prepare_source(source)\n super().set_file(source, filename, **kwargs)\n source.seek(0)\n content = source.read().decode('utf-8')\n self.element = parse_element(content)\n self.z_valence = parse_z_valence(content)\n\n @property\n def z_valence(self) -> typing.Optional[int]:\n \"\"\"Return the Z valence.\n\n :return: the Z valence.\n \"\"\"\n return self.get_attribute(self._key_z_valence, None)\n\n @z_valence.setter\n def z_valence(self, value: int):\n \"\"\"Set the Z valence.\n\n :param value: the Z valence.\n :raises ValueError: if the value is not a positive integer.\n \"\"\"\n if not isinstance(value, int) or value < 0:\n raise ValueError(f'`{value}` is not a positive integer')\n\n self.set_attribute(self._key_z_valence, value)\n","sub_path":"aiida_pseudo/data/pseudo/upf.py","file_name":"upf.py","file_ext":"py","file_size_in_byte":4695,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"57097127","text":"# Copyright 2016 Rackspace Hosting\n# All Rights Reserved\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport types\n\nimport mock\nfrom oslotest import base as test_base\n\nfrom ironic_lib import metrics as metricslib\n\n\nclass MockedMetricLogger(metricslib.MetricLogger):\n _gauge = mock.Mock(spec_set=types.FunctionType)\n _counter = mock.Mock(spec_set=types.FunctionType)\n _timer = mock.Mock(spec_set=types.FunctionType)\n\n\nclass TestMetricLogger(test_base.BaseTestCase):\n def setUp(self):\n super(TestMetricLogger, self).setUp()\n self.ml = MockedMetricLogger('prefix', '.')\n self.ml_no_prefix = MockedMetricLogger('', '.')\n self.ml_other_delim = MockedMetricLogger('prefix', '*')\n self.ml_default = MockedMetricLogger()\n\n def test_init(self):\n self.assertEqual(self.ml._prefix, 'prefix')\n self.assertEqual(self.ml._delimiter, '.')\n\n self.assertEqual(self.ml_no_prefix._prefix, '')\n self.assertEqual(self.ml_other_delim._delimiter, '*')\n self.assertEqual(self.ml_default._prefix, '')\n\n def test_get_metric_name(self):\n self.assertEqual(\n self.ml.get_metric_name('metric'),\n 'prefix.metric')\n\n self.assertEqual(\n self.ml_no_prefix.get_metric_name('metric'),\n 'metric')\n\n self.assertEqual(\n self.ml_other_delim.get_metric_name('metric'),\n 'prefix*metric')\n\n def test_send_gauge(self):\n self.ml.send_gauge('prefix.metric', 10)\n self.ml._gauge.assert_called_once_with('prefix.metric', 10)\n\n def test_send_counter(self):\n self.ml.send_counter('prefix.metric', 10)\n self.ml._counter.assert_called_once_with(\n 'prefix.metric', 10,\n sample_rate=None)\n self.ml._counter.reset_mock()\n\n self.ml.send_counter('prefix.metric', 10, sample_rate=1.0)\n self.ml._counter.assert_called_once_with(\n 'prefix.metric', 10,\n sample_rate=1.0)\n self.ml._counter.reset_mock()\n\n self.ml.send_counter('prefix.metric', 10, sample_rate=0.0)\n self.assertFalse(self.ml._counter.called)\n\n def test_send_timer(self):\n self.ml.send_timer('prefix.metric', 10)\n self.ml._timer.assert_called_once_with('prefix.metric', 10)\n\n @mock.patch('ironic_lib.metrics._time', autospec=True)\n @mock.patch('ironic_lib.metrics.MetricLogger.send_timer', autospec=True)\n def test_decorator_timer(self, mock_timer, mock_time):\n mock_time.side_effect = [1, 43]\n\n @self.ml.timer('foo.bar.baz')\n def func(x):\n return x * x\n\n func(10)\n\n mock_timer.assert_called_once_with(self.ml, 'prefix.foo.bar.baz',\n 42 * 1000)\n\n @mock.patch('ironic_lib.metrics.MetricLogger.send_counter', autospec=True)\n def test_decorator_counter(self, mock_counter):\n\n @self.ml.counter('foo.bar.baz')\n def func(x):\n return x * x\n\n func(10)\n\n mock_counter.assert_called_once_with(self.ml, 'prefix.foo.bar.baz', 1,\n sample_rate=None)\n\n @mock.patch('ironic_lib.metrics.MetricLogger.send_counter', autospec=True)\n def test_decorator_counter_sample_rate(self, mock_counter):\n\n @self.ml.counter('foo.bar.baz', sample_rate=0.5)\n def func(x):\n return x * x\n\n func(10)\n\n mock_counter.assert_called_once_with(self.ml, 'prefix.foo.bar.baz', 1,\n sample_rate=0.5)\n\n @mock.patch('ironic_lib.metrics.MetricLogger.send_gauge', autospec=True)\n def test_decorator_gauge(self, mock_gauge):\n @self.ml.gauge('foo.bar.baz')\n def func(x):\n return x\n\n func(10)\n\n mock_gauge.assert_called_once_with(self.ml, 'prefix.foo.bar.baz', 10)\n\n @mock.patch('ironic_lib.metrics._time', autospec=True)\n @mock.patch('ironic_lib.metrics.MetricLogger.send_timer', autospec=True)\n def test_context_mgr_timer(self, mock_timer, mock_time):\n mock_time.side_effect = 
[1, 43]\n\n with self.ml.timer('foo.bar.baz'):\n pass\n\n mock_timer.assert_called_once_with(self.ml, 'prefix.foo.bar.baz',\n 42 * 1000)\n\n @mock.patch('ironic_lib.metrics.MetricLogger.send_counter', autospec=True)\n def test_context_mgr_counter(self, mock_counter):\n\n with self.ml.counter('foo.bar.baz'):\n pass\n\n mock_counter.assert_called_once_with(self.ml, 'prefix.foo.bar.baz', 1,\n sample_rate=None)\n\n @mock.patch('ironic_lib.metrics.MetricLogger.send_counter', autospec=True)\n def test_context_mgr_counter_sample_rate(self, mock_counter):\n\n with self.ml.counter('foo.bar.baz', sample_rate=0.5):\n pass\n\n mock_counter.assert_called_once_with(self.ml, 'prefix.foo.bar.baz', 1,\n sample_rate=0.5)\n","sub_path":"ironic-lib-2.5.2/ironic_lib/tests/test_metrics.py","file_name":"test_metrics.py","file_ext":"py","file_size_in_byte":5513,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"206327329","text":"# vim: tabstop=4 shiftwidth=4 softtabstop=4\n\n# Copyright 2013 OpenStack Foundation\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\n\nfrom nova.compute import task_states\nfrom nova.compute import vm_mode\nfrom nova import exception\nfrom nova import test\nfrom nova.tests.virt.xenapi import stubs\nfrom nova.virt import fake\nfrom nova.virt.xenapi import driver as xenapi_conn\nfrom nova.virt.xenapi import fake as xenapi_fake\nfrom nova.virt.xenapi import vm_utils\nfrom nova.virt.xenapi import vmops\n\n\nclass VMOpsTestBase(stubs.XenAPITestBase):\n def setUp(self):\n super(VMOpsTestBase, self).setUp()\n self._setup_mock_vmops()\n self.vms = []\n\n def _setup_mock_vmops(self, product_brand=None, product_version=None):\n stubs.stubout_session(self.stubs, xenapi_fake.SessionBase)\n self._session = xenapi_conn.XenAPISession('test_url', 'root',\n 'test_pass',\n fake.FakeVirtAPI())\n self.vmops = vmops.VMOps(self._session, fake.FakeVirtAPI())\n\n def create_vm(self, name, state=\"running\"):\n vm_ref = xenapi_fake.create_vm(name, state)\n self.vms.append(vm_ref)\n vm = xenapi_fake.get_record(\"VM\", vm_ref)\n return vm, vm_ref\n\n def tearDown(self):\n super(VMOpsTestBase, self).tearDown()\n for vm in self.vms:\n xenapi_fake.destroy_vm(vm)\n\n\nclass VMOpsTestCase(test.TestCase):\n def setUp(self):\n super(VMOpsTestCase, self).setUp()\n self._setup_mock_vmops()\n\n def _setup_mock_vmops(self, product_brand=None, product_version=None):\n self._session = self._get_mock_session(product_brand, product_version)\n self._vmops = vmops.VMOps(self._session, fake.FakeVirtAPI())\n\n def _get_mock_session(self, product_brand, product_version):\n class Mock(object):\n pass\n\n mock_session = Mock()\n mock_session.product_brand = product_brand\n mock_session.product_version = product_version\n return mock_session\n\n def test_check_resize_func_name_defaults_to_VDI_resize(self):\n self.assertEquals(\n 'VDI.resize',\n self._vmops.check_resize_func_name())\n\n def _test_finish_revert_migration_after_crash(self, backup_made, 
new_made):\n instance = {'name': 'foo',\n 'task_state': task_states.RESIZE_MIGRATING}\n\n self.mox.StubOutWithMock(vm_utils, 'lookup')\n self.mox.StubOutWithMock(self._vmops, '_destroy')\n self.mox.StubOutWithMock(vm_utils, 'set_vm_name_label')\n self.mox.StubOutWithMock(self._vmops, '_attach_mapped_block_devices')\n self.mox.StubOutWithMock(self._vmops, '_start')\n\n vm_utils.lookup(self._session, 'foo-orig').AndReturn(\n backup_made and 'foo' or None)\n vm_utils.lookup(self._session, 'foo').AndReturn(\n (not backup_made or new_made) and 'foo' or None)\n if backup_made:\n if new_made:\n self._vmops._destroy(instance, 'foo')\n vm_utils.set_vm_name_label(self._session, 'foo', 'foo')\n self._vmops._attach_mapped_block_devices(instance, [])\n self._vmops._start(instance, 'foo')\n\n self.mox.ReplayAll()\n\n self._vmops.finish_revert_migration(instance, [])\n\n def test_finish_revert_migration_after_crash(self):\n self._test_finish_revert_migration_after_crash(True, True)\n\n def test_finish_revert_migration_after_crash_before_new(self):\n self._test_finish_revert_migration_after_crash(True, False)\n\n def test_finish_revert_migration_after_crash_before_backup(self):\n self._test_finish_revert_migration_after_crash(False, False)\n\n def test_determine_vm_mode_returns_xen(self):\n self.mox.StubOutWithMock(vm_mode, 'get_from_instance')\n\n fake_instance = \"instance\"\n vm_mode.get_from_instance(fake_instance).AndReturn(vm_mode.XEN)\n\n self.mox.ReplayAll()\n self.assertEquals(vm_mode.XEN,\n self._vmops._determine_vm_mode(fake_instance, None, None))\n self.mox.VerifyAll()\n\n def test_determine_vm_mode_returns_hvm(self):\n self.mox.StubOutWithMock(vm_mode, 'get_from_instance')\n\n fake_instance = \"instance\"\n vm_mode.get_from_instance(fake_instance).AndReturn(vm_mode.HVM)\n\n self.mox.ReplayAll()\n self.assertEquals(vm_mode.HVM,\n self._vmops._determine_vm_mode(fake_instance, None, None))\n self.mox.VerifyAll()\n\n def test_determine_vm_mode_returns_is_pv(self):\n self.mox.StubOutWithMock(vm_mode, 'get_from_instance')\n self.mox.StubOutWithMock(vm_utils, 'determine_is_pv')\n\n fake_instance = {\"os_type\": \"foo\"}\n fake_vdis = {'root': {\"ref\": 'fake'}}\n fake_disk_type = \"disk\"\n vm_mode.get_from_instance(fake_instance).AndReturn(None)\n vm_utils.determine_is_pv(self._session, \"fake\", fake_disk_type,\n \"foo\").AndReturn(True)\n\n self.mox.ReplayAll()\n self.assertEquals(vm_mode.XEN,\n self._vmops._determine_vm_mode(fake_instance, fake_vdis,\n fake_disk_type))\n self.mox.VerifyAll()\n\n def test_determine_vm_mode_returns_is_not_pv(self):\n self.mox.StubOutWithMock(vm_mode, 'get_from_instance')\n self.mox.StubOutWithMock(vm_utils, 'determine_is_pv')\n\n fake_instance = {\"os_type\": \"foo\"}\n fake_vdis = {'root': {\"ref\": 'fake'}}\n fake_disk_type = \"disk\"\n vm_mode.get_from_instance(fake_instance).AndReturn(None)\n vm_utils.determine_is_pv(self._session, \"fake\", fake_disk_type,\n \"foo\").AndReturn(False)\n\n self.mox.ReplayAll()\n self.assertEquals(vm_mode.HVM,\n self._vmops._determine_vm_mode(fake_instance, fake_vdis,\n fake_disk_type))\n self.mox.VerifyAll()\n\n def test_determine_vm_mode_returns_is_not_pv_no_root_disk(self):\n self.mox.StubOutWithMock(vm_mode, 'get_from_instance')\n self.mox.StubOutWithMock(vm_utils, 'determine_is_pv')\n\n fake_instance = {\"os_type\": \"foo\"}\n fake_vdis = {'iso': {\"ref\": 'fake'}}\n fake_disk_type = \"disk\"\n vm_mode.get_from_instance(fake_instance).AndReturn(None)\n\n self.mox.ReplayAll()\n self.assertEquals(vm_mode.HVM,\n 
self._vmops._determine_vm_mode(fake_instance, fake_vdis,\n fake_disk_type))\n self.mox.VerifyAll()\n\n def test_xsm_sr_check_relaxed_cached(self):\n self.make_plugin_call_count = 0\n\n def fake_make_plugin_call(plugin, method, **args):\n self.make_plugin_call_count = self.make_plugin_call_count + 1\n return \"true\"\n\n self.stubs.Set(self._vmops, \"_make_plugin_call\",\n fake_make_plugin_call)\n\n self.assertTrue(self._vmops._is_xsm_sr_check_relaxed())\n self.assertTrue(self._vmops._is_xsm_sr_check_relaxed())\n\n self.assertEqual(self.make_plugin_call_count, 1)\n\n\nclass InjectAutoDiskConfigTestCase(VMOpsTestBase):\n def setUp(self):\n super(InjectAutoDiskConfigTestCase, self).setUp()\n\n def test_inject_auto_disk_config_when_present(self):\n vm, vm_ref = self.create_vm(\"dummy\")\n instance = {\"name\": \"dummy\", \"uuid\": \"1234\", \"auto_disk_config\": True}\n self.vmops.inject_auto_disk_config(instance, vm_ref)\n xenstore_data = vm['xenstore_data']\n self.assertEquals(xenstore_data['vm-data/auto-disk-config'], 'True')\n\n def test_inject_auto_disk_config_none_as_false(self):\n vm, vm_ref = self.create_vm(\"dummy\")\n instance = {\"name\": \"dummy\", \"uuid\": \"1234\", \"auto_disk_config\": None}\n self.vmops.inject_auto_disk_config(instance, vm_ref)\n xenstore_data = vm['xenstore_data']\n self.assertEquals(xenstore_data['vm-data/auto-disk-config'], 'False')\n\n\nclass GetConsoleOutputTestCase(VMOpsTestBase):\n def setUp(self):\n super(GetConsoleOutputTestCase, self).setUp()\n\n def test_get_console_output_works(self):\n self.mox.StubOutWithMock(self.vmops, '_get_dom_id')\n\n instance = {\"name\": \"dummy\"}\n self.vmops._get_dom_id(instance, check_rescue=True).AndReturn(42)\n self.mox.ReplayAll()\n\n self.assertEqual(\"dom_id: 42\", self.vmops.get_console_output(instance))\n\n def test_get_console_output_throws_nova_exception(self):\n self.mox.StubOutWithMock(self.vmops, '_get_dom_id')\n\n instance = {\"name\": \"dummy\"}\n # dom_id=0 used to trigger exception in fake XenAPI\n self.vmops._get_dom_id(instance, check_rescue=True).AndReturn(0)\n self.mox.ReplayAll()\n\n self.assertRaises(exception.NovaException,\n self.vmops.get_console_output, instance)\n\n def test_get_dom_id_works(self):\n instance = {\"name\": \"dummy\"}\n vm, vm_ref = self.create_vm(\"dummy\")\n self.assertEqual(vm[\"domid\"], self.vmops._get_dom_id(instance))\n\n def test_get_dom_id_works_with_rescue_vm(self):\n instance = {\"name\": \"dummy\"}\n vm, vm_ref = self.create_vm(\"dummy-rescue\")\n self.assertEqual(vm[\"domid\"],\n self.vmops._get_dom_id(instance, check_rescue=True))\n\n def test_get_dom_id_raises_not_found(self):\n instance = {\"name\": \"dummy\"}\n self.create_vm(\"not-dummy\")\n self.assertRaises(exception.NotFound, self.vmops._get_dom_id, instance)\n\n def test_get_dom_id_works_with_vmref(self):\n vm, vm_ref = self.create_vm(\"dummy\")\n self.assertEqual(vm[\"domid\"],\n self.vmops._get_dom_id(vm_ref=vm_ref))\n","sub_path":"nova/tests/virt/xenapi/test_vmops.py","file_name":"test_vmops.py","file_ext":"py","file_size_in_byte":10156,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"490697632","text":"import maya.cmds as cmds\n\n\nclass BadResolutionsCheck(object):\n name = \"Bad objects in resolutions\" # Check's main title\n label = \"Checks children in resolutions to make sure that only meshes are there\" # Short description\n failLabel = \"Some odd objects were detected under some resolution nuls! Normally only meshes should be here. 
Can these be re-parented elsewhere?\" # Tip to show on fail\n    failObjects = [] # List of object names causing fail. List will reset every time block runs.\n    failType = 1 # 0=Error (must be fixed on fail), 1=Warning (is optional to fix on fail)\n    \n    # Un-comment out for auto-fix button to show up and run this\n    #def fix(self):\n        #pass\n    \n    # Return True if check passes, or False if it fails\n    def run(self):\n        objs = cmds.ls(transforms=True, l=True)\n        for i, obj in enumerate(objs):\n            has_resolution_tag = cmds.attributeQuery(\"RESOLUTION_TYPE\", node=obj, exists=True)\n            if not has_resolution_tag:\n                continue\n            \n            valid_node_types = [\"mesh\", \"transform\"]\n            valid_objs = cmds.listRelatives(obj, ad=True, f=True, type=valid_node_types) or []\n            all_objs = cmds.listRelatives(obj, ad=True, f=True) or []\n            valid_objs_set = set(valid_objs)\n            all_objs_set = set(all_objs)\n            invalid_objs = list( all_objs_set.difference(valid_objs_set) )\n            if not invalid_objs:\n                continue\n            \n            self.failObjects.extend(invalid_objs)\n        \n        if self.failObjects:\n            return False\n        return True\n","sub_path":"maya/general/tools/oa_shield/asset_export/bad_resolutions_check.py","file_name":"bad_resolutions_check.py","file_ext":"py","file_size_in_byte":1612,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"640891555","text":"#!/usr/bin/env python3\n\n\ndef is_substring(str1, str2):\n    # use >= 0 so a rotation found at index 0 (identical strings) also counts\n    return str1.find(str2) >= 0\n\n\ndef is_string_rotation(inpstr, inpstr2):\n    if len(inpstr) != len(inpstr2):\n        return False\n\n    s = inpstr + inpstr\n\n    if is_substring(s, inpstr2):\n        return True\n\n    return False\n\n\ndef main():\n    print('Enter a string:')\n\n    inpstr = input()\n\n    print('Enter a second string:')\n\n    inpstr2 = input()\n\n    if is_string_rotation(inpstr, inpstr2):\n        print('Rotation')\n    else:\n        print('Not rotation')\n\n\nif __name__ == '__main__':\n    main()\n","sub_path":"cracking-the-coding-interview/python-old/arrays_and_strings/string_rotation/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":559,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"465444030","text":"#!/usr/bin/env python3\n\n\"\"\"\nCreated on 23 Jul 2016\n\n@author: Bruno Beloff (bruno.beloff@southcoastscience.com)\n\"\"\"\n\nimport time\n\nfrom scs_dfe.gas.afe.ads1115 import ADS1115\n\nfrom scs_host.bus.i2c import I2C\n\n\n# --------------------------------------------------------------------------------------------------------------------\n\nADS1115.init()\n\ngain = ADS1115.GAIN_1p024\nrate = ADS1115.RATE_8\n\nsn1 = ADS1115.MUX_A3_GND\nsn2 = ADS1115.MUX_A2_GND\nsn3 = ADS1115.MUX_A1_GND\nsn4 = ADS1115.MUX_A0_GND\n\nmux = sn4\n\n\n# --------------------------------------------------------------------------------------------------------------------\n\ntry:\n    I2C.Sensors.open()\n\n    wrk = ADS1115(ADS1115.ADDR_WRK, rate)\n    print(\"wrk: %s\" % wrk)\n\n    aux = ADS1115(ADS1115.ADDR_AUX, rate)\n    print(\"aux: %s\" % aux)\n\n    wrk.start_conversion(mux, gain)\n    aux.start_conversion(mux, gain)\n\n    time.sleep(wrk.tconv)\n\n    v_wrk = wrk.read_conversion()\n    v_aux = aux.read_conversion()\n\n    print(\"wrk v: %0.6f\" % v_wrk)\n    print(\"aux v: %0.6f\" % v_aux)\n    print(\"-\")\n\n    v_wrk = wrk.convert(mux, gain)\n    print(\"wrk v: %0.6f\" % v_wrk)\n\nfinally:\n    
I2C.Sensors.close()\n","sub_path":"tests/gas/afe/ads1115_test.py","file_name":"ads1115_test.py","file_ext":"py","file_size_in_byte":1152,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"618754178","text":"import sqlite3\nfrom flask import Response\nimport json\n\ndb_name = 'player_services.db'\n\ndef get_char_data(): \n with sqlite3.connect(db_name) as conn:\n cursor = conn.cursor()\n sqli_query = \"SELECT * FROM characters\"\n cursor.execute(sqli_query)\n result = cursor.fetchall()\n final = []\n for row in result:\n item = {'id': row[0], 'game_id': row[1], 'player_id': row[2], 'title': row[3]}\n print(str(item))\n # final.append(item)\n # return Response(json.dumps(final), status=201, mimetype='application/json')\n\nget_char_data()\n","sub_path":"checkdb.py","file_name":"checkdb.py","file_ext":"py","file_size_in_byte":545,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"499888254","text":"import datetime\n\nimport numpy as np\nimport pandas as pd\n\n\ndef load_gtype(fname):\n \"\"\"Read genotype file into tidy DataFrame\"\"\"\n # Read file\n df = pd.read_csv(fname, delimiter='\\t', comment='#', header=[0, 1])\n\n # Reset the columns to be the second level of indexing\n df.columns = df.columns.get_level_values(1)\n\n # Only keep genotype up to last space because sometimes has n\n df.columns = [col[:col.rfind(' ')] if col.rfind(' ') > 0 else col\n for col in df.columns]\n\n # Melt the DataFrame\n df = pd.melt(df, var_name='genotype', value_name='fish').dropna()\n\n # Reset the index\n df = df.reset_index(drop=True)\n\n # Make sure data type is integer\n df.loc[:,'fish'] = df.loc[:, 'fish'].astype(int)\n\n return df\n\n\ndef load_data(fname, genotype_fname, lights_on, lights_off, day_in_the_life,\n extra_cols=[], rename={'middur': 'activity'}):\n \"\"\"Load in activity to DataFrame.\"\"\"\n # Get genotype information\n df_gt = load_gtype(genotype_fname)\n\n # Determine which columns to read in\n if extra_cols is None:\n extra_cols = []\n cols = ['location', 'stdate', 'sttime', 'middur']\n new_cols = list(set(extra_cols) - set(cols))\n usecols = cols + new_cols\n\n # Read file\n df = pd.read_csv(fname, usecols=usecols)\n\n # Convert location to well number (just drop 'c' in front)\n df = df.rename(columns={'location': 'fish'})\n df['fish'] = df['fish'].str.extract('(\\d+)', expand=False).astype(int)\n\n # Only keep fish that we have genotypes for\n df = df.loc[df['fish'].isin(df_gt['fish']), :]\n\n # Store the genotypes\n fish_lookup = {fish: df_gt.loc[df_gt['fish']==fish, 'genotype'].values[0]\n for fish in df_gt['fish']}\n df['genotype'] = df['fish'].apply(lambda x: fish_lookup[x])\n\n # Convert date and time to a time stamp\n df['time'] = pd.to_datetime(df['stdate'] + df['sttime'],\n format='%d/%m/%Y%H:%M:%S')\n\n # Get earliest time point\n t_min = pd.DatetimeIndex(df['time']).min()\n\n # Get Zeitgeber time in units of hours\n df['zeit'] = (df['time'] - t_min).dt.total_seconds() / 3600\n\n # Determine light or dark\n clock = pd.DatetimeIndex(df['time']).time\n df['light'] = np.logical_and(clock >= lights_on, clock < lights_off)\n\n # Which day it is (remember, day goes lights on to lights on)\n df['day'] = pd.DatetimeIndex(\n df['time'] - datetime.datetime.combine(t_min.date(), lights_on)).day \\\n + day_in_the_life - 1\n\n # Sort by fish and zeit\n df = df.sort_values(by=['fish', 'zeit']).reset_index(drop=True)\n\n # Return everything if we don't want to delete anything\n if 
'sttime' not in extra_cols:\n usecols.remove('sttime')\n if 'stdate' not in extra_cols:\n usecols.remove('stdate')\n usecols.remove('location')\n\n cols = usecols + ['time', 'fish', 'genotype', 'zeit', 'light', 'day']\n df = df[cols]\n\n # Rename columns\n if rename is not None:\n df = df.rename(columns=rename)\n\n return df\n\n\ndef load_perl_processed_activity(activity_file, df_gt):\n \"\"\"\n Load activity data into tidy DataFrame\n \"\"\"\n df = pd.read_csv(activity_file, delimiter='\\t', comment='#', header=[0, 1])\n\n # Make list of columns (use type conversion to allow list concatenation)\n df.columns = list(df.columns.get_level_values(1)[:2]) \\\n + list(df.columns.get_level_values(0)[2:])\n\n # Columns we want to drop\n cols_to_drop = df.columns[df.columns.str.contains('Unnamed')]\n df = df.drop(cols_to_drop, axis=1)\n\n # Start and end times are also dispensible\n df = df.drop(['start', 'end'], axis=1)\n\n # Find columns to drop (fish that do not have assigned genotypes)\n cols_to_drop = []\n for col in df.columns:\n if 'FISH' in col and int(col.lstrip('FISH')) not in df_gt['fish'].values:\n cols_to_drop.append(col)\n\n # Drop 'em!\n df = df.drop(cols_to_drop, axis=1)\n\n # Add a column for whether or not it is light\n df['light'] = pd.Series(df.CLOCK < 14.0, index=df.index)\n\n # Find where the lights switch from off to on.\n dark_to_light = np.where(np.diff(df['light'].astype(np.int)) == 1)[0]\n\n # Initialize array with day numbers\n day = np.zeros_like(df['light'], dtype=np.int)\n\n # Loop through transitions to set day numbers\n for i in range(len(dark_to_light) - 1):\n day[dark_to_light[i]+1:dark_to_light[i+1]+1] = i + 1\n day[dark_to_light[-1]+1:] = len(dark_to_light)\n\n # Insert the day numnber into DataFrame\n df['day'] = pd.Series(day, index=df.index)\n\n # Build ziet and put it in the DataFrame\n zeit = 24.0 * df['day'] + df['CLOCK']\n df['zeit'] = pd.Series(zeit, index=df.index)\n\n # Build list of genotypes\n genotypes = []\n\n # Check each column, put None for non-FISH column\n for col in df.columns:\n if 'FISH' in col:\n fish_id = int(col.lstrip('FISH'))\n genotypes.append(df_gt.genotype[df_gt.fish==fish_id].iloc[0])\n else:\n genotypes.append(None)\n\n df.columns = pd.MultiIndex.from_arrays((genotypes, df.columns),\n names=['genotype', 'variable'])\n\n # Value variables are the ones with FISH\n col_1 = df.columns.get_level_values(1)\n value_vars = list(df.columns[col_1.str.contains('FISH')])\n\n # ID vars are the non-FISH entries\n id_vars = list(df.columns[~col_1.str.contains('FISH')])\n\n # Perform the melt\n df = pd.melt(df, value_vars=value_vars, id_vars=id_vars,\n value_name='activity', var_name=['genotype', 'fish'])\n\n # Rename any column that is a tuple\n for col in df.columns:\n if type(col) is tuple:\n df.rename(columns={col: col[1]}, inplace=True)\n\n # Make fish IDs integer\n df['fish'] = df['fish'].apply(lambda x: int(x.lstrip('FISH')))\n\n return df\n\n\ndef resample(df, ind_win):\n \"\"\"\n Resample the DataFrame.\n \"\"\"\n # Make a copy so as to leave original unperturbed\n df_in = df.copy()\n\n # Sort the DataFrame by fish and then zeit\n df_in = df_in.sort_values(by=['fish', 'zeit']).reset_index(drop=True)\n\n # If no resampling is necessary\n n_fish = len(df_in.fish.unique())\n if ind_win == 1:\n zeit_ind = list(range(np.sum(df_in.fish==df_in.fish.unique()[0]))) * n_fish\n df_in['zeit_ind'] = zeit_ind\n return df_in\n\n # Make GroupBy object\n df_gb = df_in.groupby('fish')['activity']\n\n # Compute rolling sum\n s = 
df_gb.rolling(window=ind_win).sum().reset_index(level=0, drop='fish')\n df_in['window'] = s\n\n # Index of right edge of 1st averaging win. (ensure win. ends at lights out)\n light = df_in.loc[df_in.fish==df_in.fish.unique()[0], 'light']\n start_ind = ind_win \\\n + np.where(np.diff(light.values.astype(int)))[0][0] % ind_win\n\n # Inds to keep\n inds = np.array([])\n for fish in df_in.fish.unique():\n start = df_in.loc[df_in.fish==fish, :].index[0] + start_ind\n stop = df_in.loc[df_in.fish==fish, :].index[-1]\n inds = np.concatenate((inds, np.arange(start, stop, ind_win)))\n\n # Zeit indices\n zeit_ind = list(range(int(len(inds) // n_fish))) * n_fish\n\n # New DataFrame\n new_cols = ['fish', 'genotype', 'day', 'light', 'zeit', 'window']\n df_resampled = df_in.loc[inds, new_cols].reset_index(drop=True)\n df_resampled['zeit_ind'] = zeit_ind\n df_resampled = df_resampled.rename(columns={'window': 'activity'})\n\n return df_resampled\n","sub_path":"data_parser.py","file_name":"data_parser.py","file_ext":"py","file_size_in_byte":7516,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"302686541","text":"import json\r\nfrom imp import reload\r\n\r\nimport requests\r\n\r\nfrom TestPoolModel.model import Operationtable\r\nfrom TestPoolModel.model import db\r\nfrom conf.Setting import *\r\n\r\n\r\nclass GetSession():\r\n\r\n def __init__(self,body):\r\n self.body = body\r\n if self.body.get(\"env\")==\"4\":\r\n self.url = GetSessionProdUrl\r\n if self.body.get(\"env\")==\"2\":\r\n self.url = GetSessionProdUrl_test\r\n if self.body.get(\"env\")==\"3\":\r\n self.url = GetSessionPreUrl\r\n self.db = db\r\n self.data = {\r\n \"token\": \"debug\",\r\n \"userId\": self.body.get(\"u\"),\r\n \"accountId\": accountId,\r\n \"courseId\": self.body.get(\"courseId\"),\r\n \"sectionId\": self.body.get(\"sectionId\"),\r\n \"requestId\": rand,\r\n \"redoNum\": \"1\"\r\n }\r\n self.loger = self.body.get(\"loger\")\r\n self.header = {'Content-Type': 'application/json','grayTestList': \\\r\n 'W3siY29kZSI6IkRZTkFNSUNfVEFSR0VUIiwibmFtZSI6Ilx1ODFlYVx1NTJhOFx1N2I1NFx1Njg0OCJ9LHsiY29kZSI6IlRSQUNJTkdCQUNLIiwibmFtZSI6Ilx1NWY1Mlx1NGUwMFx1NTMxNlx1OGZmZFx1NjgzOVx1NmVhZlx1NmU5MCJ9XQ'}\r\n\r\n def testGetSessionId(self):\r\n loger = self.loger\r\n loger.info(\"获取 sessionId请求的参数为 {}\".format(str(self.data)))\r\n try:\r\n res1 = requests.post(url=self.url, data=json.dumps(self.data), headers=self.header,timeout = 5)\r\n except Exception as e:\r\n loger.error(\"异常为{}\".format(e))\r\n loger.error(\"获取 sessionId响应的参数 {}\".format(res1.text))\r\n Operationtable.query.filter(Operationtable.user_id == self.body.get(\"u\")).update({\r\n Operationtable.status: \"ERROR\",\r\n Operationtable.message: \"getSession失败\",\r\n Operationtable.starttime: int(round(time.time()))\r\n })\r\n self.db.session.commit()\r\n self.db.session.close()\r\n loger.error(\"{} 请求失败,返回状态码是 {},请检查请求参数和 URL \\\r\n \".format(\"获取sessionId \", res1.status_code))\r\n # raise NetWorkRxections(\"获取 sessionId \",res1.status_code)\r\n if json.loads(res1.text).get(\"message\") == \"getSession失败\":\r\n loger.info(\"获取 sessionId响应的参数 {}\".format(res1.text))\r\n Operationtable.query.filter(Operationtable.user_id == self.body.get(\"u\")).update({\r\n Operationtable.status: \"ERROR\",\r\n Operationtable.message: \"getSession失败\",\r\n Operationtable.starttime: int(round(time.time()))\r\n })\r\n self.db.session.commit()\r\n self.db.session.close()\r\n loger.error(\"{} 请求失败,返回状态码是 {},请检查请求参数和 URL \\\r\n \".format(\"获取sessionId \", 
res1.status_code))\r\n self.loger.info(\"开始获取 sessionID,请求返回状态码为 {} SessionId为:{}\"\\\r\n .format(res1.status_code,json.loads(res1.text)[\"result\"][\"sessionId\"]))\r\n self.loger.info(\"获取 sessionId 完成 ,响应为{}\".format(res1.text))\r\n import logging\r\n reload(logging)\r\n logging.shutdown()\r\n return json.loads(res1.text)[\"result\"][\"sessionId\"]\r\n","sub_path":"app/utils/GetSession.py","file_name":"GetSession.py","file_ext":"py","file_size_in_byte":3347,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"179758321","text":"from logging import debug\nfrom flask import Flask,render_template,url_for,request\nimport pandas as pd \nimport pickle\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.naive_bayes import MultinomialNB\nimport joblib\nimport numpy as np\nimport nltk\nfrom nltk.corpus import stopwords\nfrom nltk.stem import WordNetLemmatizer\nfrom google.transliteration import transliterate_text\nfrom google_trans_new import google_translator \nimport requests\n\n# loading the models\nclf = pickle.load(open('ngram_vectorizer', 'rb'))\nclf_model = pickle.load(open('sentiment_analyser', 'rb'))\n\napp = Flask(__name__)\n\n\n@app.route('/')\n@app.route('/home')\ndef home():\n return render_template(\"index.html\")\n\n@app.route('/index', methods=['POST'])\ndef index():\n return render_template(\"index.html\")\n\n#removing stop words\ndef remove_stopwords(review_list):\n stop = stopwords.words('english')\n removed = []\n for review in review_list:\n removed.append(' '.join([word for word in review.split() if word not in stop]))\n return removed\n\n#lemmatizing text\ndef get_lemmatized(review_list):\n lemmatizer = WordNetLemmatizer()\n return [' '.join([lemmatizer.lemmatize(word) for word in review.split()]) for review in review_list]\n\n\n@app.route(\"/submit\", methods=['POST'])\ndef submit():\n if request.method == 'POST':\n message = request.form['message']\n btn_inp = request.form['option']\n msg = message\n if(btn_inp == 'Tamil'):\n #transliterate the input message\n translit_result = transliterate_text(message, lang_code='ta')\n \n #translating the input message [Tamil ------ to ------ English]\n translator = google_translator() \n translate_result = translator.translate(translit_result, lang_src='ta', lang_tgt='en')\n msg = translate_result\n else:\n translit_result = 'None'\n translate_result = 'None'\n review = [msg]\n review = remove_stopwords(review)\n review = get_lemmatized(review)\n my_review = np.array(review)\n my_test_review = clf.transform(my_review)\n my_prediction = clf_model.predict(my_test_review)\n return render_template('result.html', inp_message=message, tamil_msg=translit_result, translated_eng_msg=translate_result, prediction=my_prediction)\n \n\nif __name__ == '__main__':\n app.run(debug=True)","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2411,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"42599591","text":"from collections import OrderedDict\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nfrom utils import *\nfrom matplotlib.colors import ListedColormap\n\ndata = read_binary('differences')\ndata = np.where(data > 0.0001, data, 0)\nfig = plt.figure(figsize=(13, 9))\n# plt.imshow(rotation_angles, extent=(rotation_angles.min(), rotation_angles.max(), rotation_angles.max(), rotation_angles.min()),\n# interpolation='nearest', cmap=cm.gist_rainbow)\n\n\n# colormap\ncmap = 
mpl.colors.ListedColormap(['black', 'blue', 'green', 'white', 'red'])\nbounds = [0, 0.00032, 0.00033, 0.001, 1, 300]\nnorm = mpl.colors.BoundaryNorm(bounds, cmap.N)\n# tell imshow about color map so that only set colors are used\nimg = plt.imshow(data, interpolation='nearest',\n cmap=cmap, norm=norm)\n\nplt.colorbar(img, cmap=cmap,\n norm=norm, boundaries=bounds, ticks=[0, 0.00001, 0.00001, 0.00033, 1, 300])\n\n'''\n#histogram \nrotation_angles =rotation_angles.flatten()\nplt.hist(rotation_angles,bins=20000)\nplt.xscale('log')\nplt.yscale('log')\nplt.show()\n'''\n\n# cmaps\ncmaps = OrderedDict()\n\ncmaps['Perceptually Uniform Sequential'] = [\n 'viridis', 'plasma', 'inferno', 'magma', 'cividis']\ncmaps['Sequential'] = [\n 'Greys', 'Purples', 'Blues', 'Greens', 'Oranges', 'Reds',\n 'YlOrBr', 'YlOrRd', 'OrRd', 'PuRd', 'RdPu', 'BuPu',\n 'GnBu', 'PuBu', 'YlGnBu', 'PuBuGn', 'BuGn', 'YlGn']\ncmaps['Sequential (2)'] = [\n 'binary', 'gist_yarg', 'gist_gray', 'gray', 'bone', 'pink',\n 'spring', 'summer', 'autumn', 'winter', 'cool', 'Wistia',\n 'hot', 'afmhot', 'gist_heat', 'copper']\ncmaps['Diverging'] = [\n 'PiYG', 'PRGn', 'BrBG', 'PuOr', 'RdGy', 'RdBu',\n 'RdYlBu', 'RdYlGn', 'Spectral', 'coolwarm', 'bwr', 'seismic']\ncmaps['Cyclic'] = ['twilight', 'twilight_shifted', 'hsv']\ncmaps['Qualitative'] = ['Pastel1', 'Pastel2', 'Paired', 'Accent',\n 'Dark2', 'Set1', 'Set2', 'Set3',\n 'tab10', 'tab20', 'tab20b', 'tab20c']\ncmaps['Miscellaneous'] = [\n 'flag', 'prism', 'ocean', 'gist_earth', 'terrain', 'gist_stern',\n 'gnuplot', 'gnuplot2', 'CMRmap', 'cubehelix', 'brg',\n 'gist_rainbow', 'rainbow', 'jet', 'nipy_spectral', 'gist_ncar']\n\n\nnrows = max(len(cmap_list) for cmap_category, cmap_list in cmaps.items())\ngradient = np.linspace(0, 1, 256)\ngradient = np.vstack((gradient, gradient))\n\n\ndef plot_color_gradients(cmap_category, cmap_list, nrows):\n fig, axes = plt.subplots(nrows=nrows)\n fig.subplots_adjust(top=0.95, bottom=0.01, left=0.2, right=0.99)\n axes[0].set_title(cmap_category + ' colormaps', fontsize=14)\n for ax, name in zip(axes, cmap_list):\n ax.imshow(gradient, aspect='auto', cmap=plt.get_cmap(name))\n pos = list(ax.get_position().bounds)\n x_text = pos[0] - 0.01\n y_text = pos[1] + pos[3]/2.\n fig.text(x_text, y_text, name, va='center', ha='right', fontsize=10)\n # Turn off *all* ticks & spines, not just the ones with colormaps.\n for ax in axes:\n ax.set_axis_off()\n\n\nfor cmap_category, cmap_list in cmaps.items():\n plot_color_gradients(cmap_category, cmap_list, nrows)\n\nplt.show()\n","sub_path":"UM_utils/other/colormaps_mpl.py","file_name":"colormaps_mpl.py","file_ext":"py","file_size_in_byte":3140,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"529336050","text":"import math\nimport numpy as np\n\nfrom ising_model.l1_ise import misc_loss_lik\n\n\nclass L0L2Constrained_ISE:\n def __init__(self, data_train, data_val, W_init=None):\n assert (-1 <= data_train).all() and (data_train <= 1).all()\n self.data_train = data_train\n self.data_val = data_val\n self.best_val_lik = 0\n\n self.N, self.P = data_train.shape\n\n if W_init is not None:\n self.W = W_init\n self.l1_norm = 2 * np.sum(np.abs(W_init), axis=1).max()\n else:\n self.W = np.zeros((self.P, self.P))\n self.l1_norm = 1e3\n\n def estimate_W(\n self,\n validate=False,\n n_steps=20,\n estimate_on_support=True,\n K=None\n ):\n # The continuation heuristic allows us to not tune the regularization parameter\n assert K is not None\n\n # Loop accross nodes\n for pred_idx in 
range(self.P):\n print(\"\\nNODE {}\".format(pred_idx))\n features_idx = np.array(\n list(range(0, pred_idx)) + list(range(pred_idx + 1, self.P))\n )\n best_beta = None\n best_val_lik = np.inf\n\n # Train data\n X_train = self.data_train[:, features_idx]\n y_train = self.data_train[:, pred_idx]\n beta_start = self.W[pred_idx, features_idx]\n\n # Val data\n X_val = self.data_val[:, features_idx]\n y_val = self.data_val[:, pred_idx]\n\n assert not validate\n alpha_list = [self.l1_norm / math.sqrt(K) + 1e-9]\n\n for alpha in alpha_list:\n beta = train_L0L2_ISE(X_train, y_train, alpha, K, beta_start=beta_start)\n\n if estimate_on_support: # no penalty\n support = np.where(beta != 0)[0]\n beta_support = train_L0L2_ISE(\n X_train[:, support], y_train, np.inf, K, beta_start=beta[support]\n )\n beta = np.zeros(X_train.shape[1])\n beta[support] = beta_support\n\n val_accu, val_loss, val_lik = misc_loss_lik(X_val, y_val, beta)\n print(\n \"Alpha: {}, (Neg. normalized) val lik:{}, val accu: {}\".format(\n round(alpha, 4), round(val_lik, 4), round(val_accu, 4)\n )\n )\n if validate:\n if val_lik < best_val_lik:\n best_beta = beta\n best_val_lik = val_lik\n else:\n best_beta = beta\n best_val_lik = val_lik\n\n self.W[pred_idx, features_idx] = best_beta\n self.best_val_lik += best_val_lik\n\n\ndef train_L0L2_ISE(X, y, alpha, K, eta=1e-3, T_max=300, beta_start=None):\n N, P = X.shape\n old_beta = np.ones(P)\n\n if beta_start is None:\n beta_m = np.zeros(P, dtype=float)\n else:\n beta_m = beta_start\n\n Lipchtiz_coeff = float(np.linalg.norm(X, ord='fro') ** 2)\n\n it = 0\n while np.linalg.norm(beta_m - old_beta) > eta and it < T_max:\n it += 1\n\n aux = y * np.exp(-y * np.dot(X, beta_m))\n gradient = -np.dot(X.T, aux)\n\n # Gradient descent\n old_beta = beta_m.copy()\n grad = beta_m - gradient / Lipchtiz_coeff\n\n # L0 thresholding\n coefs_sorted = np.abs(grad).argsort()[::-1]\n for idx in coefs_sorted[K:]:\n grad[idx] = 0\n beta_m = grad\n\n # L2 projection\n l2_norm = np.linalg.norm(beta_m, 2)\n if l2_norm >= alpha:\n beta_m *= alpha / l2_norm\n return beta_m.astype(np.float32)\n","sub_path":"ising_model/l0_l2constrained_ise.py","file_name":"l0_l2constrained_ise.py","file_ext":"py","file_size_in_byte":3659,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"630871840","text":"import os\nfrom datetime import datetime\nimport time\nimport tqdm\nimport pandas as pd\nimport random\nfrom sklearn.preprocessing import LabelEncoder\nimport numpy as np\nimport torch\n\n\nclass Preprocess:\n def __init__(self, args):\n self.args = args\n self.train_data = None\n self.valid_data = None\n self.test_data = None\n\n def get_train_data(self):\n return self.train_data\n\n def get_valid_data(self):\n return self.valid_data\n\n def get_test_data(self):\n return self.test_data\n\n def split_data(self, data, ratio=0.9, shuffle=True, seed=0):\n \"\"\"\n split data into two parts with a given ratio.\n \"\"\"\n if shuffle:\n random.seed(seed) # fix to default seed 0\n random.shuffle(data)\n\n size = int(len(data) * ratio)\n data_1 = data[:size]\n data_2 = data[size:]\n\n return data_1, data_2\n\n def __save_labels(self, encoder, name):\n le_path = os.path.join(self.args.asset_dir, name + \"_classes.npy\")\n np.save(le_path, encoder.classes_)\n\n def __preprocessing(self, df, is_train=True):\n cate_cols = [\n \"assessmentItemID\",\n \"testId\",\n \"KnowledgeTag\",\n ]\n\n if not os.path.exists(self.args.asset_dir):\n os.makedirs(self.args.asset_dir)\n\n for col in 
cate_cols:\n\n            le = LabelEncoder()\n            label_path = os.path.join(self.args.asset_dir, col + \"_classes.npy\")\n            le.classes_ = np.load(label_path)\n\n            # df[col] = df[col].apply(lambda x: x if x in le.classes_ else \"unknown\")\n\n            # assume all columns are categorical\n            df[col] = df[col].astype(str)\n            test = le.transform(df[col])\n            df[col] = test\n\n        # def convert_time(s):\n        #     timestamp = time.mktime(\n        #         datetime.strptime(s, \"%Y-%m-%d %H:%M:%S\").timetuple()\n        #     )\n        #     return int(timestamp)\n\n        # df[\"Timestamp\"] = df[\"Timestamp\"].apply(convert_time)\n\n        return df\n\n    def __feature_engineering(self, df):\n        # TODO\n        return df\n\n    def load_data_from_file(self, file_name, is_train=True):\n        csv_file_path = os.path.join(self.args.data_dir, file_name)\n\n        df = pd.read_csv(csv_file_path)  # , nrows=100000)\n        df = self.__feature_engineering(df)\n        df = self.__preprocessing(df, is_train)\n\n        # used later to determine the embedding_layer input size when embedding each feature\n\n        self.args.n_questions = len(\n            np.load(os.path.join(self.args.asset_dir, \"assessmentItemID_classes.npy\"))\n        )\n        self.args.n_test = len(\n            np.load(os.path.join(self.args.asset_dir, \"testId_classes.npy\"))\n        )\n        self.args.n_tag = len(\n            np.load(os.path.join(self.args.asset_dir, \"KnowledgeTag_classes.npy\"))\n        )\n        df = df.sort_values(by=[\"userID\", \"Timestamp\"], axis=0)\n        columns = [\n            \"userID\",\n            \"assessmentItemID\",\n            \"testId\",\n            \"answerCode\",\n            \"KnowledgeTag\",\n            \"elapsed\",\n            \"Timestamp\",\n            \"problem_number\",\n            \"test_mean\",\n            \"ItemID_mean\",\n            \"tag_mean\",\n            \"aug_idx\",\n        ]\n\n        group = (\n            df[columns]\n            .groupby(\"aug_idx\")\n            .apply(\n                lambda r: (\n                    r[\"testId\"].values,\n                    r[\"assessmentItemID\"].values,\n                    r[\"KnowledgeTag\"].values,\n                    r[\"answerCode\"].values,\n                    r[\"elapsed\"].values,\n                    r[\"Timestamp\"].values,\n                    r[\"problem_number\"].values,\n                    r[\"test_mean\"].values,\n                    r[\"ItemID_mean\"].values,\n                    r[\"tag_mean\"].values,\n                )\n            )\n        )\n\n        return group.values\n\n    def load_test_data_from_file(self, file_name, is_train=True):\n        csv_file_path = os.path.join(self.args.data_dir, file_name)\n\n        df = pd.read_csv(csv_file_path)  # , nrows=100000)\n        df = self.__feature_engineering(df)\n        df = self.__preprocessing(df, is_train)\n\n        # used later to determine the embedding_layer input size when embedding each feature\n\n        self.args.n_questions = len(\n            np.load(os.path.join(self.args.asset_dir, \"assessmentItemID_classes.npy\"))\n        )\n        self.args.n_test = len(\n            np.load(os.path.join(self.args.asset_dir, \"testId_classes.npy\"))\n        )\n        self.args.n_tag = len(\n            np.load(os.path.join(self.args.asset_dir, \"KnowledgeTag_classes.npy\"))\n        )\n        df = df.sort_values(by=[\"userID\", \"Timestamp\"], axis=0)\n        columns = [\n            \"userID\",\n            \"assessmentItemID\",\n            \"testId\",\n            \"answerCode\",\n            \"KnowledgeTag\",\n            \"elapsed\",\n            \"Timestamp\",\n            \"problem_number\",\n            \"test_mean\",\n            \"ItemID_mean\",\n            \"tag_mean\",\n        ]\n\n        group = (\n            df[columns]\n            .groupby(\"userID\")\n            .apply(\n                lambda r: (\n                    r[\"testId\"].values,\n                    r[\"assessmentItemID\"].values,\n                    r[\"KnowledgeTag\"].values,\n                    r[\"answerCode\"].values,\n                    r[\"elapsed\"].values,\n                    r[\"Timestamp\"].values,\n                    r[\"problem_number\"].values,\n                    r[\"test_mean\"].values,\n                    r[\"ItemID_mean\"].values,\n                    r[\"tag_mean\"].values,\n                )\n            )\n        )\n\n        return group.values\n\n    def load_train_data(self, file_name):\n        # self.train_data, self.valid_data = self.load_data_from_file(file_name)\n        self.train_data = self.load_data_from_file(file_name)\n\n    def load_valid_data(self, file_name):\n        # self.train_data, self.valid_data = self.load_data_from_file(file_name)\n        
self.valid_data = self.load_data_from_file(file_name)\n\n def load_test_data(self, file_name):\n self.test_data = self.load_test_data_from_file(file_name, is_train=False)\n\n\nclass DKTDataset(torch.utils.data.Dataset):\n def __init__(self, data, args):\n self.data = data\n self.args = args\n self.max_seq = -float(\"inf\")\n\n def __getitem__(self, index):\n row = self.data[index]\n\n # 각 data의 sequence length\n seq_len = len(row[0])\n\n (\n test,\n question,\n tag,\n correct,\n elapsed,\n timestamp,\n problem_number,\n test_mean,\n ItemID_mean,\n tag_mean,\n ) = (\n row[0],\n row[1],\n row[2],\n row[3],\n row[4],\n row[5],\n row[6],\n row[7],\n row[8],\n row[9],\n )\n\n cate_cols = [\n test,\n question,\n tag,\n correct,\n elapsed,\n timestamp,\n problem_number,\n test_mean,\n ItemID_mean,\n tag_mean,\n ]\n\n # max seq len을 고려하여서 이보다 길면 자르고 아닐 경우 그대로 냅둔다\n if seq_len > self.args.max_seq_len:\n self.max_seq = max(self.max_seq, seq_len)\n print(self.max_seq)\n for i, col in enumerate(cate_cols):\n cate_cols[i] = col[-self.args.max_seq_len :]\n mask = np.ones(self.args.max_seq_len, dtype=np.int16)\n else:\n\n mask = np.zeros(self.args.max_seq_len, dtype=np.int16)\n mask[-seq_len:] = 1\n\n # mask도 columns 목록에 포함시킴\n cate_cols.append(mask)\n\n # np.array -> torch.tensor 형변환\n for i, col in enumerate(cate_cols):\n cate_cols[i] = torch.tensor(col)\n\n return cate_cols\n\n def __len__(self):\n return len(self.data)\n\n\nfrom torch.nn.utils.rnn import pad_sequence\n\n\ndef collate(batch):\n col_n = len(batch[0])\n col_list = [[] for _ in range(col_n)]\n max_seq_len = len(batch[0][-1])\n\n # batch의 값들을 각 column끼리 그룹화\n for row in batch:\n for i, col in enumerate(row):\n pre_padded = torch.zeros(max_seq_len)\n pre_padded[-len(col) :] = col\n col_list[i].append(pre_padded)\n\n for i, _ in enumerate(col_list):\n col_list[i] = torch.stack(col_list[i])\n\n return tuple(col_list)\n\n\ndef get_loaders(args, train, valid):\n\n pin_memory = True\n train_loader, valid_loader = None, None\n\n if train is not None:\n trainset = DKTDataset(train, args)\n train_loader = torch.utils.data.DataLoader(\n trainset,\n num_workers=args.num_workers,\n shuffle=True,\n batch_size=args.batch_size,\n pin_memory=pin_memory,\n collate_fn=collate,\n )\n if valid is not None:\n valset = DKTDataset(valid, args)\n valid_loader = torch.utils.data.DataLoader(\n valset,\n num_workers=args.num_workers,\n shuffle=False,\n batch_size=args.batch_size,\n pin_memory=pin_memory,\n collate_fn=collate,\n )\n\n return train_loader, valid_loader\n","sub_path":"keonwoo/dataloader.py","file_name":"dataloader.py","file_ext":"py","file_size_in_byte":9375,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"590807260","text":"\"\"\"Encontrar números primos é uma tarefa difícil. 
Faça um programa que gera uma lista dos números primos existentes entre 1 e um número inteiro informado pelo usuário.\"\"\"\r\nprimos = []\r\nnumero = int(input(\"Informe um valor: \"))\r\nlista = []\r\nfor x in range (numero, 0, -1):\r\n lista.append(x)\r\nfor x in lista:\r\n cont = 0\r\n for y in range (x, 0, -1):\r\n if x % y == 0:\r\n cont += 1\r\n if cont == 2:\r\n primos.append(x)\r\nprint(primos)","sub_path":"ferias075.py","file_name":"ferias075.py","file_ext":"py","file_size_in_byte":469,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"91142689","text":"\"\"\"@package color trackbar program\nDocumentation for this module.\n\nThis program as the objective of identifying colors(HSV) in an image using trackbars and the mouse coursor.\n\"\"\"\nimport cv2 as cv\nimport numpy as np\nimport time\n\n\ndef nothing(x):\n \"\"\"\n A required callback method that goes into the trackbar function.\n \"\"\"\n pass\n\ndef resize(img,fx,fy):\n \"\"\"\n resize function\n @param [in]\n @param [in]\n @param [out]\n \"\"\"\n height, width = img.shape[:2]\n size = (int(width * fx), int(height * fy)) # bgr\n img = cv.resize(img, size)\n\n return img\n\ndef getposHsv(event,x,y,flags,param):\n \"\"\"\n function to transform the event from the mouse into a variation in the image and trackbar. \n The variation is [-10,+10] from the original value.\n @param [in] event\n @param [in] x : x value returned from the mouse click\n @param [in] y : y value returned from the mouse click\n @param [in] param\n \"\"\"\n\n global hsv_ret,color_h,color_s,color_v \n global l_h,u_h,l_v,u_v,u_s,l_s\n \n if event==cv.EVENT_LBUTTONDOWN:\n hsv_ret = hsv[y,x]\n color_h = hsv[y,x,0]\n color_s = hsv[y,x,1]\n color_v = hsv[y,x,2]\n l_h = color_h - 10\n u_h = color_h + 10\n l_s = color_s - 10\n u_s = color_s + 10\n l_v = color_v - 10\n u_v = color_v + 10\n\n\n l_h = cv.setTrackbarPos(\"L - H\", \"Trackbars\",l_h)\n l_s = cv.setTrackbarPos(\"L - S\", \"Trackbars\",l_s)\n l_v = cv.setTrackbarPos(\"L - V\", \"Trackbars\",l_v)\n u_h = cv.setTrackbarPos(\"U - H\", \"Trackbars\",u_h)\n u_s = cv.setTrackbarPos(\"U - S\", \"Trackbars\",u_s)\n u_v = cv.setTrackbarPos(\"U - V\", \"Trackbars\",u_v)\n\n\n# import the image\nframe = cv.imread(\"/home/alegria/VCI/OPENCV_COURSE/vciproj3/dataset2/rect/IMG_20210412_173344.jpg\")\n\n# resize function\nframe = resize(frame,0.2,0.2)\n\n# Create a window named trackbars.\ncv.namedWindow(\"Trackbars\")\n\n# Now create 6 trackbars that will control the lower and upper range of \n# H,S and V channels. The Arguments are like this: Name of trackbar, \n# window name, range,callback function. 
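The DKTDataset/collate pair in the dataloader.py record above left-pads every sequence with zeros to a fixed length, so the most recent interactions sit at the end of each row. A minimal sketch of that pre-padding with made-up data:

# Sketch of the pre-padding used by the collate function above: every
# sequence is left-padded with zeros to a fixed length. Illustrative only.
import torch

def pre_pad(seqs, max_len):
    out = torch.zeros(len(seqs), max_len)
    for i, s in enumerate(seqs):
        s = torch.as_tensor(s, dtype=torch.float)[-max_len:]  # keep the tail
        out[i, -len(s):] = s
    return out

batch = [[1, 2, 3], [4, 5], [6, 7, 8, 9, 10]]
print(pre_pad(batch, max_len=4))
# rows: [0, 1, 2, 3], [0, 0, 4, 5], [7, 8, 9, 10]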
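The prime-listing record above counts every divisor of every candidate, which is quadratic in the input. A common faster alternative (not the record's code) divides only by the primes already found, up to the square root:

# A faster alternative sketch for the same task (primes from 1 to n):
# trial division only up to the square root instead of counting all divisors.
def primes_up_to(n):
    primes = []
    for x in range(2, n + 1):
        if all(x % p for p in primes if p * p <= x):
            primes.append(x)
    return primes

print(primes_up_to(20))  # [2, 3, 5, 7, 11, 13, 17, 19]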
For Hue the range is 0-179 and\n# for S,V its 0-255.\ncv.createTrackbar(\"L - H\", \"Trackbars\", 0, 179, nothing)\ncv.createTrackbar(\"L - S\", \"Trackbars\", 0, 255, nothing)\ncv.createTrackbar(\"L - V\", \"Trackbars\", 0, 255, nothing)\ncv.createTrackbar(\"U - H\", \"Trackbars\", 179, 179, nothing)\ncv.createTrackbar(\"U - S\", \"Trackbars\", 255, 255, nothing)\ncv.createTrackbar(\"U - V\", \"Trackbars\", 255, 255, nothing)\n\n# Convert the BGR image to HSV image.\ncv.setMouseCallback(\"Trackbars\",getposHsv)\n\nwhile True:\n\n # Convert the BGR image to HSV image.\n hsv = cv.cvtColor(frame, cv.COLOR_BGR2HSV)\n\n # Get the new values of the trackbar in real time as the user changes \n # them\n l_h = cv.getTrackbarPos(\"L - H\", \"Trackbars\")\n l_s = cv.getTrackbarPos(\"L - S\", \"Trackbars\")\n l_v = cv.getTrackbarPos(\"L - V\", \"Trackbars\")\n u_h = cv.getTrackbarPos(\"U - H\", \"Trackbars\")\n u_s = cv.getTrackbarPos(\"U - S\", \"Trackbars\")\n u_v = cv.getTrackbarPos(\"U - V\", \"Trackbars\")\n \n # Set the lower and upper HSV range according to the value selected\n # by the trackbar\n lower_range = np.array([l_h, l_s, l_v])\n upper_range = np.array([u_h, u_s, u_v])\n \n # Filter the image and get the binary mask, where white represents \n # your target color\n mask = cv.inRange(hsv, lower_range, upper_range)\n\n # You can also visualize the real part of the target color (Optional)\n res = cv.bitwise_and(frame, frame, mask=mask)\n\n\n # load background (could be an image too)\n bk = np.full(frame.shape, 255, dtype=np.uint8) # white bk\n\n # get masked background, mask must be inverted \n mask = cv.bitwise_not(mask)\n bk_masked = cv.bitwise_and(bk, bk, mask=mask)\n\n # combine masked foreground and masked background \n final = cv.bitwise_or(res, bk_masked)\n \n mask = cv.bitwise_not(mask) # revert mask to original\n\n\n # Converting the binary mask to 3 channel image, this is just so \n # we can stack it with the others\n mask_3 = cv.cvtColor(mask, cv.COLOR_GRAY2BGR)\n \n # stack the mask, orginal frame and the filtered result\n stacked = np.hstack((mask_3,res,final))\n \n # Show this stacked frame at 40% of the size.\n cv.imshow(\"Trackbars\",frame)\n cv.imshow(\"stacked\",stacked)\n\n # If the user presses ESC then exit the program\n key = cv.waitKey(1)\n if key == 27:\n break\n\n \n # # If the user presses `s` then print this array.\n # if key == ord('s'):\n \n # thearray = [[l_h,l_s,l_v],[u_h, u_s, u_v]]\n # print(thearray)\n \n # # Also save this array as penval.npy\n # np.savetxt('hsv_value',thearray)\n # break\n\n \n \ncv.destroyAllWindows()","sub_path":"Main Code/1 Iteration/color_trackbar_ex.py","file_name":"color_trackbar_ex.py","file_ext":"py","file_size_in_byte":4784,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"290869688","text":"import asyncio\n\nfrom aiohttp import web\nfrom webargs import fields\nfrom webargs.aiohttpparser import use_kwargs, parser\n\n# Demonstrate basic implementation of args\nhello_args = {\"name\": fields.Str(missing=\"World\")}\n\n# Demonstrate how to use default values\n# And how to specify type for arguments.\nage_args = {\"age\": fields.Integer(required=True),\n \"number\": fields.Integer(missing=1)}\n\n# No decorator\nasync def hello(request):\n args = await parser.parse(hello_args, request)\n return web.Response(body=\"Hello, {}\".format(args['name']).encode(\"utf-8\"))\n\n# Decorator with coroutine\n@asyncio.coroutine\n@use_kwargs(hello_args)\ndef coro_hello(request, name):\n return 
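The color_trackbar_ex.py record above builds its binary mask with cv.inRange on an HSV image. A self-contained sketch of that masking step on a synthetic image, assuming OpenCV is installed:

# Minimal sketch of the HSV range-masking pattern used above; the input
# image here is synthetic, so no file or camera is needed.
import cv2 as cv
import numpy as np

img = np.zeros((10, 10, 3), np.uint8)
img[:5] = (0, 0, 255)          # top half pure red in BGR
hsv = cv.cvtColor(img, cv.COLOR_BGR2HSV)
mask = cv.inRange(hsv, np.array([0, 120, 70]), np.array([10, 255, 255]))
res = cv.bitwise_and(img, img, mask=mask)
print(mask[0, 0], mask[9, 9])  # 255 0 -> only the red pixels survive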
web.Response(body=\"Hello, {}\".format(name).encode(\"utf-8\"))\n\n# Decorator with async def\n@use_kwargs(hello_args)\nasync def async_hello(request, name):\n return web.Response(body=\"Hello, {}\".format(name).encode(\"utf-8\"))\n\n\n@asyncio.coroutine\n@use_kwargs(age_args)\ndef coro_age(request, age, number):\n magic_number = age * number\n return web.Response(body=\"You're age * number is {}\".format(magic_number).encode(\"utf-8\"))\n\napp = web.Application()\napp.add_routes([web.get('/hello', hello),\n web.get('/async_hello', async_hello),\n web.get('/coro_hello', coro_hello),\n web.get('/coro_age', coro_age)])\n\nweb.run_app(app)","sub_path":"aiohttp-webargs-demo.py","file_name":"aiohttp-webargs-demo.py","file_ext":"py","file_size_in_byte":1347,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"175643141","text":"class Solution(object):\n def reverse(self, x):\n \"\"\"\n :type x: int\n :rtype: int\n \"\"\"\n str_x=str(x)\n \n # the number is positive\n if '-' not in str_x:\n ans=int(str_x[::-1])\n \n # if the number is negative\n else:\n str_x=str_x.replace('-', '')\n ans=int(str_x[::-1])*-1\n \n # if the answer is smaller than the 32-bit number\n # and greater than and equare to the negative 32-bit number\n # else means more than 32 bits\n return ans if ans<2147483648 and ans>=-2147483648 else 0","sub_path":"reverse_integer.py","file_name":"reverse_integer.py","file_ext":"py","file_size_in_byte":618,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"520474540","text":"#!/usr/bin/python\n\nimport sys\nfrom optparse import OptionParser\n\ndef predict(x,y):\n if y< -4*x + 0.85:\n return 1\n else:\n return 2\n \n \ndef main():\n parser = OptionParser()\n parser.add_option(\"-i\", \"--input\", dest=\"input\",\n help=\"input filename\", metavar=\"FILE\")\n parser.add_option(\"-o\", \"--output\", dest=\"output\",\n help=\"output filename\", metavar=\"FILE\")\n parser.add_option(\"-c\", \"--class_name\", dest=\"class_name\",\n help=\"class filename\", metavar=\"FILE\")\n parser.add_option(\"-r\", action=\"store_true\", dest=\"coords\",help=\"print coordinates on pca plane\",default=True)\n parser.add_option(\"-n\", action=\"store_false\", dest=\"coords\",help=\"print image numbers\")\n parser.add_option(\"-p\", action=\"store_true\", dest=\"predict\",help=\"predict unknown points\",default=False)\n\n (parsed_options, parsed_args) = parser.parse_args()\n\n if parsed_options.input: # if filename is given\n in_file = open(parsed_options.input,'r')\n else:\n in_file = sys.stdin\n\n if parsed_options.output:\n out_file = open(parsed_options.output,'w')\n else:\n out_file = sys.stdout\n\n if not parsed_options.class_name: # if filename is not given\n parser.error('Class filename not given')\n\n print_coords = parsed_options.coords\n predict_unknown = parsed_options.predict\n\n classes = open(parsed_options.class_name)\n\n class_list = []\n for line in classes:\n class_list.append(int(line.split()[-1]))\n stats = [0,0,0,0,0,0]\n for idx,line in enumerate(in_file):\n x = float(line.split()[0])\n y = float(line.split()[1])\n\n # if len(line.split())>2:\n # n = int(line.split()[2])\n # else:\n n = int(line.split()[-1])\n if n > len(class_list):\n cl = predict(x,y) if predict_unknown else 0\n cl = 0\n else:\n cl = class_list[n-1]\n # if cl<0:\n # continue\n # if x<-5000:\n # cl=2\n # else:\n # cl=1\n stats[cl]+=1\n out_file.write(\"%s\\t%d\\n\" %(\"\\t\".join(line.split()),cl))\n continue\n # 
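The reverse_integer record above clamps the reversed value to the signed 32-bit range with literal bounds. The same guard written against the usual bound constants, as a sketch:

# Sketch of the 32-bit overflow guard from the record above, with the
# bounds spelled out as constants; behaviour matches the record.
INT_MIN, INT_MAX = -2**31, 2**31 - 1

def reverse_digits(x):
    sign = -1 if x < 0 else 1
    rev = sign * int(str(abs(x))[::-1])
    return rev if INT_MIN <= rev <= INT_MAX else 0

print(reverse_digits(-123), reverse_digits(1534236469))  # -321 0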
print idx,cl # Type and image number\n # if print_coords:\n # out_file.write(\"%f %f %d %d\\n\" % (x,y,n+1,cl)) # PCA result and type \n # else:\n # out_file.write(\"%d %d\\n\" %(n+1,cl))\n \n sys.stderr.write('%d %d %d %d %d %d\\n' % (stats[0],stats[1],stats[2],stats[3],stats[4],stats[5]))\n\n out_file.close()\n in_file.close()\n classes.close()\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"predict.py","file_name":"predict.py","file_ext":"py","file_size_in_byte":2619,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"257461347","text":"import RPi.GPIO as GPIO\nimport picamera\nimport time\nimport os\n\nshutter_button = 13\npower_button = 19\n\n#setup GPIO using Board numbering\nGPIO.setmode(GPIO.BOARD)\n\n# Configure GPIO pins pullup resistors\nGPIO.setup(shutter_button, GPIO.IN, pull_up_down=GPIO.PUD_UP)\nGPIO.setup(power_button, GPIO.IN, pull_up_down=GPIO.PUD_UP)\n\n# Start camera\ncamera = picamera.PiCamera()\n\n# Configure camera\ncamera.resolution = (1920, 1080)\n# camera.hflip = True\n# camera.vflip = True\n# camera.sharpness = 0\n# camera.contrast = 0\n# camera.brightness = 50\n# camera.saturation = 0\n# camera.ISO = 0\n# camera.video_stabilization = False\n# camera.exposure_compensation = 0\n# camera.exposure_mode = 'auto'\n# camera.meter_mode = 'average'\n# camera.awb_mode = 'auto'\n# camera.image_effect = 'none'\n# camera.color_effects = None\n# camera.rotation = 0\n# camera.crop = (0.0, 0.0, 1.0, 1.0)\n\ntime_last = time.time()\n\ndef shutter_pressed(channel):\n \"\"\" Detect shutter button pressed \"\"\"\n global camera\n global time_last\n # Record time now\n time_now = time.time()\n time_diff = time_now - time_last\n # Only take photo if last photo taken over a second ago\n if time_diff >= 1.0:\n time_last = time_now\n # Sleep a very short time to allow shutters to open\n time.sleep(0.8)\n folder = 'images/'\n #time_str = time.strftime(\"%d-%m-%Y_%H-%M-%S\", time.gmtime())\n time_str = time.strftime(\"%Y-%m-%d_%H-%M-%S\", time.gmtime())\n filename = folder + time_str + '.jpg'\n camera.capture(filename)\n\n\ndef power_off(channel):\n \"\"\" Detect power off button \"\"\"\n os.system(\"sudo shutdown -h now\")\n\n\n# Configure GPIO Event for power button\nGPIO.add_event_detect(shutter_button, GPIO.RISING, callback=shutter_pressed, bouncetime=300)\nGPIO.add_event_detect(power_button, GPIO.RISING, callback=power_off, bouncetime=300)\n\ntry:\n while True:\n # Do nothing whilst we wait for button events\n time.sleep(0.5)\nexcept:\n print(\"Handle Errors\")\nfinally:\n GPIO.cleanup()\n","sub_path":"retrocam.py","file_name":"retrocam.py","file_ext":"py","file_size_in_byte":2010,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"308290456","text":"#!/usr/bin/env python3\n\n# kircbot.py, part of kIRCbot\n# Copyright (C) 2017 : kikadf <kikadf.01@gmail.com>\n# Based on ircecho.py\n#\n# ircecho.py\n# Copyright (C) 2011 : Robert L Szkutak II - http://robertszkutak.com\n#\n# More copyright references: https://gist.github.com/GarrettSocling/371917661f98c6c54beea49de94ce1d9\n#\n# This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation; either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY 
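The shutter_pressed callback in the retrocam.py record above debounces by comparing timestamps and dropping presses that arrive within a second of the last accepted one. The same pattern isolated, with a hypothetical Debouncer class so the logic can be tested without GPIO hardware:

# Sketch of the timestamp debounce used in the shutter callback above:
# events arriving within a minimum interval of the last accepted one
# are dropped. Names are illustrative.
import time

class Debouncer:
    def __init__(self, min_interval=1.0):
        self.min_interval = min_interval
        self.last = 0.0

    def accept(self):
        now = time.time()
        if now - self.last >= self.min_interval:
            self.last = now
            return True
        return False

d = Debouncer()
print(d.accept(), d.accept())  # True False (second call arrives too soon)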
or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License along\n# with this program; if not, write to the Free Software Foundation, Inc.,\n# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.\n\nimport commonevents as ce\n\n\nreadbuffer = \"\"\n\n\nce.conns()\n\nwhile 1:\n readbuffer = readbuffer + ce.s.recv(1024).decode(\"UTF-8\")\n temp = str.split(readbuffer, \"\\n\")\n readbuffer = temp.pop( )\n\n if ce.checkconnected(ce.lasttime) > 500:\n ce.restart()\n\n for line in temp:\n print(line)\n line = str.rstrip(line)\n line = str.split(line)\n\n if(line[0] == \"PING\"):\n ce.pong(line[1])\n\n if(line[1] == \"PRIVMSG\"):\n ce.activation()\n if(line[3].strip(\":\") == ce.NICK):\n calledevent = \"42\"\n _who = ce.defsender(line[0])\n if (ce.checkarg(line, 4) is True):\n calledevent = line[4]\n if(calledevent in ce.events and _who in ce.MASTER):\n ce.eventhandler(calledevent, line[5:])\n else:\n ce.message(\"WTF?\")\n\n","sub_path":"kircbot.py","file_name":"kircbot.py","file_ext":"py","file_size_in_byte":1925,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"240725951","text":"from . import DATA_DIR\nimport pandas as pd\nimport xarray as xr\nimport numpy as np\nfrom pathlib import Path\nimport csv\n\nREMIND_ELEC_MARKETS = (DATA_DIR / \"remind_electricity_markets.csv\")\nREMIND_ELEC_EFFICIENCIES = (DATA_DIR / \"remind_electricity_efficiencies.csv\")\nREMIND_ELEC_EMISSIONS = (DATA_DIR / \"remind_electricity_emissions.csv\")\nGAINS_TO_REMIND_FILEPATH = (DATA_DIR / \"GAINStoREMINDtechmap.csv\")\n\n\nclass RemindDataCollection:\n \"\"\"\n Class that extracts data from REMIND output files.\n\n :ivar scenario: name of a Remind scenario\n :vartype scenario: str\n\n \"\"\"\n\n def __init__(self, scenario, year, filepath_remind_files):\n self.scenario = scenario\n self.year = year\n self.filepath_remind_files = filepath_remind_files\n self.data = self.get_remind_data()\n self.gains_data = self.get_gains_data()\n self.electricity_market_labels = self.get_remind_electricity_market_labels()\n self.electricity_efficiency_labels = (\n self.get_remind_electricity_efficiency_labels()\n )\n self.electricity_emission_labels = self.get_remind_electricity_emission_labels()\n self.rev_electricity_market_labels = self.get_rev_electricity_market_labels()\n self.rev_electricity_efficiency_labels = (\n self.get_rev_electricity_efficiency_labels()\n )\n self.electricity_markets = self.get_remind_electricity_markets()\n self.electricity_efficiencies = self.get_remind_electricity_efficiencies()\n self.electricity_emissions = self.get_remind_electricity_emissions()\n\n def get_remind_electricity_emission_labels(self):\n \"\"\"\n Loads a csv file into a dictionary. This dictionary contains labels of electricity emissions\n in Remind.\n\n :return: dictionary that contains emission names equivalence\n :rtype: dict\n \"\"\"\n with open(REMIND_ELEC_EMISSIONS) as f:\n return dict(filter(None, csv.reader(f, delimiter=\";\")))\n\n def get_remind_electricity_market_labels(self):\n \"\"\"\n Loads a csv file into a dictionary. 
This dictionary contains labels of electricity markets\n in Remind.\n\n :return: dictionary that contains market names equivalence\n :rtype: dict\n \"\"\"\n with open(REMIND_ELEC_MARKETS) as f:\n return dict(filter(None, csv.reader(f, delimiter=\";\")))\n\n def get_remind_electricity_efficiency_labels(self):\n \"\"\"\n Loads a csv file into a dictionary. This dictionary contains labels of electricity technologies efficiency\n in Remind.\n\n :return: dictionary that contains market names equivalence\n :rtype: dict\n \"\"\"\n with open(REMIND_ELEC_EFFICIENCIES) as f:\n return dict(filter(None, csv.reader(f, delimiter=\";\")))\n\n def get_rev_electricity_market_labels(self):\n return {v: k for k, v in self.electricity_market_labels.items()}\n\n def get_rev_electricity_efficiency_labels(self):\n return {v: k for k, v in self.electricity_efficiency_labels.items()}\n\n def get_remind_data(self):\n \"\"\"\n Read the REMIND csv result file and return an `xarray` with dimensions:\n * region\n * variable\n * year\n\n :return: an multi-dimensional array with Remind data\n :rtype: xarray.core.dataarray.DataArray\n\n \"\"\"\n\n filename = self.scenario + \".mif\"\n\n filepath = Path(self.filepath_remind_files) / filename\n df = pd.read_csv(\n filepath, sep=\";\", index_col=[\"Region\", \"Variable\", \"Unit\"]\n ).drop(columns=[\"Model\", \"Scenario\", \"Unnamed: 24\"])\n df.columns = df.columns.astype(int)\n\n # Filter the dataframe\n df = df.loc[\n (df.index.get_level_values(\"Variable\").str.contains(\"SE\"))\n | (df.index.get_level_values(\"Variable\").str.contains(\"Tech\"))\n ]\n variables = df.index.get_level_values(\"Variable\").unique()\n\n regions = df.index.get_level_values(\"Region\").unique()\n years = df.columns\n array = xr.DataArray(\n np.zeros((len(variables), len(regions), len(years), 1)),\n coords=[variables, regions, years, np.arange(1)],\n dims=[\"variable\", \"region\", \"year\", \"value\"],\n )\n for r in regions:\n val = df.loc[(df.index.get_level_values(\"Region\") == r), :]\n array.loc[dict(region=r, value=0)] = val\n\n return array\n\n def get_gains_data(self):\n \"\"\"\n Read the GAINS emissions csv file and return an `xarray` with dimensions:\n * region\n * pollutant\n * sector\n * year\n\n :return: an multi-dimensional array with GAINS emissions data\n :rtype: xarray.core.dataarray.DataArray\n\n \"\"\"\n filename = \"GAINS emission factors.csv\"\n filepath = Path(self.filepath_remind_files) / filename\n\n gains_emi = pd.read_csv(\n filepath,\n skiprows=4,\n names=[\"year\", \"region\", \"GAINS\", \"pollutant\", \"scenario\", \"factor\"],\n )\n gains_emi[\"unit\"] = \"Mt/TWa\"\n gains_emi = gains_emi[gains_emi.scenario == \"SSP2\"]\n\n sector_mapping = pd.read_csv(GAINS_TO_REMIND_FILEPATH).drop(\n [\"noef\", \"elasticity\"], axis=1\n )\n\n gains_emi = (\n gains_emi.join(sector_mapping.set_index(\"GAINS\"), on=\"GAINS\")\n .dropna()\n .drop([\"scenario\", \"REMIND\"], axis=1)\n .pivot_table(\n index=[\"region\", \"GAINS\", \"pollutant\", \"unit\"],\n values=\"factor\",\n columns=\"year\",\n )\n )\n\n regions = gains_emi.index.get_level_values(\"region\").unique()\n years = gains_emi.columns.values\n pollutants = gains_emi.index.get_level_values(\"pollutant\").unique()\n sectors = gains_emi.index.get_level_values(\"GAINS\").unique()\n\n array = xr.DataArray(\n np.zeros((len(pollutants), len(sectors), len(regions), len(years), 1)),\n coords=[pollutants, sectors, regions, years, np.arange(1)],\n dims=[\"pollutant\", \"sector\", \"region\", \"year\", \"value\"],\n )\n for r in 
regions:\n for s in sectors:\n val = gains_emi.loc[\n (gains_emi.index.get_level_values(\"region\") == r)\n & (gains_emi.index.get_level_values(\"GAINS\") == s),\n :,\n ]\n array.loc[dict(region=r, sector=s, value=0)] = val\n\n return array / 8760 # per TWha --> per TWh\n\n def get_remind_electricity_markets(self, drop_hydrogen=True):\n \"\"\"\n This method retrieves the market share for each electricity-producing technology, for a specified year,\n for each region provided by REMIND.\n Electricity production from hydrogen can be removed from the mix (unless specified, it is removed).\n\n :param drop_hydrogen: removes hydrogen from the region-specific electricity mix if `True`.\n :type drop_hydrogen: bool\n :return: an multi-dimensional array with electricity technologies market share for a given year, for all regions.\n :rtype: xarray.core.dataarray.DataArray\n\n \"\"\"\n # If hydrogen is not to be considered, it is removed from the technologies labels list\n if drop_hydrogen:\n list_technologies = [\n l\n for l in list(self.electricity_market_labels.values())\n if \"Hydrogen\" not in l\n ]\n else:\n list_technologies = list(self.electricity_market_labels.values())\n\n # If the year specified is not contained within the range of years given by REMIND\n if (\n self.year < self.data.year.values.min()\n or self.year > self.data.year.values.max()\n ):\n raise KeyError(\"year not valid, must be between 2005 and 2150\")\n\n # Otherwise, if the year specified corresponds exactly to a year given by REMIND\n elif self.year in self.data.coords[\"year\"]:\n # The contribution of each technology, for a specified year, for a specified region is normalized to 1.\n return self.data.loc[list_technologies, :, self.year] / self.data.loc[\n list_technologies, :, self.year\n ].groupby(\"region\").sum(axis=0)\n\n # Finally, if the specified year falls in between two periods provided by REMIND\n else:\n # Interpolation between two periods\n data_to_interp_from = self.data.loc[\n list_technologies, :, :\n ] / self.data.loc[list_technologies, :, :].groupby(\"region\").sum(axis=0)\n return data_to_interp_from.interp(year=self.year)\n\n def get_remind_electricity_efficiencies(self, drop_hydrogen=True):\n \"\"\"\n This method retrieves efficiency values for electricity-producing technology, for a specified year,\n for each region provided by REMIND.\n Electricity production from hydrogen can be removed from the mix (unless specified, it is removed).\n\n :param drop_hydrogen: removes hydrogen from the region-specific electricity mix if `True`.\n :type drop_hydrogen: bool\n :return: an multi-dimensional array with electricity technologies market share for a given year, for all regions.\n :rtype: xarray.core.dataarray.DataArray\n\n \"\"\"\n # If hydrogen is not to be considered, it is removed from the technologies labels list\n if drop_hydrogen:\n list_technologies = [\n l\n for l in list(self.electricity_efficiency_labels.values())\n if \"Hydrogen\" not in l\n ]\n else:\n list_technologies = list(self.electricity_efficiency_labels.values())\n\n # If the year specified is not contained within the range of years given by REMIND\n if (\n self.year < self.data.year.values.min()\n or self.year > self.data.year.values.max()\n ):\n raise KeyError(\"year not valid, must be between 2005 and 2150\")\n\n # Otherwise, if the year specified corresponds exactly to a year given by REMIND\n elif self.year in self.data.coords[\"year\"]:\n # The contribution of each technologies, for a specified year, for a specified region is 
normalized to 1.\n return (\n self.data.loc[list_technologies, :, self.year] / 100\n ) # Percentage to ratio\n\n # Finally, if the specified year falls in between two periods provided by REMIND\n else:\n # Interpolation between two periods\n data_to_interp_from = self.data.loc[list_technologies, :, :]\n return (\n data_to_interp_from.interp(year=self.year) / 100\n ) # Percentage to ratio\n\n def get_remind_electricity_emissions(self):\n \"\"\"\n This method retrieves emission values for electricity-producing technology, for a specified year,\n for each region provided by REMIND.\n\n :return: an multi-dimensional array with emissions for different technologies for a given year, for all regions.\n :rtype: xarray.core.dataarray.DataArray\n\n \"\"\"\n # If the year specified is not contained within the range of years given by REMIND\n if (\n self.year < self.gains_data.year.values.min()\n or self.year > self.gains_data.year.values.max()\n ):\n raise KeyError(\"year not valid, must be between 2005 and 2150\")\n\n # Otherwise, if the year specified corresponds exactly to a year given by REMIND\n elif self.year in self.gains_data.coords[\"year\"]:\n # The contribution of each technologies, for a specified year, for a specified region is normalized to 1.\n return self.gains_data.loc[dict(year=self.year, value=0)]\n\n # Finally, if the specified year falls in between two periods provided by REMIND\n else:\n # Interpolation between two periods\n return self.gains_data.loc[dict(value=0)].interp(year=self.year)\n","sub_path":"rmnd_lca/data_collection.py","file_name":"data_collection.py","file_ext":"py","file_size_in_byte":12061,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"506503579","text":"#!/usr/bin/env python3\n\nfrom pyVoiceText import VoiceText\n\ndef voicetext(textfile, outfile, speaker, speed):\n vt = VoiceText(open('./key.txt', 'r').read(16))\n wave = vt.fetch(\n text=open(textfile, 'r').read(),\n speaker=speaker,\n emotion_level=1,\n pitch=100,\n speed=speed,\n volume=100)\n vt.save(wave, outfile)\n\n\nif __name__ == \"__main__\":\n import sys\n argv = sys.argv\n if argv[3] == 'h':\n speaker='hikari'\n else:\n# speaker='show'\n speaker='takeru'\n speed=100\n print('VoiceText start: ', argv[1])\n voicetext(argv[1], argv[2], speaker, speed)\n print('VoiceText finish: ', argv[1])\n","sub_path":"voicetext.py","file_name":"voicetext.py","file_ext":"py","file_size_in_byte":674,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"566500647","text":"#!/usr/bin/env python\n\nimport collections\nimport json\nimport os\nimport subprocess\nimport sys\n\n\nclass Vhosts(collections.Mapping):\n def __init__(self, vhosts_pl_path=\"/data/vhosts.pl\"):\n self.vhosts = self._parse_vhosts_pl(vhosts_pl_path)\n # Special additions\n self.vhosts['mx0.mysociety.org'] = {\n 'ssl_group': 'Mail',\n 'servers': ['bittern', 'starling'],\n 'aliases': ['mx1.mysociety.org', 'mx0.ukcod.org.uk', 'mx1.ukcod.org.uk', 'smtp.mysociety.org']\n }\n self.vhosts['git.mysociety.org'] = {\n 'ssl_group': 'mySociety',\n 'servers': ['leopard', 'panther', 'kingfisher', 'raven'],\n 'aliases': ['debian.mysociety.org', 'nagios.mysociety.org', 'nagios-external.mysociety.org',\n 'git.mysociety.org', 'icinga.mysociety.org', 'icinga-external.mysociety.org', \n 'grafana.mysociety.org', 'secure.mysociety.org'\n ]\n }\n self.vhosts['mysociety.org'] = {\n 'ssl_group': 'mySociety.external',\n 'servers': ['mslb001hexa', 'mslb001sova'],\n 
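The data_collection.py record above falls back to data.interp(year=...) whenever the requested year sits between two REMIND periods. A minimal sketch of that linear interpolation along a coordinate, assuming xarray and scipy are installed:

# Sketch of the between-period interpolation used above: xarray's
# interp() gives linear interpolation along the year coordinate.
import numpy as np
import xarray as xr

da = xr.DataArray(
    np.array([[1.0, 3.0], [2.0, 6.0]]),
    coords={"region": ["EUR", "USA"], "year": [2010, 2020]},
    dims=["region", "year"],
)
print(da.interp(year=2015).values)  # [2. 4.] -> halfway between the periods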
'aliases': [\n 'mysociety.org',\n 'mysociety.org.uk',\n 'www.mysociety.org.uk',\n 'mysociety.uk',\n 'www.mysociety.uk',\n 'democracy.co.uk',\n 'www.democracy.co.uk',\n 'democracy.org.uk',\n 'www.democracy.org.uk'\n ]\n }\n self.vhosts['mysocietyemergency.org'] = {\n 'servers': ['emergency'],\n 'aliases': ['www.mysocietyemergency.org']\n }\n\n self.vhosts['parlvid.mysociety.org'] = {\n 'servers': ['starling']\n }\n\n def __getitem__(self, key):\n vhost = self.vhosts[key]\n vhost['domains'] = self._get_vhost_domains(key, vhost)\n return vhost\n\n def __len__(self):\n return len(self.vhosts)\n\n def __iter__(self):\n return iter(self.vhosts)\n\n def _get_vhost_domains(self, vhost_name, data):\n # Determine CN and SAN names\n aliases = data.get('aliases', [])\n redirects = data.get('redirects', [])\n ignore = data.get('https_ignore', [])\n if not isinstance(ignore, list):\n ignore = [ignore]\n\n dns_names = set()\n dns_names.add(vhost_name)\n dns_names.update(aliases)\n dns_names.update(redirects)\n\n if vhost_name in redirects:\n cn = aliases[0]\n else:\n cn = vhost_name\n dns_names.remove(cn)\n dns_names -= set(ignore)\n\n if cn in ignore:\n ssl_dns_names = sorted(list(dns_names))\n else:\n ssl_dns_names = [cn] + sorted(list(dns_names))\n\n return ssl_dns_names\n\n def _parse_vhosts_pl_section(self, vhosts_pl_path, section):\n return json.loads(subprocess.check_output([\n 'perl', '-e', 'use JSON; require \"' + vhosts_pl_path + '\"; print encode_json($' + section + ');'\n ]).decode('utf-8'))\n\n def _parse_vhosts_pl(self, vhosts_pl_path):\n vhosts = self._parse_vhosts_pl_section(vhosts_pl_path, 'vhosts')\n return vhosts\n","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":3165,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"627316009","text":"#!/usr/bin/env python\r\n# -*- coding: utf-8 -*-\r\nimport uuid\r\nfrom kernel.helpers import image_resize\r\nfrom kernel.validator import Validator\r\nimport os\r\n\r\n__author__ = 'mr.S'\r\n\r\nfrom kernel.server import app, files_dir\r\nfrom bottle import jinja2_view as view, request\r\n\r\n\r\n@app.route(\"/admin\")\r\n@view('admin/index')\r\ndef admin():\r\n return {}\r\n\r\n@app.post('/upload/image')\r\ndef save_object():\r\n if not request.user.role('admin'): return False\r\n data = request.forms\r\n data['image'] = request.files.get('image')\r\n v = Validator(data)\r\n v.field(\"image\").image()\r\n img = data.get(\"image\")\r\n\r\n if v.is_valid() and img is not None:\r\n path = os.path.abspath(files_dir+\"/upload/\")\r\n image_name = 'image.'+str(uuid.uuid4())+\".png\"\r\n if not os.path.exists(path): os.makedirs(path)\r\n\r\n image_path = os.path.join(path, image_name)\r\n\r\n image = image_resize(img)\r\n image.save(image_path)\r\n\r\n return {\"status\": \"ok\",\r\n \"url\": '/file/upload/'+image_name,\r\n \"width\": image.size[0],\r\n \"height\": image.size[1]\r\n }\r\n else:\r\n return {\"status\": \"fail\",\r\n \"errors\": v.errors}","sub_path":"modules/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":1210,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"494715715","text":"import sys\n\nSRC_EPSILON = \"@@@@\"\nTRG_EPSILON = \"@@@\"\n\nsrc_f = sys.argv[1]\ntrg_f = sys.argv[2]\n\nsrc_o = open(src_f, 'r')\ntrg_o = open(trg_f, 'r')\n\nsrc = src_o.readlines()\ntrg = trg_o.readlines()\n\nfor i in range(len(src)):\n s = src[i].split()[-1]\n t = trg[i].split()[-1]\n if ((s == 
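The _get_vhost_domains method in the utils.py record above derives the certificate names with set arithmetic: gather the vhost name, aliases and redirects, drop the ignored names, and put the chosen common name first. A self-contained sketch of the same derivation with made-up inputs:

# Sketch of the CN/SAN derivation above; example.org and friends are
# placeholder names, not taken from the record.
def ssl_dns_names(vhost, aliases=(), redirects=(), ignore=()):
    names = {vhost, *aliases, *redirects}
    cn = aliases[0] if vhost in redirects else vhost
    names.discard(cn)
    names -= set(ignore)
    rest = sorted(names)
    return rest if cn in ignore else [cn] + rest

print(ssl_dns_names("example.org", aliases=["www.example.org"],
                    redirects=["old.example.org"]))
# ['example.org', 'old.example.org', 'www.example.org']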
SRC_EPSILON and t == TRG_EPSILON)\n or (t == SRC_EPSILON and s == SRC_EPSILON)):\n print(i)\n print('\\t'+src[i])\n print('\\t'+trg[i])\n","sub_path":"scripts/find_unnecessary_epsilon_pads.py","file_name":"find_unnecessary_epsilon_pads.py","file_ext":"py","file_size_in_byte":451,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"645298625","text":"#packages\nimport pygame as pg\nimport random as r\nimport tkinter as t\nimport winsound as w\nfrom playsound import playsound as ps\nfrom pydub import AudioSegment as aus\nfrom pydub.playback import play as p\n\npg.init()\n#pg.font.init()\n\n#colors\nwhite = (255,255,255)\nblack = (0,0,0)\nred = (255,0,0)\ngreen = (54,198,83)\nblue = (30,144,255)\ngrey = (128,128,128)\nyellow = (255,255,0)\n\n#ui variables\ngame_caption = \"snake2\"\nscreen_widgth = 1000\nscreen_height = 500\nbox_width = 990\nbox_height = 400\nsnake_size = 10\nfood_size = 10\nspeed = 5\n\ngamewindow = pg.display.set_mode((screen_widgth,screen_height))\nboxwindow = pg.display.set_mode((box_width,box_height))\npg.display.set_caption(game_caption)\npg.display.update()\n\nclock = pg.time.Clock()\nfps = 30\n\n#texts\nfont = pg.font.SysFont('Whimsy TT',35)\ndef text_screen(txt1,txt2,clr1,clr2,x1,y1,x2,y2):\n screen_txt1 = font.render(txt1,True,clr1)\n screen_txt2 = font.render(txt2,True,clr2)\n gamewindow.blit(screen_txt1,[x1,y1])\n gamewindow.blit(screen_txt2,[x2,y2])\n\ndef snake(boxwindow,black,snake_list,snake_size):\n for x,y in snake_list:\n pg.draw.rect(boxwindow,black,[x,y,snake_size,snake_size])\n\ndef gameloop():\n #game processing variables\n exitgame = False\n gameover = False\n score = 0\n velocity_x = 0\n velocity_y = 0\n #fps = 30\n \n snake_x = 100\n snake_y = 150\n food_x = r.randint(0,box_width)\n food_y = r.randint(0,box_height)\n \n #snake length\n snake_length = 1\n snake_list = []\n \n #game loop\n while exitgame != True:\n if gameover == True:\n #w.PlaySound(\"death.wav\", w.SND_ALIAS ) #gives sound effects in windows\n #w.PlaySound(None, w.SND_PURGE)\n #ps('end.mp3')\n #ps('',False)\n #ps('end.wav')\n #song = aus.from_file('end.wav', 'wav')\n #p(song)\n \n boxwindow.fill(grey)\n text_screen(\"GAME OVER!\",\"press ENTER to play again...\",blue,yellow,250,160,230,200)\n \n #w.PlaySound(\"end.wav\", w.SND_ASYNC | w.SND_ALIAS ) #gives sound effects in windows\n \n for event in pg.event.get():\n if event.type == pg.QUIT:\n exitgame = True\n if event.type == pg.KEYDOWN:\n if event.key == pg.K_RETURN:\n #w.PlaySound(None, w.SND_ALIAS)\n gameloop()\n else:\n w.PlaySound(None, w.SND_ASYNC)\n for event in pg.event.get():\n if event.type == pg.QUIT:\n exitgame = True\n if event.type == pg.KEYDOWN:\n if event.key == pg.K_RIGHT:\n velocity_x = speed\n velocity_y = 0\n #event.key != pg.K_LEFT\n if event.key == pg.K_LEFT:\n velocity_x = -speed\n velocity_y = 0\n #event.key != pg.K_RIGHT\n if event.key == pg.K_UP:\n velocity_x = 0\n velocity_y = -speed\n #event.key != pg.K_DOWN\n if event.key == pg.K_DOWN:\n velocity_x = 0\n velocity_y = speed\n #event.key != pg.K_UP\n \n ''' for event in pg.event.get():\n #if event.type == pg.KEYDOWN:\n if velocity_x == speed:\n pg.K_LEFT = False \n elif velocity_x == -speed:\n pg.K_RIGHT = False\n elif velocity_y == -speed:\n pg.K_DOWN = False\n else:\n pg.K_UP = False '''\n \n #snake movement\n snake_x += velocity_x\n snake_y += velocity_y\n \n #collision or eating food\n #c = 10\n if abs(snake_x - food_x)<10 and abs(snake_y - food_y)<10:\n #ps('eat.wav')\n #w.PlaySound(\"eat.wav\", 
w.SND_ASYNC | w.SND_ALIAS ) #gives sound effects in windows\n song = aus.from_wav('eat.wav')\n p(song)\n \n score += 10\n snake_length += 2\n food_x = r.randint(0,box_width)\n food_y = r.randint(0,box_height)\n #c += 20\n \n gamewindow.fill(grey)\n boxwindow.fill(white)\n text_screen(\"sNake GaMe\",\"score: \" + str(score),green,red,10,5,800,5)\n pg.draw.rect(boxwindow,red,[food_x,food_y,10,10])\n \n #snake head for length\n snake_head = []\n snake_head.append(snake_x)\n snake_head.append(snake_y) \n\n snake_list.append(snake_head)\n #print(snake_list)\n if len(snake_list) > snake_length: #if no. of lists of snake is more than snake length:\n del(snake_list[0])\n \n #snake eating itself (game over)\n ''' for x in snake_list:\n if abs(x[1] - snake_head[1])<5 and abs(x[2] - snake_head[2])<5:\n exitgame = True '''\n \n if (snake_head in snake_list[:-1]) or (snake_x or snake_y)<0 or (snake_x>box_width) or (snake_y>box_height): #list[-1]or[:-1] -> starting from last item\n gameover = True\n w.PlaySound(\"end.wav\", w.SND_ASYNC | w.SND_ALIAS ) #gives sound effects when game is over (for windows)\n \n snake(boxwindow,black,snake_list,snake_size)\n \n \"\"\" e=[i*i for i in range(1,6)]\n #print(\"e is \" + str(e))\n #print(\"e is \" + str(e[-3:]))\n print(\"e is \" + str(e[ :-1])) \"\"\"\n\n pg.display.update()\n clock.tick(fps)\n pg.quit()\n quit()\n\ngameloop()\n\n'''\nclass game_screen\nclass snake\nclass food\nclass play_screen\n'''","sub_path":".history/GAMES/snake game/snake game_20200226214618.py","file_name":"snake game_20200226214618.py","file_ext":"py","file_size_in_byte":6046,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"235385464","text":"#!/usr/bin/env python3\n\nfrom gql import gql\nimport wandb\nfrom typing import Dict\n\ndef get_sweep_table(api: wandb.Api, project: str) -> Dict[str, str]:\n QUERY = gql(''' \n query Sweep($project: String!, $entity: String) {\n project(name: $project, entityName: $entity) {\n sweeps {\n edges {\n node {\n name\n displayName\n config\n }\n }\n }\n }\n }''')\n\n entity, project = project.split(\"/\")\n response = api.client.execute(QUERY, variable_values={\n 'entity': entity,\n 'project': project,\n })\n\n edges = response.get(\"project\", {}).get(\"sweeps\", {}).get(\"edges\")\n assert edges\n\n id_to_name = {}\n for sweep in edges:\n sweep = sweep[\"node\"]\n\n name = sweep[\"displayName\"]\n if name is None:\n name = [s for s in sweep[\"config\"].split(\"\\n\") if s.startswith(\"name:\")]\n assert len(name)==1\n name = name[0].split(\":\")[1].strip()\n\n id_to_name[sweep[\"name\"]] = name\n\n return id_to_name\n\n\napi = wandb.Api()\nprint(get_sweep_table(api, \"username/project_name\"))\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1222,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"598951358","text":"def mostCommonCharacter(stringInput):\n alphabetCount = [0]*28\n alphabet = [\"a\",\"b\",\"c\",\"d\",\"e\",\"f\",\"g\",\"h\",\"i\",\"j\",\"k\",\"l\",\"m\",\"n\",\"o\",\"p\",\"q\",\"r\",\"s\",\"t\",\"u\",\"v\",\"w\",\"x\",\"y\",\"z\"]\n stringInput = list(stringInput)\n charCount = 0\n letterCount = 0\n\n for character in stringInput:\n for letter in alphabet:\n if stringInput[charCount] == alphabet[letterCount]:\n alphabetCount[letterCount]+=1\n letterCount+=1\n letterCount = 0\n charCount+=1\n maxIndex = alphabetCount.index(max(alphabetCount))\n print(\"The most common character is\" 
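The collision checks in the snake record above (and in the heroPlane.py record further down) compare rectangle edges case by case. The standard axis-aligned bounding-box test expresses the same idea in one condition; a sketch, not taken from either record:

# Generic AABB overlap test: two rectangles overlap exactly when each
# one's left edge is left of the other's right edge, and likewise
# vertically. Strict comparisons treat touching edges as no collision.
def rects_overlap(x1, y1, w1, h1, x2, y2, w2, h2):
    return x1 < x2 + w2 and x2 < x1 + w1 and y1 < y2 + h2 and y2 < y1 + h1

print(rects_overlap(0, 0, 10, 10, 5, 5, 10, 10))   # True
print(rects_overlap(0, 0, 10, 10, 20, 0, 10, 10))  # False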
,alphabet[maxIndex], \": \",alphabetCount[maxIndex] )\n","sub_path":"All_labs/lab2/problem1.py","file_name":"problem1.py","file_ext":"py","file_size_in_byte":657,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"618071553","text":"\n\n# class header\nclass _RELISH():\n\tdef __init__(self,): \n\t\tself.name = \"RELISH\"\n\t\tself.definitions = [u'a type of sauce that is eaten with food to add flavour to it: ', u'the enjoyment you get from doing something: ']\n\n\t\tself.parents = []\n\t\tself.childen = []\n\t\tself.properties = []\n\t\tself.jsondata = {}\n\n\n\t\tself.specie = 'nouns'\n\n\n\tdef run(self, obj1 = [], obj2 = []):\n\t\treturn self.jsondata\n","sub_path":"xai/brain/wordbase/nouns/_relish.py","file_name":"_relish.py","file_ext":"py","file_size_in_byte":391,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"249082571","text":"import model\nfrom sqlalchemy import create_engine\n\nENGINE = None\n\ndef make(engine):\n # the declarative Base is assumed to be defined in model.py\n model.Base.metadata.create_all(engine)\n\ndef main():\n global ENGINE\n ENGINE = create_engine(\"sqlite:///ratings.db\", echo=True)\n make(ENGINE)\n\nif __name__ == \"__main__\":\n main()","sub_path":"make.py","file_name":"make.py","file_ext":"py","file_size_in_byte":288,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"71318948","text":"\"\"\"\n Given a 2D grid and a word, find out whether the word exists in the grid.\n The word must be built from letters in sequentially adjacent cells, where 'adjacent' cells are horizontal or vertical neighbours. The same cell may not be used more than once.\n\n Example:\n board =\n [\n ['A','B','C','E'],\n ['S','F','C','S'],\n ['A','D','E','E']\n ]\n\n Given word = \"ABCCED\", return true\n Given word = \"SEE\", return true\n Given word = \"ABCB\", return false\n\n\n Constraints:\n board and word contain only upper- and lower-case English letters.\n 1 <= board.length <= 200\n 1 <= board[i].length <= 200\n 1 <= word.length <= 10^3\n\"\"\"\n\nfrom typing import List\n\n\nclass Solution:\n\n def exist(self, board: List[List[str]], word: str) -> bool:\n\n word_len = len(word)\n for i in range(len(board)):\n for j in range(len(board[0])):\n if self.dfs(board, i, j, word, 0, word_len):\n return True\n return False\n\n @classmethod\n def dfs(cls, board, i, j, word, index, word_len):\n char = board[i][j]\n\n if index == word_len - 1:\n return word[index] == char\n\n m = len(board)\n n = len(board[0]) if board else 0\n board[i][j] = \"#\"\n\n if char == word[index]:\n for tmp_i, tmp_j in ((1, 0), (-1, 0), (0, 1), (0, -1)):\n new_i = tmp_i + i\n new_j = tmp_j + j\n if 0 <= new_i < m and 0 <= new_j < n and board[new_i][new_j] != \"#\" and cls.dfs(board, new_i, new_j,\n word, index + 1,\n word_len):\n return True\n board[i][j] = char\n return False\n\n\nif __name__ == '__main__':\n print(\n Solution().exist(\n [\n [\"A\", \"B\", \"C\", \"E\"],\n [\"S\", \"F\", \"C\", \"S\"],\n [\"A\", \"D\", \"E\", \"E\"]\n ],\n \"ASAD\"\n )\n )\n","sub_path":"Week_06/G20200343030545/LeetCode_79_545.py","file_name":"LeetCode_79_545.py","file_ext":"py","file_size_in_byte":2188,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"293650739","text":"# In this programming problem and the next you'll code up the clustering \n# algorithm from lecture for computing a max-spacing k-clustering. Download \n# the text file here. This file describes a distance function (equivalently, \n# a complete graph with edge costs). 
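The problem1.py record above tallies letters with a hand-rolled 26-way loop over an alphabet list. For comparison, a sketch of the same result with collections.Counter from the standard library:

# Same outcome as mostCommonCharacter above, via the standard library.
from collections import Counter

def most_common_character(s):
    letters = [c for c in s.lower() if c.isalpha()]
    char, count = Counter(letters).most_common(1)[0]
    return char, count

print(most_common_character("Hello world"))  # ('l', 3)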
It has the following format:\n# [number_of_nodes]\n# [edge 1 node 1] [edge 1 node 2] [edge 1 cost]\n# [edge 2 node 1] [edge 2 node 2] [edge 2 cost]\n# ...\n# There is one edge (i,j) for each choice of 1≤i<j≤n, where n is the number \n# of nodes. For example, the third line of the file is \"1 3 5250\", indicating \n# that the distance between nodes 1 and 3 (equivalently, the cost of the edge (1,3)) \n# is 5250. You can assume that distances are positive, but you should NOT assume that\n# they are distinct.\n# Your task in this problem is to run the clustering algorithm from lecture on this \n# data set, where the target number k of clusters is set to 4. What is the maximum \n# spacing of a 4-clustering?\n\nData = open(\"Data1.txt\",\"r\")\nlines = Data.readlines()\nnum = int(lines[0])\nnodes = [i+1 for i in range(num)]\nedges = []\n# Use a list to store the list of edge cost, and the two nodes\nfor i in range(1,len(lines)-1):\n node1,node2,edge12 = map(int,lines[i].split())\n edges.append([edge12,node1,node2])\n\n# Sort the above list in ascending order of the edge costs and save it as spacings\nedges.sort(key = lambda edge : edge[0])\nspacings = edges.copy()\n\n# Calculate the max spacing\nmax_spacing = spacings[len(spacings)-1][0]\n\n# Run the loop till the number of clusters remaining is equal to 4\nwhile(len(set(nodes)) != 4):\n edge = edges.pop(0)\n # If the nodes with the least edge cost do not belong to the same cluster,\n # change the cluster tags of the cluster the second node belongs to to the\n # cluster tag of the cluster the first node belongs to\n if nodes[edge[1]-1] != nodes[edge[2]-1]:\n clustertag1 = nodes[edge[1]-1]\n clustertag2 = nodes[edge[2]-1]\n\n for i in range(nodes.count(clustertag2)):\n nodes[nodes.index(clustertag2)] = clustertag1\n\n# Make the spacing equal to max spacing if the nodes belong to the same cluster\nfor spacing in spacings:\n if nodes[spacing[1]-1] == nodes[spacing[2]-1]:\n spacing[0] = max_spacing\n\n# Sort the list of spacings and nodes according to their edge costs\nspacings.sort(key = lambda spacing : spacing[0])\n# Print the lowest spacing value as it will be the least distance between\n# two nodes of different clusters\nprint(spacings[0][0])","sub_path":"Clustering/Clustering.py","file_name":"Clustering.py","file_ext":"py","file_size_in_byte":2501,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"47660423","text":"import random\n\ndef guess (ending_range):\n random_number = random.randint(1,ending_range)\n guess = 0\n while guess != random_number:\n guess = int(input(\"Enter a number between 1 and \" + str(ending_range) + \" : \"))\n if guess>random_number:\n print(\"Sorry! Try again Guess is too high\")\n elif guess<random_number:\n print(\"Sorry! Try again Guess is too Low\")\n\n print(\"Yeah! 
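The clustering record above merges clusters by relabelling a whole node list on every union, which costs linear time per merge. A disjoint-set (union-find) structure performs the same merge-and-query operations with near-constant amortised cost; an illustrative sketch:

# Union-find with path halving; an alternative to the list relabelling
# used in the clustering record above, not the record's own code.
def find(parent, x):
    while parent[x] != x:
        parent[x] = parent[parent[x]]  # path halving
        x = parent[x]
    return x

def union(parent, a, b):
    ra, rb = find(parent, a), find(parent, b)
    if ra == rb:
        return False  # already in the same cluster
    parent[rb] = ra
    return True

parent = list(range(5))
union(parent, 0, 1); union(parent, 1, 2)
print(find(parent, 2) == find(parent, 0))  # True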
You have guessed the number \" + str(random_number) + \" correctly.\")\n\ndef computer_guess(ending_range):\n low = 1\n high = ending_range\n feedback = ''\n while feedback != 'c':\n if low != high:\n guess = random.randint(low, high)\n else:\n guess = low\n feedback = input(\"Is the number \" + str(guess) + \" too high (h), too low (l) or correct (c)? \")\n if feedback == 'h':\n high = guess - 1\n elif feedback == 'l':\n low = guess + 1\n print(\"Thank God! Guessed the number correctly!\")\n\ncomputer_guess(100)","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1041,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"122125352","text":"## python genelevel1.1.py snpindel_indel3030_raw.indel.recalibrated.annovar.exonic_variant_function\n\nimport sys\n\ndict1={}\ndict2={}\ntype=['frameshift insertion','frameshift deletion','nonframeshift insertion','nonframeshift deletion','stopgain SNV','stoploss SNV','unknown']\ntitle=['gene_symbol','exonic_indel','new','dbsnp','new_frameshift_insertion','new_frameshift_deletion','new_nonframeshift_insertion','new_nonframeshift_deletion','new_stopgain_SNV','new_stoploss_SNV','new_unknown','dbsnp_frameshift_insertion','dbsnp_frameshift_deletion','dbsnp_nonframeshift_insertion','dbsnp_nonframeshift_deletion','dbsnp_stopgain_SNV','dbsnp_stoploss_SNV','dbsnp_unknown']\ninFile1=open(sys.argv[1],'r')\nouFile1=open(sys.argv[1]+'.genelevel','w')\nfor line in inFile1 :\n fields=line.split('\\t')\n gene=fields[2].split(':')[0]\n if fields[14]=='PASS' :\n dict1.setdefault(gene,[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0])\n dict1[gene][0]+=1\n if fields[10]=='.' :\n dict1[gene][1]+=1\n # new variants: counts at indices 3-9 follow the order of the type list\n if fields[1] in type:\n dict1[gene][3+type.index(fields[1])]+=1\n else :\n dict1[gene][2]+=1\n # dbSNP variants: counts at indices 10-16 follow the order of the type list\n if fields[1] in type:\n dict1[gene][10+type.index(fields[1])]+=1\n\nouFile1.write('\\t'.join(title)+'\\n')\nfor item in sorted(dict1.items(),key=lambda x:x[1][0],reverse=True) :\n ouFile1.write(item[0]+'\\t'+'\\t'.join(str(i) for i in item[1])+'\\n')\n\n\ninFile1.close()\nouFile1.close()\n\n\n","sub_path":"cc_mcc_seq/annotation/genelevel1.1.py","file_name":"genelevel1.1.py","file_ext":"py","file_size_in_byte":2238,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"613821375","text":"# coding:cp949\n\ncoffee = 10\nmoney = 300\n\nwhile True :\n money = int(input(\"돈을 넣어주세요: \")) # prompt: 'Please insert money: '\n \n if money == 300 :\n print(\"커피를 줍니다.\") # 'Here is your coffee.'\n coffee = coffee - 1\n elif money > 300 :\n print(\"거스름돈 %d를 주고 커피를 줍니다.\" %(money-300)) # 'Here is your coffee and %d in change.'\n else :\n print(\"돈을 다시 돌려주고 커피를 주지않습니다.\") # 'Returning your money; no coffee.'\n\n print(\"남은 커피의 양은 %d개 입니다.\" %coffee) # '%d cups of coffee left.'\n\n if not coffee :\n print(\"커피가 다 떨여졌습니다. 판매를 중지합니다.\") # 'Out of coffee. Closing down.'\n break\n","sub_path":"01_Jump_to_Python/Chap03/126.py","file_name":"126.py","file_ext":"py","file_size_in_byte":559,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"399759130","text":"import jieba.analyse\n\nfrom main import *\nfrom process_config import get_valid_blanks_count\nfrom process_words import *\n\n\ndef get_all_tag_words(text, old_all_tag_words):\n \"\"\"\n Get the keywords (tag_words.txt plus jieba's TF-IDF algorithm).\n :param old_all_tag_words: keywords already extracted from all texts analysed before this one.\n :param text: text from which the TF-IDF algorithm extracts keywords.\n :return: the keyword set all_tag_words and the expected number of blanks blanks_count for this text.\n \"\"\"\n # Get the user-defined number of blanks.\n blanks_count = get_valid_blanks_count(text)\n\n # Keywords from jieba's TF-IDF analysis; the stop-word list is loaded first.\n jieba.analyse.set_stop_words(config_constant.STOP_WORDS_PATH)\n tf_idf_tag_words = jieba.analyse.extract_tags(\n text, topK=blanks_count, withWeight=False, allowPOS=())\n tf_idf_tag_words = set(tf_idf_tag_words)\n\n # Custom keywords from tag_words.txt.\n custom_tag_words = open_file(config_constant.TAG_WORDS_PATH, \"r\")\n\n # Build the full keyword set.\n all_tag_words = tf_idf_tag_words | custom_tag_words | old_all_tag_words\n\n return all_tag_words, blanks_count\n\n\ndef get_seged_words(text):\n \"\"\"\n Get the word list produced by segmenting the text with jieba and the new-word dictionary.\n :param text: text to segment.\n :return: the segmented word list.\n \"\"\"\n # Suppress jieba's log messages.\n jieba.setLogLevel(20)\n # Load the new-word dictionary into jieba.\n jieba.load_userdict(config_constant.NEW_WORDS_PATH)\n # Segment the text.\n seged_words = list(jieba.cut(text, cut_all=False))\n return seged_words\n\n\ndef get_cloze_seged_words(text, old_all_tag_words):\n \"\"\"\n For a given text, get the segmented word list with cloze blanks added.\n :param old_all_tag_words: keywords already extracted from all texts analysed before this one.\n :param text: text to which blanks are added.\n :return: the segmented word list with blanks added, cloze_seged_words.\n \"\"\"\n seged_words = get_seged_words(text)\n all_tag_words, blanks_count = get_all_tag_words(text, old_all_tag_words)\n # Cache of the keywords found in this text, used only to index the Anki cloze markers.\n tmp_tag_words = []\n cloze_seged_words = []\n # index\n count = 0\n\n # Add the blanks to the text.\n if config_constant.CLOZE_INDEX_SWITCH:\n for seged_word in seged_words:\n if seged_word in all_tag_words:\n if seged_word in tmp_tag_words:\n word_index = tmp_tag_words.index(seged_word) + 1\n # Add an index to the Anki cloze marker.\n seged_word = \"\".join(\n [\"{{c\", str(word_index), \"::\", seged_word, \"}}\"])\n else:\n count += 1\n tmp_tag_words.append(seged_word)\n seged_word = \"\".join(\n [\"{{c\", str(count), \"::\", seged_word, \"}}\"])\n cloze_seged_words.append(seged_word)\n elif config_constant.CLOZE_INDEX_SWITCH == False:\n for seged_word in seged_words:\n if seged_word in all_tag_words:\n tmp_tag_words.append(seged_word)\n seged_word = \"\".join([\"{{c1\", \"::\", seged_word, \"}}\"])\n cloze_seged_words.append(seged_word)\n\n text_tag_count = len(tmp_tag_words)\n if blanks_count > text_tag_count:\n print(\n '你所期望的空格数为 %d 个,但超过系统找到的关键词数 %d 个。' %\n (blanks_count, text_tag_count))\n divide()\n return cloze_seged_words, all_tag_words\n","sub_path":"code/process_text.py","file_name":"process_text.py","file_ext":"py","file_size_in_byte":3555,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"12792940","text":"# 3. 
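The process_text.py record above leans on jieba.analyse.extract_tags for its TF-IDF keywords. A minimal call, assuming the jieba package is installed (stop words and custom dictionaries omitted here):

# Smallest useful jieba TF-IDF call; the sentence is an arbitrary example.
import jieba.analyse

text = "自然语言处理是人工智能的一个重要方向"
tags = jieba.analyse.extract_tags(text, topK=3, withWeight=False)
print(tags)  # the three highest-scoring TF-IDF terms in the sentence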
完全数:\n# 1 + 2 + 3 = 6 (6为完全数)\n# 1, 2, 3 都为 6的因数(能被一个数x整数的数为y,则y为x的因数)\n# 1 x 6 = 6\n# 2 x 3 = 6\n# 完全数是指除自身以外,所有的因数相加之和等于自身的数\n# 求 4 ~ 5个完全数并打印\n# 答案:\n# 6\n# 28\n# 496\n# ....\n\n# 方法1\n# i = 1 # 完全数的开始值\n# while True:\n# # 判断是否是完全数,如果是则打印 \n# L = [] # 每次循环开始都创建一个新列表,用来存因数\n# for x in range(1, i):\n# if i % x == 0: # 如果x是i的因数\n# L.append(x) # 放在列表中\n# # 此时L列表是中i所有的因数\n# if sum(L) == i:\n# print(i, \"是完全数\")\n\n# i += 1\n\n# 方法2 用函数来求完全数,增加程序的可读性 \n\ndef is_perfect_number(i):\n L = [] # 每次循环开始都创建一个新列表,用来存因数\n for x in range(1, i):\n if i % x == 0: # 如果x是i的因数\n L.append(x) # 放在列表中\n # 此时L列表是中i所有的因数\n if sum(L) == i:\n return True\n return False\n\n\ndef main():\n # 此函数用来计算所有的完全数\n i = 1 # 完全数的开始值\n while True:\n # 判断是否是完全数,如果是则打印 \n if is_perfect_number(i):\n print(i, \"是完全数\")\n\n i += 1\n\nmain()","sub_path":"第一阶段/3. Python02/day03/day02_exercise/03_perfect_number.py","file_name":"03_perfect_number.py","file_ext":"py","file_size_in_byte":1393,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"16048813","text":"import pygame\nimport time\nfrom random import*\nfrom LoadMusic import*\nfrom bullets import*\nfrom enemies import BasePlane\n\n# define colors\nWHITE = (255,255,255)\nBLACK = (0,0,0)\nRED = (255,0,0)\nPINK = (255, 102, 153)\nORANGE = (255, 51, 0)\nYELLOW = (255, 255, 102)\nLIGHT_GREEN = (102, 255, 102)\n# size of window\nScreen_W = 600\nScreen_H = 900\n\n# check when the hero plane is destroyed\ndef check_hero_reset(em_list,heroPlane,boss):\n\tfor em in em_list: # collide with enemies\n\t\tif(em.y+60 >= heroPlane.y and em.y <= heroPlane.y):\n\t\t\tif(heroPlane.x+60 > em.x and heroPlane.x +60 <em.x+50):\n\t\t\t\tem.reset()\n\t\t\t\theroPlane.active = False\n\t\t\telif(heroPlane.x < em.x+50 and heroPlane.x > em.x):\n\t\t\t\tem.reset()\n\t\t\t\theroPlane.active = False\n\t\t\telif(heroPlane.x +30 <= em.x+50 and heroPlane.x+30 >= em.x):\n\t\t\t\tem.reset()\n\t\t\t\theroPlane.active = False\t\t\n\t\tif(heroPlane.y+50 >= em.y and heroPlane.y <= em.y):\n\t\t\tif(heroPlane.x+60 > em.x and heroPlane.x +60 <em.x+50):\n\t\t\t\tem.reset()\n\t\t\t\theroPlane.active = False\n\t\t\telif(heroPlane.x < em.x+50 and heroPlane.x > em.x):\n\t\t\t\tem.reset()\n\t\t\t\theroPlane.active = False\n\t\t\telif(heroPlane.x +30 <= em.x+50 and heroPlane.x+30 >= em.x):\n\t\t\t\tem.reset()\n\t\t\t\theroPlane.active = False\n\t\t# collide with enemies' bullets\t\n\t\tfor bullet in em.bulletList:\n\t\t\tif bullet.y+10 >= heroPlane.y and bullet.y+10 <= heroPlane.y+50:\n\t\t\t\tif bullet.x >= heroPlane.x and bullet.x <= heroPlane.x+52:\n\t\t\t\t\theroPlane.active = False\n\t\t\t\t\tem.bulletList.remove(bullet)\n\t\n\tif boss.active:\n\t\t# collide with boss\n\t\tif(boss.y+330 >= heroPlane.y and boss.y <= heroPlane.y):\n\t\t\tif(heroPlane.x+60 > boss.x and heroPlane.x +60 <boss.x+250):\n\t\t\t\theroPlane.active = False\n\t\t\telif(heroPlane.x < boss.x+250 and heroPlane.x > boss.x):\n\t\t\t\theroPlane.active = False\n\t\t\telif(heroPlane.x +30 <= boss.x+250 and heroPlane.x+30 >= boss.x):\n\t\t\t\theroPlane.active = False\t\t\n\t\tif(heroPlane.y+50 >= boss.y and heroPlane.y <= boss.y):\n\t\t\tif(heroPlane.x+60 > boss.x and heroPlane.x +60 <boss.x+250):\n\t\t\t\theroPlane.active = False\n\t\t\telif(heroPlane.x < boss.x+250 and heroPlane.x > boss.x):\n\t\t\t\theroPlane.active = False\n\t\t\telif(heroPlane.x +30 <= boss.x+250 and heroPlane.x+30 >= boss.x):\n\t\t\t\theroPlane.active = False\n\t\t# collide with boss bullets\n\t\tfor b in boss.bulletList1:\n\t\t\tif b.y + 
12 >= heroPlane.y and b.y + 12 <= heroPlane.y +50:\n\t\t\t\tif b.x -5 >= heroPlane.x and b.x + 5 <= heroPlane.x + 60:\n\t\t\t\t\theroPlane.active = False\n\t\t\t\t\tboss.bulletList1.remove(b)\n\t\t# collide with boss missiles\n\t\tfor m in boss.missileList:\n\t\t\tif int(m.y1) >= heroPlane.y and int(m.y1) <= heroPlane.y+50:\n\t\t\t\tif int(m.x1) >= heroPlane.x and int(m.x1) <= heroPlane.x+60:\n\t\t\t\t\theroPlane.active = False\n\t\t\t\t\tboss.missileList.remove(m)\n\n# class for hero plane\nclass HeroPlane(BasePlane):\n\tdef __init__(self,screen):\n\t\t# spawning location\n\t\tself.x = int(Screen_W/2-30)\n\t\tself.y = 850\n\n\t\tself.screen = screen\n\t\t# total lives for hp\n\t\tself.life = 3\n\t\t# load images\n\t\tself.image1 = pygame.image.load(\"./planes/hp1.gif\").convert()\n\t\tself.image2 = pygame.image.load(\"./planes/hp2.gif\").convert()\n\t\tself.image3 = pygame.image.load(\"./planes/hp3.gif\").convert()\n\t\tself.image4 = pygame.image.load(\"./planes/hp4.gif\").convert()\n\t\tself.image5 = pygame.image.load(\"./planes/hp5.gif\").convert()\n\n\t\tself.bulletList = list()\n\n\t\t# initial score\n\t\tself.score = 0\n\n\t\tself.active = True\n\n\t\tself.dest_images = list()\n\t\t# destroying images\n\t\tself.dest_images.extend([\n\t\t\t\tpygame.image.load('./planes/heroexplode1.gif').convert(),\n\t\t\t\tpygame.image.load('./planes/heroexplode2.gif').convert(),\n\t\t\t\tpygame.image.load('./planes/h_final1.gif').convert()\n\n\t\t\t])\n\t\n\t# display the hero plane\n\tdef display(self,delay):\n\t\t# count for different images of hero plane\n\t\tif(delay <= 60 and delay >54):\n\t\t\tself.screen.blit(self.image1,(self.x,self.y))\n\t\telif(delay <= 54 and delay >48):\n\t\t\tself.screen.blit(self.image2,(self.x,self.y))\t\t\t\t\n\t\telif(delay <= 48 and delay > 42):\n\t\t\tself.screen.blit(self.image3,(self.x,self.y))\n\t\telif(delay <= 42 and delay > 36):\n\t\t\tself.screen.blit(self.image4,(self.x,self.y))\n\t\telif(delay <= 36 and delay > 30):\n\t\t\tself.screen.blit(self.image5,(self.x,self.y))\n\t\telif(delay <= 30 and delay > 24):\n\t\t\tself.screen.blit(self.image5,(self.x,self.y))\n\t\telif(delay <= 24 and delay > 18):\n\t\t\tself.screen.blit(self.image4,(self.x,self.y))\n\t\telif(delay <= 18 and delay > 12):\n\t\t\tself.screen.blit(self.image3,(self.x,self.y))\n\t\telif(delay <= 12 and delay > 6):\n\t\t\tself.screen.blit(self.image2,(self.x,self.y))\n\t\telif(delay <= 6 and delay > 0):\n\t\t\tself.screen.blit(self.image1,(self.x,self.y))\n\n\n\t\tNoUseBullets = list()\n\n\t\t# clear the useless bullets\n\t\tif(len(self.bulletList)>0):\n\t\t\tfor b in self.bulletList:\n\t\t\t\tif b.checkBullet() == True:\n\t\t\t\t\tNoUseBullets.append(b)\n\t\t\n\t\tif(len(NoUseBullets)>0):\n\t\t\tfor i in NoUseBullets:\n\t\t\t\tself.bulletList.remove(i)\n\n\t\t# display and move bullets\n\t\tfor b in self.bulletList:\n\t\t\tb.display()\n\t\t\tb.move()\n\n\t# move horizontally\n\tdef moveHorz(self,changeX):\n\t\tself.x +=changeX\n\n\t# move vertically\n\tdef moveVert(self,changeY):\n\t\tself.y +=changeY\n\n\t# cant move out of boundary\n\tdef checkBound(self):\n\t\tif(self.x <= 0):\n\t\t\tself.x =0\n\t\telif(self.x >= 540):\n\t\t\tself.x = 540\n\n\t\tif(self.y <=0):\n\t\t\tself.y =0\n\t\telif(self.y >=840):\n\t\t\tself.y = 840\n\n\t# shoot bullets\n\tdef shoot(self):\n\t\thb_sound.play()\n\t\tnewBullet = HeroBullet(self.screen,self.x,self.y)\n\t\tself.bulletList.append(newBullet)\n\n\t# reset the heroplane\n\tdef reset(self):\n\t\tself.x = int(Screen_W/2-30)\n\t\tself.y = 850\n\t\tif self.life > 
0:\n\t\t\tself.life -=1\n\n\t\tself.active = True\n\t\n\t# check whether game over\n\tdef check_end(self):\n\t\tif self.life <= 0:\n\t\t\treturn True\n\t\telse:\n\t\t\treturn False\n","sub_path":"heroPlane.py","file_name":"heroPlane.py","file_ext":"py","file_size_in_byte":5600,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"256989386","text":"from typing import Optional\nimport collections\nfrom model import MuZeroNetwork\n\nKnownBounds = collections.namedtuple('KnownBounds', ['min', 'max'])\n\nclass MuZeroConfig(object):\n def __init__(self,\n game,\n action_space_size: int,\n max_moves: int,\n discount: float,\n dirichlet_alpha: float,\n num_simulations: int,\n num_training_loop: int,\n num_epochs,\n batch_size: int,\n td_steps: int,\n num_train_episodes,\n num_eval_episodes,\n lr_init: float,\n lr_decay_steps: float,\n max_priority: bool,\n visit_softmax_temperature_fn,\n network_args,\n result_path,\n known_bounds: Optional[KnownBounds] = None,\n ):\n\n ### Self-Play\n self.game = game\n self.action_space_size = action_space_size\n self.num_train_episodes = num_train_episodes\n self.eval_episodes = num_eval_episodes\n\n self.visit_softmax_temperature_fn = visit_softmax_temperature_fn\n self.max_moves = max_moves\n self.num_simulations = num_simulations\n self.num_training_loop = num_training_loop\n self.discount = discount\n\n # Root prior exploration noise.\n self.root_dirichlet_alpha = dirichlet_alpha\n self.root_exploration_fraction = 0.25\n\n # UCB formula\n self.pb_c_base = 19652\n self.pb_c_init = 1.25\n\n # If we already have some information about which values occur in the\n # environment, we can use them to initialize the rescaling.\n # This is not strictly necessary, but establishes identical behaviour to\n # AlphaZero in board games.\n self.known_bounds = known_bounds\n\n ### Training\n self.training_steps = int(1000e3)\n self.checkpoint_interval = int(1e3)\n self.window_size = int(1e6)\n self.batch_size = batch_size\n self.num_unroll_steps = 10\n self.td_steps = td_steps\n self.max_priority = max_priority\n\n self.weight_decay = 1e-4\n self.momentum = 0.9\n\n # Exponential learning rate schedule\n self.lr_init = lr_init\n self.lr_decay_rate = 0.1\n self.lr_decay_steps = lr_decay_steps\n\n self.result_path = result_path\n self.device = \"cuda\"\n self.step_counter = 0\n\n self.support_size = network_args[\"support_size\"]\n self.encoding_size = network_args[\"encoding_size\"]\n self.fc_representation_layers = network_args[\"rep_hidden\"]\n self.fc_dynamics_layers = network_args[\"dyn_hidden\"]\n self.fc_reward_layers = network_args[\"rew_hidden\"] \n self.fc_value_layers = network_args[\"val_hidden\"] \n self.fc_policy_layers = network_args[\"pol_hidden\"]\n self.observation_shape = network_args[\"observation_shape\"] \n self.action_space = [i for i in range(self.action_space_size)]\n self.players = [i for i in range(1)]\n\n def new_game(self):\n return self.game(self.action_space_size, self.discount)\n\n def new_network(self):\n return MuZeroNetwork(self)\n\n def incr_counter(self):\n self.step_counter += 1\n\n def get_counter(self):\n return self.step_counter","sub_path":"config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":3371,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"596415812","text":"class Solution:\n def flipAndInvertImage(self, A):\n \"\"\"\n :type A: List[List[int]]\n :rtype: List[List[int]]\n \"\"\"\n res=[]\n for i in A:\n val 
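# MuZeroConfig above stores a visit_softmax_temperature_fn but the record does
# not define one. A common shape for it is a step-based schedule; the
# thresholds below and the (num_moves, training_steps) signature are
# illustrative assumptions borrowed from MuZero-style reference code, not
# taken from this record:

def visit_softmax_temperature(num_moves, training_steps):
    # Sample actions broadly early in training, act near-greedily later.
    if training_steps < 50e3:
        return 1.0
    elif training_steps < 75e3:
        return 0.5
    else:
        return 0.25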
= i[::-1]\n            val = [1 if x==0 else 0 for x in val]\n            res.append(val)\n        return res    \n","sub_path":"Strings/Flipping_an_image.py","file_name":"Flipping_an_image.py","file_ext":"py","file_size_in_byte":302,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"227584203","text":"# -*- coding: cp936 -*-\nimport requests\nfrom datetime import date, time, datetime, timedelta\nimport database\n\nclass Save_users(object):\n\n    # fetch info for the 10 most-followed users\n    # parameters: none\n    # returns: none\n    def Get_users(self):\n        mydatabase=database.Database()\n        users=[]\n        user_url=[]\n        follower=[]\n        location=[]\n        repository=[]\n        repository_url=[]\n        now=datetime.now()\n        # schedule the next GitHub API call 30 s from now\n        response=requests.get('https://api.github.com/search/users?q=+followers:%3E10000&sort=followers').json()\n        period=timedelta(days=0,hours=0,minutes=0,seconds=30)\n        next_time=now+period\n        strnext_time=next_time.strftime('%Y-%m-%d %H:%M:%S')\n        for i in range(0,10):\n            users.append(response['items'][i]['login'])\n            user_url.append('https://github.com/'+response['items'][i]['login'])\n        j=0\n        while True and j<10:\n            iter_now=datetime.now()\n            iter_now_time = iter_now.strftime('%Y-%m-%d %H:%M:%S')\n            if str(iter_now_time)==str(strnext_time):\n                user_response=requests.get('https://api.github.com/users/'+users[j]).json()\n                follower.append(user_response['followers'])\n                location.append(user_response['location'])\n                repo_response=requests.get('https://api.github.com/search/repositories?q=user:'+users[j]+'&sort=stars').json()\n                repository.append(repo_response['items'][0]['name'])\n                repository_url.append('https://github.com/'+users[j]+'/'+repo_response['items'][0]['name'])\n                iter_now=datetime.now()\n                iter_time=iter_now+period\n                strnext_time = iter_time.strftime('%Y-%m-%d %H:%M:%S')\n                j=j+1\n            continue\n        # update the database\n        for i in range(0,10):\n            mydatabase.update_users_data(i+1,users[i],user_url[i],follower[i],location[i],repository[i],repository_url[i])\n","sub_path":"Save_users.py","file_name":"Save_users.py","file_ext":"py","file_size_in_byte":1991,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"110243547","text":"from opengever.activity.events import NotificationEvent\nfrom opengever.activity.events import WatcherAddedEvent\nfrom opengever.activity.model import Activity\nfrom opengever.testing import IntegrationTestCase\nfrom zope.event import notify\n\n\nclass TestNotificationEventHandler(IntegrationTestCase):\n\n    features = (\n        'activity',\n    )\n\n    def test_adds_new_activity(self):\n        self.login(self.dossier_responsible)\n        notify(NotificationEvent(\n            self.task,\n            'task-transition-open-in-progress',\n            {'en': 'Task accepted'},\n            {'en': 'Task accepted by Robert Ziegler.'},\n            self.dossier_responsible,\n            {'en': 'Lorem ipsum'},\n        ))\n        activity = Activity.query.first()\n        self.assertEquals(self.task, activity.resource.oguid.resolve_object())\n        self.assertEquals('task-transition-open-in-progress', activity.kind)\n        self.assertEquals('Task accepted by Robert Ziegler.', activity.summary)\n        self.assertEquals('Task accepted', activity.label)\n        self.assertEquals('robert.ziegler', activity.actor_id)\n        self.assertEquals('Lorem ipsum', activity.description)\n\n\nclass TestDisabledNotificationEventHandler(IntegrationTestCase):\n\n    def test_event_is_ignored(self):\n        self.login(self.dossier_responsible)\n        notify(NotificationEvent(\n            self.task,\n            'task-transition-open-in-progress',\n            'Task accepted.',\n            self.dossier_responsible,\n            'Lorem ipsum',\n        ))\n        self.assertEquals(0, 
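# The pacing loop in Save_users.py above busy-waits until a formatted
# timestamp string compares equal, which burns CPU and misses its slot unless
# an iteration happens to land on that exact second. A sketch of the usual
# alternative, simply sleeping between requests (the 30 s pause mirrors the
# period used above; everything else is illustrative):

import time
import requests

def fetch_followers(logins, pause_seconds=30):
    followers = {}
    for login in logins:
        resp = requests.get('https://api.github.com/users/' + login).json()
        followers[login] = resp.get('followers')
        time.sleep(pause_seconds)  # simple client-side rate limiting
    return followers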
Activity.query.count())\n\n\nclass TestWatcherAddedEventHandler(IntegrationTestCase):\n features = ('activity',)\n\n def test_adds_watcher_added_activity(self):\n self.login(self.regular_user)\n notify(WatcherAddedEvent(self.task.oguid, self.meeting_user.getId()))\n activity = Activity.query.first()\n self.assertEqual('task-watcher-added', activity.kind)\n self.assertEqual('Added as watcher of task', activity.label)\n self.assertEqual('kathi.barfuss', activity.actor_id)\n self.assertEqual(u'Added as watcher of task by <a href=\"http://nohost/plone/'\n u'@@user-details/kathi.barfuss\">B\\xe4rfuss K\\xe4thi (kathi.barfuss)</a>',\n activity.summary)\n","sub_path":"opengever/activity/tests/test_handlers.py","file_name":"test_handlers.py","file_ext":"py","file_size_in_byte":2299,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"7545671","text":"import re\nimport json\nimport logging\nfrom uuid import uuid4\nfrom telegram import InlineQueryResultArticle, ParseMode, InputTextMessageContent\nfrom telegram.ext import Updater, InlineQueryHandler, CommandHandler\n\nlogging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',\n level=logging.INFO)\n\nlogger = logging.getLogger(__name__)\n\ndef start(bot, update):\n update.message.reply_text('I AM ALIVE!')\n\n\ndef help(bot, update):\n update.message.reply_text(\"CAN'T HELP YOu M8\")\n\n\ndef inlinequery(bot, update):\n query = update.inline_query.query\n results = list()\n \n # Have fun retrieving cool stuff to the user\n # results list elements could be any of:\n # InlineQueryResultArticle\n # InlineQueryResultAudio \n # InlineQueryResultContact \n # InlineQueryResultDocument \n # InlineQueryResultGame \n # InlineQueryResultGif\n # InlineQueryResultLocation\n # InlineQueryResultPhoto\n # InlineQueryResultVenue\n # InlineQueryResultVideo\n # InlineQueryResultVoice\n\n results.append()\n update.inline_query.answer(results)\n\n\ndef error(bot, update, error):\n logger.warn('Update \"%s\" caused error \"%s\"' % (update, error))\n\n\ndef main():\n updater = Updater(json.load(open('config.json'))['TOKEN'])\n\n dp = updater.dispatcher\n\n # adding commands and handlers\n dp.add_handler(CommandHandler(\"start\", start))\n dp.add_handler(CommandHandler(\"help\", help))\n\n # does the magic with inline queries\n dp.add_handler(InlineQueryHandler(inlinequery))\n\n dp.add_error_handler(error)\n\n updater.idle()\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"inline_bot.py","file_name":"inline_bot.py","file_ext":"py","file_size_in_byte":1652,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"500349078","text":"\"\"\"A simple example to demonstrate a thing with multiple ports. The \nthing samples values from a sensor and sends them on different output\nports depending on the divisibility of the value. 
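# Note that results.append() in the inline_bot.py record above is called with
# no argument, which raises TypeError as soon as an inline query arrives. A
# sketch of appending one valid article result, in the same pre-v12
# python-telegram-bot style the record uses (the title and fallback text are
# placeholders):

from uuid import uuid4
from telegram import InlineQueryResultArticle, InputTextMessageContent

def build_results(query):
    return [
        InlineQueryResultArticle(
            id=str(uuid4()),  # each inline result needs a unique id
            title='Echo',
            input_message_content=InputTextMessageContent(query or 'empty query'),
        )
    ]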
See docs/ports.rst\nfor a more detailed explanation.\n\"\"\"\nimport random\nimport asyncio\nfrom thingflow.base import OutputThing, InputThing, Scheduler,\\\n SensorAsOutputThing\n\nclass MultiPortOutputThing(OutputThing, InputThing):\n def __init__(self, previous_in_chain):\n super().__init__(ports=['divisible_by_two', 'divisible_by_three',\n 'other'])\n # connect to the previous filter\n self.disconnect_from_upstream = previous_in_chain.connect(self)\n \n def on_next(self, x):\n val = int(round(x.val))\n if (val%2)==0:\n self._dispatch_next(val, port='divisible_by_two')\n if (val%3)==0:\n self._dispatch_next(val, port='divisible_by_three')\n if (val%3)!=0 and (val%2)!=0:\n self._dispatch_next(val, port='other')\n\n def on_completed(self):\n self._dispatch_completed(port='divisible_by_two')\n self._dispatch_completed(port='divisible_by_three')\n self._dispatch_completed(port='other')\n \n def on_error(self, e):\n self._dispatch_error(e, port='divisible_by_two')\n self._dispatch_error(e, port='divisible_by_three')\n self._dispatch_error(e, port='other')\n \n def __repr__(self):\n return 'MultiPortOutputThing()'\n\nclass RandomSensor:\n def __init__(self, sensor_id, mean=100.0, stddev=20.0, stop_after_events=None):\n self.sensor_id = sensor_id\n self.mean = mean\n self.stddev = stddev\n self.stop_after_events = stop_after_events\n if stop_after_events is not None:\n def generator():\n for i in range(stop_after_events):\n yield random.gauss(mean, stddev)\n else: # go on forever\n def generator():\n while True:\n yield random.gauss(mean, stddev)\n self.generator = generator()\n\n def sample(self):\n return self.generator.__next__()\n\n def __repr__(self):\n if self.stop_after_events is None:\n return 'RandomSensor(%s, mean=%s, stddev=%s)' % \\\n (self.sensor_id, self.mean, self.stddev)\n else:\n return 'RandomSensor(%s, mean=%s, stddev=%s, stop_after_events=%s)' % \\\n (self.sensor_id, self.mean, self.stddev, self.stop_after_events)\n\n\nscheduler = Scheduler(asyncio.get_event_loop())\nsensor = SensorAsOutputThing(RandomSensor(1, mean=10, stddev=5,\n stop_after_events=10))\nmtthing = MultiPortOutputThing(sensor)\nmtthing.connect(lambda v: print(\"even: %s\" % v),\n port_mapping=('divisible_by_two', 'default'))\nmtthing.connect(lambda v: print(\"divisible by three: %s\" % v),\n port_mapping=('divisible_by_three', 'default'))\nmtthing.connect(lambda v: print(\"not divisible: %s\" % v),\n port_mapping=('other', 'default'))\nmtthing.print_downstream()\nscheduler.schedule_recurring(sensor)\nscheduler.run_forever()\n\n","sub_path":"examples/multi_port_example.py","file_name":"multi_port_example.py","file_ext":"py","file_size_in_byte":3182,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"482367414","text":"#!/usr/bin/env python\n\n\"\"\"\nPlot EucFACE soil moisture\n\nThat's all folks.\n\"\"\"\n\n__author__ = \"MU Mengyuan\"\n__version__ = \"2019-7-18\"\n\n\nimport os\nimport sys\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport matplotlib.colors\nfrom matplotlib import ticker\nimport datetime as dt\nimport netCDF4 as nc\nfrom scipy.interpolate import griddata\n\ndef main(fobs, fcable):\n\n neo = pd.read_csv(fobs, usecols = ['Ring','Depth','Date','VWC'])\n # usecols : read specific columns from CSV\n\n # translate datetime\n neo['Date'] = pd.to_datetime(neo['Date'],format=\"%d/%m/%y\",infer_datetime_format=False)\n # unit='D', origin=pd.Timestamp('2012-01-01')\n\n # turn datetime64[ns] into timedelta64[ns] since 
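# RandomSensor above hides a generator behind sample() so one interface covers
# both a finite run (stop_after_events) and an endless stream. The core trick,
# stripped to a few lines (the helper names are mine, not the library's):

import random

def make_sampler(mean, stddev, stop_after=None):
    def finite():
        for _ in range(stop_after):
            yield random.gauss(mean, stddev)
    def endless():
        while True:
            yield random.gauss(mean, stddev)
    gen = finite() if stop_after is not None else endless()
    return lambda: next(gen)

sample = make_sampler(10, 5, stop_after=3)
print([sample() for _ in range(3)])  # three draws; a fourth raises StopIteration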
2011-12-31, e.g. 2012-1-1 as 1 days\n neo['Date'] = neo['Date'] - pd.datetime(2011,12,31)\n\n # extract days as integers from a timedelta64[ns] object\n neo['Date'] = neo['Date'].dt.days\n\n # sort by 'Date','Depth'\n neo = neo.sort_values(by=['Date','Depth'])\n\n # divide neo into groups\n subset_amb = neo[neo['Ring'].isin(['R2','R3','R6'])]\n subset_ele = neo[neo['Ring'].isin(['R1','R4','R5'])]\n subset_R1 = neo[neo['Ring'].isin(['R1'])]\n subset_R2 = neo[neo['Ring'].isin(['R2'])]\n subset_R3 = neo[neo['Ring'].isin(['R3'])]\n subset_R4 = neo[neo['Ring'].isin(['R4'])]\n subset_R5 = neo[neo['Ring'].isin(['R5'])]\n subset_R6 = neo[neo['Ring'].isin(['R6'])]\n\n # calculate the mean of every group ( and unstack #.unstack(level=0)\n neo_mean = neo.groupby(by=[\"Depth\",\"Date\"]).mean()\n amb_mean = subset_amb.groupby(by=[\"Depth\",\"Date\"]).mean()\n ele_mean = subset_ele.groupby(by=[\"Depth\",\"Date\"]).mean()\n R1_mean = subset_R1.groupby(by=[\"Depth\",\"Date\"]).mean()\n R2_mean = subset_R2.groupby(by=[\"Depth\",\"Date\"]).mean()\n R3_mean = subset_R3.groupby(by=[\"Depth\",\"Date\"]).mean()\n R4_mean = subset_R4.groupby(by=[\"Depth\",\"Date\"]).mean()\n R5_mean = subset_R5.groupby(by=[\"Depth\",\"Date\"]).mean()\n R6_mean = subset_R6.groupby(by=[\"Depth\",\"Date\"]).mean()\n\n # remove 'VWC'\n neo_mean = neo_mean.xs('VWC', axis=1, drop_level=True)\n amb_mean = amb_mean.xs('VWC', axis=1, drop_level=True)\n ele_mean = ele_mean.xs('VWC', axis=1, drop_level=True)\n R1_mean = R1_mean.xs('VWC', axis=1, drop_level=True)\n R2_mean = R2_mean.xs('VWC', axis=1, drop_level=True)\n R3_mean = R3_mean.xs('VWC', axis=1, drop_level=True)\n R4_mean = R4_mean.xs('VWC', axis=1, drop_level=True)\n R5_mean = R5_mean.xs('VWC', axis=1, drop_level=True)\n R6_mean = R6_mean.xs('VWC', axis=1, drop_level=True)\n # 'VWC' : key on which to get cross section\n # axis=1 : get cross section of column\n # drop_level=True : returns cross section without the multilevel index\n\n #neo_mean = np.transpose(neo_mean)\n\n vars = amb_mean\n# ___________________ From Pandas to Numpy __________________________\n date_start = pd.datetime(2013,1,1) - pd.datetime(2011,12,31)\n date_end = pd.datetime(2019,5,11) - pd.datetime(2011,12,31)\n# date_start = pd.datetime(2012,4,30) - pd.datetime(2011,12,31)\n# date_end = pd.datetime(2019,5,11) - pd.datetime(2011,12,31)\n date_start = date_start.days\n date_end = date_end.days\n\n # Interpolate\n x = np.concatenate((vars[(25)].index.values, \\\n vars.index.get_level_values(1).values, \\\n vars[(450)].index.values )) # time\n y = np.concatenate(([0]*len(vars[(25)]), \\\n vars.index.get_level_values(0).values, \\\n [460]*len(vars[(25)]) ))\n value = np.concatenate((vars[(25)].values, vars.values, vars[(450)].values))\n # get_level_values(1) : Return an Index of values for requested level.\n # add Depth = 0 and Depth = 460\n\n\n # add the 12 depths to 0\n X = np.arange(date_start,date_end,1) # 2012-4-30 to 2019-5-11\n Y = np.arange(0,465,5)\n\n grid_X, grid_Y = np.meshgrid(X,Y)\n print(grid_X.shape)\n # interpolate\n grid_data = griddata((x, y) , value, (grid_X, grid_Y), method='cubic')\n #'cubic')#'linear')#'nearest')\n print(grid_data.shape)\n\n# ____________________ Plot obs _______________________\n fig = plt.figure(figsize=[15,10])\n fig.subplots_adjust(hspace=0.1)\n fig.subplots_adjust(wspace=0.05)\n plt.rcParams['text.usetex'] = False\n plt.rcParams['font.family'] = \"sans-serif\"\n plt.rcParams['font.sans-serif'] = \"Helvetica\"\n plt.rcParams['axes.labelsize'] = 14\n 
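# The date handling above reduces calendar dates to integer day offsets from
# 2011-12-31 before interpolating. The same transform in isolation
# (pd.datetime, as used in the record, is deprecated in recent pandas;
# pd.Timestamp is the current spelling):

import pandas as pd

dates = pd.Series(pd.to_datetime(['01/01/12', '02/01/12'], format='%d/%m/%y'))
day_offsets = (dates - pd.Timestamp(2011, 12, 31)).dt.days
print(day_offsets.tolist())  # [1, 2]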
plt.rcParams['font.size'] = 14\n plt.rcParams['legend.fontsize'] = 10\n plt.rcParams['xtick.labelsize'] = 14\n plt.rcParams['ytick.labelsize'] = 14\n\n almost_black = '#262626'\n # change the tick colors also to the almost black\n plt.rcParams['ytick.color'] = almost_black\n plt.rcParams['xtick.color'] = almost_black\n\n # change the text colors also to the almost black\n plt.rcParams['text.color'] = almost_black\n\n # Change the default axis colors from black to a slightly lighter black,\n # and a little thinner (0.5 instead of 1)\n plt.rcParams['axes.edgecolor'] = almost_black\n plt.rcParams['axes.labelcolor'] = almost_black\n\n ax1 = fig.add_subplot(311) #(nrows=2, ncols=2, index=1)\n\n cmap = plt.cm.viridis_r\n #######\n #plt.imshow(amb_mean, cmap=cmap, vmin=0, vmax=40, origin=\"upper\", interpolation='nearest')\n #plt.show()\n ######\n #img = ax1.imshow(grid_data, cmap=cmap, vmin=0, vmax=40, origin=\"upper\", interpolation='nearest')\n #'spline16')#'nearest')\n levels = [0,5,10,15,20,25,30,35,40,45,50]\n img = ax1.contourf(grid_data, cmap=cmap, origin=\"upper\", levels=levels)\n cbar = fig.colorbar(img, orientation=\"vertical\", pad=0.1, shrink=.6) #\"horizontal\"\n cbar.set_label('VWC Obs (%)')#('Volumetric soil water content (%)')\n tick_locator = ticker.MaxNLocator(nbins=5)\n cbar.locator = tick_locator\n cbar.update_ticks()\n\n # every second tick\n ax1.set_yticks(np.arange(len(Y))[::10])\n Y_labels = np.flipud(Y)\n ax1.set_yticklabels(Y_labels[::10])\n plt.setp(ax1.get_xticklabels(), visible=False)\n\n #ax1.set_xticks(np.arange(len(X)))\n #cleaner_dates = X\n #ax1.set_xticklabels(cleaner_dates)\n\n #datemark = np.arange(np.datetime64('2013-01-01','D'), np.datetime64('2017-01-01','D'))\n #xtickslocs = ax1.get_xticks()\n\n #for i in range(len(datemark)):\n # print(xtickslocs[i], datemark[i])\n\n #cleaner_dates = [\"2014\",\"2015\",\"2016\",]\n #xtickslocs = [365,730,1095]\n\n #cleaner_dates = [\"2012\",\"2013\",\"2014\",\"2015\",\"2016\",\"2017\",\"2018\",\"2019\"]\n #[\"2012-04\",\"2013-01\",\"2014-01\",\"2015-01\",\"2016-01\",\\\n # \"2017-03\",\"2018-01\",\"2019-01\",]\n #xtickslocs = [0,246,611,976,1341,1707,2072,2437]\n\n #ax1.set(xticks=xtickslocs, xticklabels=cleaner_dates) ####\n ax1.set_ylabel(\"Depth (cm)\")\n ax1.axis('tight')\n\n plt.show()\n\n# _________________________ CABLE ___________________________\n cable = nc.Dataset(fcable, 'r')\n\n Time = nc.num2date(cable.variables['time'][:],cable.variables['time'].units)\n SoilMoist = pd.DataFrame(cable.variables['SoilMoist'][:,:,0,0], columns=[1.,4.5,10.,19.5,41,71,101,131,161,191,221,273.5,386])\n #columns =[17.69,53.07,88.45,\\\n # 123.83,159.21,194.59,229.97,265.35,300.73,336.11,371.49,406.87,442.25])\n #columns=[1.,4.5,10.,19.5,41,71,101,131,161,191,221,273.5,386])\n SoilMoist['dates'] = Time\n SoilMoist = SoilMoist.set_index('dates')\n SoilMoist = SoilMoist.resample(\"D\").agg('mean')\n SoilMoist.index = SoilMoist.index - pd.datetime(2011,12,31)\n SoilMoist.index = SoilMoist.index.days\n SoilMoist = SoilMoist.stack() # turn multi-columns into one-column\n SoilMoist = SoilMoist.reset_index() # remove index 'dates'\n SoilMoist = SoilMoist.rename(index=str, columns={\"level_1\": \"Depth\"})\n SoilMoist = SoilMoist.sort_values(by=['Depth','dates'])\n # rename columns level_1 to Depth\n #SoilMoist = SoilMoist.set_index('Depth')\n\n # Interpolate\n date_start_cable = pd.datetime(2013,1,1) - pd.datetime(2011,12,31)\n date_end_cable = pd.datetime(2019,5,11) - pd.datetime(2011,12,31)\n date_start_cable = 
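# The observation panel above combines contourf with explicit levels and a
# colorbar whose ticks are thinned via MaxNLocator. A minimal standalone
# version of that recipe (random data stands in for the soil-moisture grid):

import numpy as np
import matplotlib.pyplot as plt
from matplotlib import ticker

data = np.random.rand(20, 20) * 50
fig, ax = plt.subplots()
img = ax.contourf(data, levels=[0, 10, 20, 30, 40, 50], cmap=plt.cm.viridis_r)
cbar = fig.colorbar(img, orientation='vertical', shrink=0.6)
cbar.locator = ticker.MaxNLocator(nbins=5)
cbar.update_ticks()
fig.savefig('contour_sketch.png')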
date_start_cable.days\n date_end_cable = date_end_cable.days\n\n ntimes = len(np.unique(SoilMoist['dates']))\n dates = np.unique(SoilMoist['dates'].values)\n\n x_cable = np.concatenate(( dates, SoilMoist['dates'].values,dates)) # Time\n y_cable = np.concatenate(([0]*ntimes,SoilMoist['Depth'].values,[460]*ntimes))# Depth\n value_cable = np.concatenate(( SoilMoist.iloc[:ntimes,2].values, \\\n SoilMoist.iloc[:,2].values, \\\n SoilMoist.iloc[-(ntimes):,2].values ))\n value_cable = value_cable*100.\n # add the 12 depths to 0\n X_cable = np.arange(date_start_cable,date_end_cable,1) # 2013-1-1 to 2016-12-31\n Y_cable = np.arange(0,465,5)\n grid_X_cable, grid_Y_cable = np.meshgrid(X_cable,Y_cable)\n\n # interpolate\n grid_cable = griddata((x_cable, y_cable) , value_cable, (grid_X_cable, grid_Y_cable),\\\n method='cubic')\n #'cubic')#'linear')#'nearest')\n\n ax2 = fig.add_subplot(312)#, sharey = ax1)#(nrows=2, ncols=2, index=2, sharey=ax1)\n\n #img2 = ax2.imshow(grid_cable, cmap=cmap, vmin=0, vmax=40, origin=\"upper\", interpolation='nearest')\n #'spline16')#'nearest')\n img2 = ax2.contourf(grid_cable, cmap=cmap, origin=\"upper\", levels=levels)\n cbar2 = fig.colorbar(img2, orientation=\"vertical\", pad=0.1, shrink=.6)\n cbar2.set_label('VWC CABLE (%)')#('Volumetric soil water content (%)')\n tick_locator2 = ticker.MaxNLocator(nbins=5)\n cbar2.locator = tick_locator2\n cbar2.update_ticks()\n\n # every second tick\n ax2.set_yticks(np.arange(len(Y_cable))[::10])\n Y_labels2 = np.flipud(Y_cable)\n ax2.set_yticklabels(Y_labels2[::10])\n plt.setp(ax2.get_xticklabels(), visible=False)\n\n #ax2.set_xticks(np.arange(len(X_cable)))\n #cleaner_dates2 = X_cable\n #ax2.set_xticklabels(cleaner_dates2)\n\n #datemark2 = np.arange(np.datetime64('2013-01-01','D'), np.datetime64('2017-01-01','D'))\n\n #xtickslocs2 = ax2.get_xticks()\n #for i in range(len(datemark2)):\n # print(xtickslocs2[i], datemark2[i])\n\n #cleaner_dates2 = [\"2014\",\"2015\",\"2016\",]\n # [\"2013-01\",\"2014-01\",\"2015-01\",\"2016-01\",]\n #xtickslocs2 = [365,730,1095]\n\n #ax2.set(xticks=xtickslocs2, xticklabels=cleaner_dates2)\n ax2.set_ylabel(\"Depth (cm)\")\n ax2.axis('tight')\n\n# ________________ plot difference _____________________\n ax3 = fig.add_subplot(313)\n difference = grid_cable -grid_data\n\n cmap = plt.cm.BrBG\n\n #img3 = ax3.imshow(difference, cmap=cmap, vmin=-30, vmax=30, origin=\"upper\", interpolation='nearest')\n #'spline16')#'nearest')\n levels = [-30,-25,-20,-15,-10,-5,0,5,10,15,20,25,30]\n img3 = ax3.contourf(difference, cmap=cmap, origin=\"upper\", levels=levels)\n cbar3 = fig.colorbar(img3, orientation=\"vertical\", pad=0.1, shrink=.6)\n cbar3.set_label('CABLE - Obs (%)')\n tick_locator3 = ticker.MaxNLocator(nbins=6)\n cbar3.locator = tick_locator3\n cbar3.update_ticks()\n\n # every second tick\n ax3.set_yticks(np.arange(len(Y_cable))[::10])\n Y_labels3 = np.flipud(Y_cable)\n ax3.set_yticklabels(Y_labels3[::10])\n\n ax3.set_xticks(np.arange(len(X_cable)))\n cleaner_dates3 = X_cable\n ax3.set_xticklabels(cleaner_dates3)\n\n cleaner_dates3 = [\"2014\",\"2015\",\"2016\",\"2017\",\"2018\",\"2019\"]\n # [\"2013-01\",\"2014-01\",\"2015-01\",\"2016-01\",]\n xtickslocs3 = [365,730,1095,1461,1826,2191]\n\n ax3.set(xticks=xtickslocs3, xticklabels=cleaner_dates3)\n ax3.set_ylabel(\"Depth (cm)\")\n ax3.axis('tight')\n\n fig.savefig(\"EucFACE_SW_amb_contour_13uneqlayers.pdf\", bbox_inches='tight', pad_inches=0.1)\n\nif __name__ == \"__main__\":\n\n fobs = 
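# Both panels above rely on scipy's griddata to map scattered (time, depth,
# value) samples onto a regular grid. A tiny self-checking example of what
# that call does:

import numpy as np
from scipy.interpolate import griddata

pts = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])   # four corner samples
vals = np.array([0.0, 1.0, 1.0, 2.0])              # value = x + y at each corner
grid_x, grid_y = np.meshgrid(np.linspace(0, 1, 3), np.linspace(0, 1, 3))
print(griddata(pts, vals, (grid_x, grid_y), method='linear'))
# the centre point interpolates to 1.0, the linear blend of the four corners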
\"/short/w35/mm3972/data/Eucface_data/swc_at_depth/FACE_P0018_RA_NEUTRON_20120430-20190510_L1.csv\"\n fcable = \"/short/w35/mm3972/cable/runs/EucFACE/EucFACE_run/outputs/type_elev_para_ununi/13layers/unequal/gw_on_or_off/EucFACE_amb_out.nc\"\n main(fobs, fcable)\n","sub_path":"plots/plot_eucface_swc_cable_vs_obs.py","file_name":"plot_eucface_swc_cable_vs_obs.py","file_ext":"py","file_size_in_byte":12142,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"373436017","text":"#### Settings ####\nunlabeled_folder = 'Y:/TV Shows/30 Rock/' # folder with unlabeled videos (disc1_title00.mkv, disc1_title01.mkv)\nlabeled_folder = 'Y:/TV Shows/30 Rock/new/' # folder with labeled videos (s01e01.mp4, s01e02.mp4)\nstart_second = 0 # start later if theres a intro right at the start (default: 30)\ncompare_seconds = 1 # higher value = slower but more confident results. higher values might be a problem if the framerates differ (default: 10)\nincrement = (1/23.976)\nmatch_avg = 40 # if avg is lower than this: move on (default: 75)\nskip_already_matched_episodes = True # True will be much faster as we'll skip checking episodes we already got, but could be much unsafer also if there was a wrong match (default: True)\n### End of Settings ###\n\nprint (\"*** Settings ***\")\nprint (\"Unlabeled files folder:\", unlabeled_folder)\nprint (\"Labeled files folder:\", labeled_folder)\nprint (\"Start second:\", start_second)\nprint (\"Compare seconds:\", compare_seconds)\nprint()\n\nvideo_extensions = ('.mkv', '.mp4', '.avi', '.m4v')\nffmpeg_options = '-loglevel error -vf hue=s=0,scale=100:100'\n\nimport re, math, operator, os, sys, random, subprocess, time, tempfile\nfrom PIL import ImageChops, Image\nfrom tqdm import tqdm\nimport numpy as np\nimport cv2\n\nif os.path.isfile('rename.bat'):\n\tos.remove('rename.bat')\nif os.path.isfile('Undo_rename.bat'):\n\tos.remove('Undo_rename.bat')\n\nfolder_for_comparisons = './episode_matches/'\nif not os.path.isdir(folder_for_comparisons):\n\tos.makedirs(folder_for_comparisons)\n\n\ndef episode_from_filename(filename): # https://stackoverflow.com/a/9129611\n\tepisode = re.findall(r\"(?:s|season)(\\d{2})(?:e|x|episode|\\n)(\\d{2})\", filename, re.I)\n\treturn \"S\" + episode[0][0] + \"E\" + episode[0][1]\n\ndef rmsdiff(im1, im2): # https://stackoverflow.com/a/40176818\n\t# 0.0 means identical image, so values closer to 0 are probably matches\n # \"Calculate the root-mean-square difference between two images\"\n diff = ImageChops.difference(im1, im2)\n h = diff.histogram()\n sq = (value*((idx%256)**2) for idx, value in enumerate(h))\n sum_of_squares = sum(sq)\n rms = math.sqrt(sum_of_squares/float(im1.size[0] * im1.size[1]))\n return rms\n\ndef FrameIsBlack(img_path): # im is path\n\tim = cv2.imread(img_path)\n\tgray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)\n\tif np.average(gray) < 20:\n\t\treturn True\n\telse:\n\t\treturn False\n\ndef getLength(video):\n result = subprocess.check_output('ffprobe -i \"' + video + '\" -show_entries format=duration -v quiet -of csv=\"p=0\"', shell=True)\n return result.strip().decode()\n\ndef add_comparison_image(im1, im2, im3, y):\n\tcomparison_image = im3\n\tif comparison_image is None:\n\t\tcomparison_image = Image.new(\"RGB\", ((im1.size[0]*2)+1, im1.size[1]*compare_seconds))\n\tw1, h1 = im1.size\n\tw2, h2 = im2.size\n\tif h2 > h1:\n\t\theight = h2\n\telif h1 > h2:\n\t\theight = h1\n\telse:\n\t\theight = h1\n\ty_offset = y * 
height\n\tcomparison_image.paste(im1,(0,y_offset))\n\tcomparison_image.paste(im2,((w1+1),y_offset))\n\t#comparison_image.show()\n\treturn comparison_image\n\nlowest = 999\nmatched_episodes = []\n\nfor ula in os.listdir(unlabeled_folder):\n\tcomparison_image = None\n\tif not ula.lower().endswith(video_extensions):\n\t\t#print \"skipping \" + ula + \" - not a video\"\n\t\tcontinue\n\tulaPath = os.path.abspath(os.path.join(unlabeled_folder , ula))\n\n\t#print (\"?\", ulaPath)\n\n\tfor la in os.listdir(labeled_folder):\n\n\t\tif not la.lower().endswith(video_extensions):\n\t\t\tcontinue\n\n\t\tif episode_from_filename(la) in matched_episodes and skip_already_matched_episodes is True:\n\t\t\tcontinue\n\n\t\tlaPath = os.path.abspath(os.path.join(labeled_folder , la))\n\t\tep = episode_from_filename(la) # get sxxeyy from labeled episode\n\n\t\t#pbar = tqdm(total=compare_seconds) # start progress bar\n\t\t#pbar.set_description(\"Comparing [\" + ula + \"] to [\" + la + ']')\n\t\t#print (\"Comparing [\", ula, \"] to [\", la, ']')\n\n\t\t# compare length, if theyre very different its very unlikely to be a match and thus we skip it\n\t\tulaLength = float( getLength(ulaPath) )\n\t\tlaLength = float( getLength(laPath) )\n\t\tif (ulaLength/laLength) < 0.96 or (ulaLength/laLength) > 1.04: # 0.96 and 1.04 to compensate for PAL 4% speedup\n\t\t\t#pbar.close()\n\t\t\tprint(\"Length differs too much, skipping...\", ulaLength, laLength)\n\t\t\tcontinue\n\n\t\tdiffs = [] # values of the diffs are stored here, we later avg it\n\t\tsec = start_second \n\t\titeration = 0\n\t\twhile iteration < compare_seconds:\n\t\t\t# set up temp files \n\t\t\ttemp1_file_handle, temp1_filename = tempfile.mkstemp('.jpg')\n\t\t\ttemp2_file_handle, temp2_filename = tempfile.mkstemp('.jpg')\n\t\t\tulaImg = temp1_filename\n\t\t\tlaImg = temp2_filename\n\n\t\t\t# use ffmpeg to screenshot videos to the temp files\n\t\t\t# find first not black ula frame\n\t\t\tsec = 0\n\t\t\tprint('Finding first not black frame of',ulaPath)\n\t\t\tsubprocess.check_output('ffmpeg -y -ss ' + str(sec) + ' -i \"' + ulaPath + '\" -vframes 1 ' + ffmpeg_options + ' ' + ulaImg, shell=True)\n\t\t\twhile FrameIsBlack(ulaImg):\n\t\t\t\tsubprocess.check_output('ffmpeg -y -ss ' + str(sec) + ' -i \"' + ulaPath + '\" -vframes 1 ' + ffmpeg_options + ' ' + ulaImg, shell=True)\n\t\t\t\tsec += increment\n\t\t\tprint('Found at',sec)\n\t\t\t#cv2.imshow('Unlabeled',cv2.imread(ulaImg))\n\n\t\t\tsec = 0\n\t\t\tprint('Finding first not black frame of',laPath)\n\t\t\tsubprocess.check_output('ffmpeg -y -ss ' + str(sec) + ' -i \"' + laPath + '\" -vframes 1 ' + ffmpeg_options + ' ' + laImg, shell=True)\n\t\t\twhile FrameIsBlack(laImg):\n\t\t\t\tsubprocess.check_output('ffmpeg -y -ss ' + str(sec) + ' -i \"' + laPath + '\" -vframes 1 ' + ffmpeg_options + ' ' + laImg, shell=True)\n\t\t\t\tsec += increment\n\t\t\tprint('Found at',sec)\n\t\t\t#cv2.imshow('Labeled',cv2.imread(laImg))\n\t\t\t\n\t\t\t# open the temp fiels and compare them\n\t\t\tim1 = Image.open(ulaImg)\n\t\t\tim2 = Image.open(laImg)\n\t\t\tdiff = rmsdiff(im1,im2) # diff between the two screenshots\n\t\t\tprint(\"diff(lower is better):\",diff)\n\t\t\tcomparison_image = add_comparison_image(im1, im2, comparison_image, iteration)\n\t\t\tdiffs.append(diff)\n\t\t\tavg = sum(diffs)/len(diffs)\n\t\t\t#pbar.set_description(\"Comparing [\" + ula + \"] / [\" + la + ']: ' + str(round(diff, 2)) + ' (' + str(round(avg, 2)) + ')')\n\n\t\t\t# clean up 
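# FrameIsBlack above calls a frame "black" when its mean grey level is under
# 20, and the main loop screenshots frames to temp files with ffmpeg before
# testing them. The same search can read frames directly with OpenCV; a
# sketch (the path is a placeholder; the 20.0 threshold and 1/23.976 step are
# the record's own values):

import cv2
import numpy as np

def first_non_black_second(path, threshold=20.0, step=1 / 23.976):
    cap = cv2.VideoCapture(path)
    sec = 0.0
    found = None
    while True:
        cap.set(cv2.CAP_PROP_POS_MSEC, sec * 1000)
        ok, frame = cap.read()
        if not ok:
            break  # ran past the end without finding a bright frame
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        if np.average(gray) >= threshold:
            found = sec
            break
        sec += step
    cap.release()
    return found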
tempfiles\n\t\t\tos.close(temp1_file_handle)\n\t\t\tos.remove(temp1_filename)\n\t\t\tos.close(temp2_file_handle)\n\t\t\tos.remove(temp2_filename)\n\n\t\t\t# update progress bar and iterate\n\t\t\t#pbar.update(1)\n\t\t\titeration += 1\n\n\t\t#pbar.close()\n\n\t\tif avg < match_avg:\n\t\t\tprint(\"*** Matched! [\" + ula + \"] seems to be [\" + la + \"]\")\n\t\t\tmatched_episode = episode_from_filename(la)\n\t\t\tprint(\"\")\n\t\t\tbreak\n\t\telif avg < lowest:\n\t\t\tlowest = avg\n\t\t\tmatched_episode = episode_from_filename(la)\n\n\tif matched_episode in matched_episodes:\n\t\tprint (\"Yikes !!! Episode already matched. Cannot continue with confidence. Exiting...\")\n\t\tsys.exit(1)\n\telse:\n\t\tcomparison_image.save(folder_for_comparisons + matched_episode + \"-comparison.jpg\")\n\t\tmatched_episodes.append(matched_episode)\n\t\tfilename = os.path.splitext(la)[0]\n\t\textension = os.path.splitext(ula)[1]\n\t\t# write rename scripts\n\t\twith open('rename.bat', \"a\") as fi:\n\t\t\tfi.write('move \"' + ula + '\" \"' + filename + extension + \"\\\"\\n\")\n\t\twith open('Undo_rename.bat', \"a\") as fi: # same as regular rename but reverse\n\t\t\tfi.write('move \"' + filename + extension + '\" \"' + ula + \"\\\"\\n\")\n","sub_path":"30_rock.py","file_name":"30_rock.py","file_ext":"py","file_size_in_byte":7141,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"266706350","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nfrom decimal import Decimal\nimport django.core.validators\nimport djmoney.models.fields\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('freelancer', '0008_auto_20150420_1204'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='freelancer',\n name='minimum_pay_per_hour',\n field=djmoney.models.fields.MoneyField(decimal_places=2, default=Decimal('6.75'), max_digits=5, validators=[django.core.validators.MinValueValidator(Decimal('6.75'))], help_text=b'The minimum pay per hour you will accept.', default_currency=b'GBP'),\n preserve_default=True,\n ),\n ]\n","sub_path":"apps/freelancer/migrations/0009_auto_20150506_1529.py","file_name":"0009_auto_20150506_1529.py","file_ext":"py","file_size_in_byte":756,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"42642887","text":"from django.core.cache import cache\nfrom django.db.models import Max\nfrom django.core.exceptions import ObjectDoesNotExist\n\nfrom .models import Coin\nfrom .models import Market\nfrom .models import Ticker\n\n\ndef read_coin(coin_code):\n try:\n coin = cache.get('coin:{}'.format(coin_code))\n coin = coin or Coin.objects.get(code=coin_code)\n except ObjectDoesNotExist:\n coin = None\n return coin\n\n\ndef read_market(market_code):\n try:\n market = cache.get('market:{}'.format(market_code))\n market = market or Market.objects.get(code=market_code)\n except ObjectDoesNotExist:\n market = None\n return market\n\n\ndef read_sequence(market_code):\n sequence = cache.get('market:{}:sequence'.format(market_code))\n sequence = sequence or Ticker\\\n .objects\\\n .filter(market__code=market_code)\\\n .aggregate(Max('sequence'))\\\n .get('sequence__max', 0)\n if sequence is not None:\n return sequence + 1\n return 0\n\n\ndef read_ticker(market_code, date):\n try:\n ticker = cache.get('market:{}:date:{}'.format(market_code, date))\n ticker = ticker or Ticker.objects.get(market__code=market_code, date=date)\n except 
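# read_coin in the data_reader.py record above falls back to the database on a
# cache miss but never writes the result back, so the cache stays cold. A
# sketch of the usual read-through completion, assuming the Coin model and
# configured cache backend from the record (the 300 s timeout is illustrative):

from django.core.cache import cache

def read_coin_cached(coin_code):
    # assumes: from .models import Coin, as in the record above
    key = 'coin:{}'.format(coin_code)
    coin = cache.get(key)
    if coin is None:
        coin = Coin.objects.filter(code=coin_code).first()  # None if absent
        if coin is not None:
            cache.set(key, coin, 300)  # repopulate so the next read hits
    return coin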
ObjectDoesNotExist:\n ticker = None\n return ticker\n\n\ndef read_coins(coin_codes):\n keys = ['coin:{}'.format(coin_code) for coin_code in coin_codes]\n cached_coin_dicts = cache.get_many(keys)\n\n cached_coins = {}\n if cached_coin_dicts.__len__():\n cached_coins = {Coin(**coin_dict) for coin_dict in cached_coin_dicts.values()}\n\n missed_coin_codes = set(coin_codes) - set(cached_coin_dicts.keys())\n queried_coins = {}\n if missed_coin_codes.__len__():\n queried_coins = set(Coin.objects.filter(code__in=missed_coin_codes))\n\n return list(cached_coins | queried_coins)\n\n\ndef read_markets(market_codes):\n keys = ['market:{}'.format(market_code) for market_code in market_codes]\n cached_market_dicts = cache.get_many(keys)\n\n cached_markets = {}\n if cached_market_dicts.__len__():\n cached_markets = {Market(**market_dict) for market_dict in cached_market_dicts.values()}\n\n missed_market_codes = set(market_codes) - set(cached_market_dicts.keys())\n queried_markets = {}\n if missed_market_codes.__len__():\n queried_markets = set(Market.objects.filter(code__in=missed_market_codes))\n\n return list(cached_markets | queried_markets)\n\n\ndef read_tickets(market_code, dates):\n keys = ['market:{}:date:{}'.format(market_code, date) for date in dates]\n cached_ticker_dicts = cache.get_many(keys)\n\n cached_tickers = set([])\n if cached_ticker_dicts.__len__():\n cached_tickers = {Ticker(**ticker_dict) for ticker_dict in cached_ticker_dicts.values()}\n\n missed_ticker_codes = set(dates) - set(cached_ticker_dicts.keys())\n queried_tickers = {}\n if missed_ticker_codes.__len__():\n queried_tickers = set(Ticker.objects.filter(market=market_code, date__in=missed_ticker_codes))\n\n return list(cached_tickers | queried_tickers)\n","sub_path":"api/data_reader.py","file_name":"data_reader.py","file_ext":"py","file_size_in_byte":3023,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"389473236","text":"import argparse\nfrom subprocess import call\n\n\ndef main(sourcelist):\n print(\"Running Main Script\")\n cmd = [\"python\", \"extract.py\"]\n if sourcelist:\n cmd.extend([\"--sourcelist\", sourcelist])\n call(cmd)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='Full ETL Pipeline')\n parser.add_argument(\n '--sourcelist',\n help='The filename of sources from which to extract data'\n )\n args = parser.parse_args()\n main(args.sourcelist)\n","sub_path":"conference-etl.py","file_name":"conference-etl.py","file_ext":"py","file_size_in_byte":495,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"518580176","text":"import sys\ninput = sys.stdin.readline\n\nN = int(input())\nstack = []\n\nfor _ in range(N):\n data = list(input().strip().split())\n if data[0] == 'push':\n stack.append(data[1])\n elif data[0] == 'top':\n if stack:\n print(stack[-1])\n else:\n print(-1)\n elif data[0] == 'size':\n print(len(stack))\n elif data[0] == 'pop':\n if stack:\n m = stack.pop()\n print(m)\n else:\n print(-1)\n else:\n if stack:\n print(0)\n else:\n print(1)","sub_path":"solution/10828(스택).py","file_name":"10828(스택).py","file_ext":"py","file_size_in_byte":561,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"263683539","text":"from django.db import models\nfrom django.db.models.signals import post_save\nfrom django.conf import settings\n\n\nclass Album(models.Model):\n \"\"\"\n Album models to contain all image links from twitter\n 
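# The BOJ 10828 stack record above dispatches on command strings with an
# if/elif chain. An equivalent compact form using a dict of handlers (same
# five commands, same outputs):

stack = []
handlers = {
    'pop':   lambda: stack.pop() if stack else -1,
    'size':  lambda: len(stack),
    'top':   lambda: stack[-1] if stack else -1,
    'empty': lambda: 0 if stack else 1,
}

def run(command):
    parts = command.split()
    if parts[0] == 'push':
        stack.append(parts[1])
        return None
    return handlers[parts[0]]()

for cmd in ['push 1', 'push 2', 'top', 'pop', 'size', 'empty']:
    out = run(cmd)
    if out is not None:
        print(out)  # prints 2, 2, 1, 0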
Contains foreign key to User model\n \"\"\"\n\n id = models.AutoField(primary_key=True)\n image_url = models.URLField(verbose_name='Image URL')\n user = models.ForeignKey(settings.AUTH_USER_MODEL)\n store_time = models.DateTimeField(auto_now_add=True, verbose_name='Added On')\n\n class Meta:\n \"\"\"\n Make sure there are no duplicate image URLs for a single user\n \"\"\"\n unique_together = (\"image_url\", \"user\")\n\n\nclass Hashtag(models.Model):\n \"\"\"\n Contains attributes hashtag name and since_id\n \"\"\"\n id = models.AutoField(primary_key=True)\n name = models.CharField(max_length=15, unique=True)\n since_id = models.CharField(max_length=20)\n\n\ndef send_email_check(sender, instance, created=None, **kwargs):\n \"\"\"\n Post Save Signal Function\n Check to see if email has to be sent\n :param sender: Model from which signal originates\n :param instance: The instance which just saved\n :param kwargs: Other fields\n :return: None\n \"\"\"\n if created:\n count = Album.objects.filter(user=instance.user).count()\n if count % 100 == 0 and count <= 500:\n from photo_album.tasks import send_email\n send_email(instance.user.hashtag, count)\n\n\npost_save.connect(send_email_check, sender=Album, dispatch_uid=\"send_email_check\")","sub_path":"carnival_album/photo_album/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1518,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"509218924","text":"\"\"\"Support for statistics for sensor values.\"\"\"\nfrom collections import deque\nimport logging\nimport statistics\n\nimport voluptuous as vol\n\nfrom homeassistant.components.recorder.models import States\nfrom homeassistant.components.recorder.util import execute, session_scope\nfrom homeassistant.components.sensor import PLATFORM_SCHEMA, SensorEntity\nfrom homeassistant.const import (\n ATTR_UNIT_OF_MEASUREMENT,\n CONF_ENTITY_ID,\n CONF_NAME,\n EVENT_HOMEASSISTANT_START,\n STATE_UNAVAILABLE,\n STATE_UNKNOWN,\n)\nfrom homeassistant.core import callback\nfrom homeassistant.helpers import config_validation as cv\nfrom homeassistant.helpers.event import (\n async_track_point_in_utc_time,\n async_track_state_change_event,\n)\nfrom homeassistant.helpers.reload import async_setup_reload_service\nfrom homeassistant.util import dt as dt_util\n\nfrom . 
import DOMAIN, PLATFORMS\n\n_LOGGER = logging.getLogger(__name__)\n\nATTR_AVERAGE_CHANGE = \"average_change\"\nATTR_CHANGE = \"change\"\nATTR_CHANGE_RATE = \"change_rate\"\nATTR_COUNT = \"count\"\nATTR_MAX_AGE = \"max_age\"\nATTR_MAX_VALUE = \"max_value\"\nATTR_MEAN = \"mean\"\nATTR_MEDIAN = \"median\"\nATTR_MIN_AGE = \"min_age\"\nATTR_MIN_VALUE = \"min_value\"\nATTR_QUANTILES = \"quantiles\"\nATTR_SAMPLING_SIZE = \"sampling_size\"\nATTR_STANDARD_DEVIATION = \"standard_deviation\"\nATTR_TOTAL = \"total\"\nATTR_VARIANCE = \"variance\"\n\nCONF_SAMPLING_SIZE = \"sampling_size\"\nCONF_MAX_AGE = \"max_age\"\nCONF_PRECISION = \"precision\"\nCONF_QUANTILE_INTERVALS = \"quantile_intervals\"\nCONF_QUANTILE_METHOD = \"quantile_method\"\n\nDEFAULT_NAME = \"Stats\"\nDEFAULT_SIZE = 20\nDEFAULT_PRECISION = 2\nDEFAULT_QUANTILE_INTERVALS = 4\nDEFAULT_QUANTILE_METHOD = \"exclusive\"\nICON = \"mdi:calculator\"\n\nPLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(\n {\n vol.Required(CONF_ENTITY_ID): cv.entity_id,\n vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,\n vol.Optional(CONF_SAMPLING_SIZE, default=DEFAULT_SIZE): vol.All(\n vol.Coerce(int), vol.Range(min=1)\n ),\n vol.Optional(CONF_MAX_AGE): cv.time_period,\n vol.Optional(CONF_PRECISION, default=DEFAULT_PRECISION): vol.Coerce(int),\n vol.Optional(\n CONF_QUANTILE_INTERVALS, default=DEFAULT_QUANTILE_INTERVALS\n ): vol.All(vol.Coerce(int), vol.Range(min=2)),\n vol.Optional(CONF_QUANTILE_METHOD, default=DEFAULT_QUANTILE_METHOD): vol.In(\n [\"exclusive\", \"inclusive\"]\n ),\n }\n)\n\n\nasync def async_setup_platform(hass, config, async_add_entities, discovery_info=None):\n \"\"\"Set up the Statistics sensor.\"\"\"\n\n await async_setup_reload_service(hass, DOMAIN, PLATFORMS)\n\n entity_id = config.get(CONF_ENTITY_ID)\n name = config.get(CONF_NAME)\n sampling_size = config.get(CONF_SAMPLING_SIZE)\n max_age = config.get(CONF_MAX_AGE)\n precision = config.get(CONF_PRECISION)\n quantile_intervals = config.get(CONF_QUANTILE_INTERVALS)\n quantile_method = config.get(CONF_QUANTILE_METHOD)\n\n async_add_entities(\n [\n StatisticsSensor(\n entity_id,\n name,\n sampling_size,\n max_age,\n precision,\n quantile_intervals,\n quantile_method,\n )\n ],\n True,\n )\n\n return True\n\n\nclass StatisticsSensor(SensorEntity):\n \"\"\"Representation of a Statistics sensor.\"\"\"\n\n def __init__(\n self,\n entity_id,\n name,\n sampling_size,\n max_age,\n precision,\n quantile_intervals,\n quantile_method,\n ):\n \"\"\"Initialize the Statistics sensor.\"\"\"\n self._entity_id = entity_id\n self.is_binary = self._entity_id.split(\".\")[0] == \"binary_sensor\"\n self._name = name\n self._sampling_size = sampling_size\n self._max_age = max_age\n self._precision = precision\n self._quantile_intervals = quantile_intervals\n self._quantile_method = quantile_method\n self._unit_of_measurement = None\n self.states = deque(maxlen=self._sampling_size)\n self.ages = deque(maxlen=self._sampling_size)\n\n self.count = 0\n self.mean = self.median = self.quantiles = self.stdev = self.variance = None\n self.total = self.min = self.max = None\n self.min_age = self.max_age = None\n self.change = self.average_change = self.change_rate = None\n self._update_listener = None\n\n async def async_added_to_hass(self):\n \"\"\"Register callbacks.\"\"\"\n\n @callback\n def async_stats_sensor_state_listener(event):\n \"\"\"Handle the sensor state changes.\"\"\"\n new_state = event.data.get(\"new_state\")\n if new_state is None:\n return\n\n self._unit_of_measurement = new_state.attributes.get(\n 
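# PLATFORM_SCHEMA above is a voluptuous schema: validating a config dict is a
# plain call, and Optional defaults are filled in automatically. A standalone
# taste of the same machinery, using a two-key subset of that schema:

import voluptuous as vol

schema = vol.Schema({
    vol.Required('entity_id'): str,
    vol.Optional('sampling_size', default=20): vol.All(vol.Coerce(int), vol.Range(min=1)),
})

print(schema({'entity_id': 'sensor.demo'}))
# -> {'entity_id': 'sensor.demo', 'sampling_size': 20}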
ATTR_UNIT_OF_MEASUREMENT\n )\n\n self._add_state_to_queue(new_state)\n\n self.async_schedule_update_ha_state(True)\n\n @callback\n def async_stats_sensor_startup(_):\n \"\"\"Add listener and get recorded state.\"\"\"\n _LOGGER.debug(\"Startup for %s\", self.entity_id)\n\n self.async_on_remove(\n async_track_state_change_event(\n self.hass, [self._entity_id], async_stats_sensor_state_listener\n )\n )\n\n if \"recorder\" in self.hass.config.components:\n # Only use the database if it's configured\n self.hass.async_create_task(self._async_initialize_from_database())\n\n self.hass.bus.async_listen_once(\n EVENT_HOMEASSISTANT_START, async_stats_sensor_startup\n )\n\n def _add_state_to_queue(self, new_state):\n \"\"\"Add the state to the queue.\"\"\"\n if new_state.state in [STATE_UNKNOWN, STATE_UNAVAILABLE]:\n return\n\n try:\n if self.is_binary:\n self.states.append(new_state.state)\n else:\n self.states.append(float(new_state.state))\n\n self.ages.append(new_state.last_updated)\n except ValueError:\n _LOGGER.error(\n \"%s: parsing error, expected number and received %s\",\n self.entity_id,\n new_state.state,\n )\n\n @property\n def name(self):\n \"\"\"Return the name of the sensor.\"\"\"\n return self._name\n\n @property\n def state(self):\n \"\"\"Return the state of the sensor.\"\"\"\n return self.mean if not self.is_binary else self.count\n\n @property\n def unit_of_measurement(self):\n \"\"\"Return the unit the value is expressed in.\"\"\"\n return self._unit_of_measurement if not self.is_binary else None\n\n @property\n def should_poll(self):\n \"\"\"No polling needed.\"\"\"\n return False\n\n @property\n def extra_state_attributes(self):\n \"\"\"Return the state attributes of the sensor.\"\"\"\n if not self.is_binary:\n return {\n ATTR_SAMPLING_SIZE: self._sampling_size,\n ATTR_COUNT: self.count,\n ATTR_MEAN: self.mean,\n ATTR_MEDIAN: self.median,\n ATTR_QUANTILES: self.quantiles,\n ATTR_STANDARD_DEVIATION: self.stdev,\n ATTR_VARIANCE: self.variance,\n ATTR_TOTAL: self.total,\n ATTR_MIN_VALUE: self.min,\n ATTR_MAX_VALUE: self.max,\n ATTR_MIN_AGE: self.min_age,\n ATTR_MAX_AGE: self.max_age,\n ATTR_CHANGE: self.change,\n ATTR_AVERAGE_CHANGE: self.average_change,\n ATTR_CHANGE_RATE: self.change_rate,\n }\n\n @property\n def icon(self):\n \"\"\"Return the icon to use in the frontend, if any.\"\"\"\n return ICON\n\n def _purge_old(self):\n \"\"\"Remove states which are older than self._max_age.\"\"\"\n now = dt_util.utcnow()\n\n _LOGGER.debug(\n \"%s: purging records older then %s(%s)\",\n self.entity_id,\n dt_util.as_local(now - self._max_age),\n self._max_age,\n )\n\n while self.ages and (now - self.ages[0]) > self._max_age:\n _LOGGER.debug(\n \"%s: purging record with datetime %s(%s)\",\n self.entity_id,\n dt_util.as_local(self.ages[0]),\n (now - self.ages[0]),\n )\n self.ages.popleft()\n self.states.popleft()\n\n def _next_to_purge_timestamp(self):\n \"\"\"Find the timestamp when the next purge would occur.\"\"\"\n if self.ages and self._max_age:\n # Take the oldest entry from the ages list and add the configured max_age.\n # If executed after purging old states, the result is the next timestamp\n # in the future when the oldest state will expire.\n return self.ages[0] + self._max_age\n return None\n\n async def async_update(self):\n \"\"\"Get the latest data and updates the states.\"\"\"\n _LOGGER.debug(\"%s: updating statistics\", self.entity_id)\n if self._max_age is not None:\n self._purge_old()\n\n self.count = len(self.states)\n\n if not self.is_binary:\n try: # require only one 
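# The sensor above buffers readings in collections.deque(maxlen=...), so the
# oldest sample is evicted automatically once the window is full:

from collections import deque

window = deque(maxlen=3)
for value in [1, 2, 3, 4]:
    window.append(value)
print(list(window))  # [2, 3, 4] -- the first value fell off the left end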
data point\n self.mean = round(statistics.mean(self.states), self._precision)\n self.median = round(statistics.median(self.states), self._precision)\n except statistics.StatisticsError as err:\n _LOGGER.debug(\"%s: %s\", self.entity_id, err)\n self.mean = self.median = STATE_UNKNOWN\n\n try: # require at least two data points\n self.stdev = round(statistics.stdev(self.states), self._precision)\n self.variance = round(statistics.variance(self.states), self._precision)\n if self._quantile_intervals < self.count:\n self.quantiles = [\n round(quantile, self._precision)\n for quantile in statistics.quantiles(\n self.states,\n n=self._quantile_intervals,\n method=self._quantile_method,\n )\n ]\n except statistics.StatisticsError as err:\n _LOGGER.debug(\"%s: %s\", self.entity_id, err)\n self.stdev = self.variance = self.quantiles = STATE_UNKNOWN\n\n if self.states:\n self.total = round(sum(self.states), self._precision)\n self.min = round(min(self.states), self._precision)\n self.max = round(max(self.states), self._precision)\n\n self.min_age = self.ages[0]\n self.max_age = self.ages[-1]\n\n self.change = self.states[-1] - self.states[0]\n self.average_change = self.change\n self.change_rate = 0\n\n if len(self.states) > 1:\n self.average_change /= len(self.states) - 1\n\n time_diff = (self.max_age - self.min_age).total_seconds()\n if time_diff > 0:\n self.change_rate = self.change / time_diff\n\n self.change = round(self.change, self._precision)\n self.average_change = round(self.average_change, self._precision)\n self.change_rate = round(self.change_rate, self._precision)\n\n else:\n self.total = self.min = self.max = STATE_UNKNOWN\n self.min_age = self.max_age = dt_util.utcnow()\n self.change = self.average_change = STATE_UNKNOWN\n self.change_rate = STATE_UNKNOWN\n\n # If max_age is set, ensure to update again after the defined interval.\n next_to_purge_timestamp = self._next_to_purge_timestamp()\n if next_to_purge_timestamp:\n _LOGGER.debug(\n \"%s: scheduling update at %s\", self.entity_id, next_to_purge_timestamp\n )\n if self._update_listener:\n self._update_listener()\n self._update_listener = None\n\n @callback\n def _scheduled_update(now):\n \"\"\"Timer callback for sensor update.\"\"\"\n _LOGGER.debug(\"%s: executing scheduled update\", self.entity_id)\n self.async_schedule_update_ha_state(True)\n self._update_listener = None\n\n self._update_listener = async_track_point_in_utc_time(\n self.hass, _scheduled_update, next_to_purge_timestamp\n )\n\n async def _async_initialize_from_database(self):\n \"\"\"Initialize the list of states from the database.\n\n The query will get the list of states in DESCENDING order so that we\n can limit the result to self._sample_size. 
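# The update path above leans on the stdlib statistics module: mean/median
# need at least one sample, stdev/variance at least two, and quantiles()
# (Python 3.8+) enough samples for the requested intervals -- hence the two
# try blocks. A quick demonstration on four points:

import statistics

data = [1.0, 2.0, 3.0, 4.0]
print(statistics.mean(data))     # 2.5
print(statistics.median(data))   # 2.5
print(statistics.stdev(data))    # 1.2909...
print(statistics.quantiles(data, n=4, method='exclusive'))  # [1.25, 2.5, 3.75]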
Afterwards reverse the\n list so that we get it in the right order again.\n\n If MaxAge is provided then query will restrict to entries younger then\n current datetime - MaxAge.\n \"\"\"\n\n _LOGGER.debug(\"%s: initializing values from the database\", self.entity_id)\n\n with session_scope(hass=self.hass) as session:\n query = session.query(States).filter(\n States.entity_id == self._entity_id.lower()\n )\n\n if self._max_age is not None:\n records_older_then = dt_util.utcnow() - self._max_age\n _LOGGER.debug(\n \"%s: retrieve records not older then %s\",\n self.entity_id,\n records_older_then,\n )\n query = query.filter(States.last_updated >= records_older_then)\n else:\n _LOGGER.debug(\"%s: retrieving all records\", self.entity_id)\n\n query = query.order_by(States.last_updated.desc()).limit(\n self._sampling_size\n )\n states = execute(query, to_native=True, validate_entity_ids=False)\n\n for state in reversed(states):\n self._add_state_to_queue(state)\n\n self.async_schedule_update_ha_state(True)\n\n _LOGGER.debug(\"%s: initializing from database completed\", self.entity_id)\n","sub_path":"homeassistant/components/statistics/sensor.py","file_name":"sensor.py","file_ext":"py","file_size_in_byte":14105,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"69416773","text":"class Solution:\n def sumSubarrayMins(self, A):\n N, mod = len(A), 10 ** 9 + 7\n prev, nxt = [0] * N, [0] * N\n stack = []\n for i in range(N):\n while stack and A[stack[-1]] >= A[i]:\n stack.pop()\n prev[i] = stack[-1] if stack else -1\n stack.append(i)\n\n stack = []\n for k in range(N - 1, -1, -1):\n while stack and A[k] < A[stack[-1]]:\n stack.pop()\n nxt[k] = stack[-1] if stack else N\n stack.append(k)\n print(prev, nxt)\n\n return sum([(nxt[i] - i) * (i - prev[i]) * A[i] for i in range(N)]) % mod\n\n # result = 0\n # while len(A) > 0:\n # result += sum(A)\n # tmp = []\n # for i in range(1, len(A)):\n # tmp.append(min(A[i], A[i - 1]))\n # A = tmp\n # print(result, tmp)\n # return result % 1000000007\n # N = len(A)\n # sum = 0\n # for i in range(N):\n # min_value = A[i]\n # for j in range(i, N):\n # if A[j] < min_value:\n # min_value = A[j]sta\n # sum += min_value\n\n\ns = Solution()\nprint(s.sumSubarrayMins([3, 1, 2, 4]))\nprint(s.sumSubarrayMins([3, 3, 3, 3]))\nprint(s.sumSubarrayMins([1, 2, 3, 4]))\nprint(s.sumSubarrayMins([4, 3, 2, 1]))\n","sub_path":"Python_Projects/6-Google codejam/183_sum_of_subarray_minimums.py","file_name":"183_sum_of_subarray_minimums.py","file_ext":"py","file_size_in_byte":1355,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"45309684","text":"from models import db, User, Post\nfrom app import app\n\ndb.drop_all()\ndb.create_all()\n\n\nusers = [\n User(first_name='Carl', last_name='Boulder'),\n User(first_name='Brenda', last_name='Baker'),\n User(first_name='Susan', last_name='Rice',\n image_url='www.mycustomimage.com'),\n User(first_name='Oba', last_name='Zelous')\n]\n\nposts = [\n Post(title='Snax', content='content goes here', user_id=1),\n Post(title='Built', content='somewhere else', user_id=2),\n Post(title='Super', content='blasted shredz', user_id=1),\n Post(title='Obsurd', content='Major Content Right Here', user_id=3),\n Post(title='Blanket Statments', content='''Things are bad\n Things are good\n Dont believe me? 
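# The stack solution above counts, for each A[i], the subarrays in which it is
# the minimum: (i - prev[i]) * (nxt[i] - i), with prev/nxt the nearest smaller
# neighbours. For [3, 1, 2, 4]: 3 covers 1*1 subarray, 1 covers 2*3, 2 covers
# 1*2, 4 covers 1*1, so the total is 3*1 + 1*6 + 2*2 + 4*1 = 17. An O(n^2)
# brute force confirms the figure:

def brute_force(A):
    n = len(A)
    return sum(min(A[i:j + 1]) for i in range(n) for j in range(i, n)) % (10**9 + 7)

print(brute_force([3, 1, 2, 4]))  # 17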
\n Super star''', user_id=2)\n]\n\ndb.session.add_all(users)\ndb.session.commit()\n\ndb.session.add_all(posts)\ndb.session.commit()\n","sub_path":"seed.py","file_name":"seed.py","file_ext":"py","file_size_in_byte":840,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"208419354","text":"#download the dataset\nimport keras\nimport numpy as np\n\npath = keras.utils.get_file('nietzsche.txt', origin='https://s3.amazonaws.com/text-datasets/nietzsche.txt')\ntext = open(path).read().lower()\n\ndef log(stringA):\n fp = open('ch8.1.log','a')\n fp.write(stringA)\n fp.close()\n\nlog(\"Corpus length:\"+str(len(text))+'\\n')\n\n#extract sequences of \"maxlen\" chars\nmaxlen = 60\n#sample a new sequence every \"step\" chars\nstep = 3\n#hold the sequences\nsentences = []\n#hold the targets(the following char of a sequence)\nnext_chars = []\n\nfor i in range(0, len(text) - maxlen, step):\n sentences.append(text[i:i+maxlen])\n next_chars.append(text[i+maxlen])\nlog('Number of sequences:'+str(len(sentences))+'\\n')\nlog('sample at [2]:\"'+ sentences[2] + '\"\\n')\nlog('sample at [3]:\"'+ sentences[3] + '\"\\n')\nlog('next_chars at [2]:\"'+ next_chars[2]+'\"\\n')\nlog('next_chars at [3]:\"'+ next_chars[3]+'\"\\n')\n\nchars = sorted(list(set(text)))\nlog('unique chars:'+str(len(chars))+'\\n')\n#using a for to combine the index and char to build a dict\nchar_indices = dict((char,chars.index(char))for char in chars)\n\nlog('vectorization\\n')\n#x is an array [len of sentences][maxlen][len of chars], one hot encode\n#it is for samples\nx = np.zeros((len(sentences), maxlen, len(chars)), dtype=np.bool)\n#y is [len of sentences][len of chars]\n#it is for next_chars\ny = np.zeros((len(sentences),len(chars)),dtype=np.bool)\n\n#one hot encode\nfor i, sentence in enumerate(sentences):\n for t, char in enumerate(sentence):\n x[i,t,char_indices[char]] = 1\n y[i,char_indices[next_chars[i]]] = 1\n\nfrom keras import layers\n\nmodel = keras.models.Sequential()\nmodel.add(layers.LSTM(128, input_shape=(maxlen, len(chars))))\nmodel.add(layers.Dense(len(chars),activation='softmax'))\n\noptimizer = keras.optimizers.RMSprop(lr=0.01)\nmodel.compile(loss='categorical_crossentropy',optimizer=optimizer)\n\ndef sample(preds, temperature=1.0):\n preds = np.asarray(preds).astype('float64')\n preds = np.log(preds) / temperature\n exp_preds = np.exp(preds)\n preds = exp_preds / np.sum(exp_preds)\n probas = np.random.multinomial(1,preds,1)\n return np.argmax(probas)\n\nimport random\nimport sys\n\nfor epoch in range(1,60):\n log('\\nepochs: '+str(epoch)+'\\n')\n model.fit(x,y,batch_size=128,epochs=1)\n start_index = random.randint(0,len(text))\n generated_text = text[start_index: start_index+maxlen]\n log('--- Generating with seed: \"'+generated_text+'\"\\n')\n\n for temperature in [0.2,0.5,1.0,1.2]:\n log('------ temperature:'+ str(temperature)+'\\n')\n log(generated_text)\n for i in range(400):\n sampled = np.zeros((1,maxlen, len(chars)))\n for t, char in enumerate(generated_text):\n sampled[0,t,char_indices[char]] = 1.\n \n preds = model.predict(sampled,verbose=0)[0]\n next_index = sample(preds, temperature)\n next_char = chars[next_index]\n\n generated_text += next_char\n generated_text = generated_text[1:]\n\n log(next_char)\n \n if(epoch % 10 == 9):\n 
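# sample() above reweights the predicted distribution by a temperature before
# drawing: temperatures below 1 sharpen it toward the argmax, above 1 flatten
# it. The reweighting on its own, with example numbers:

import numpy as np

def reweight(preds, temperature):
    logits = np.log(np.asarray(preds, dtype='float64')) / temperature
    exp = np.exp(logits)
    return exp / np.sum(exp)

p = [0.1, 0.2, 0.7]
print(reweight(p, 0.2))  # ~[0.0001, 0.002, 0.998]  (sharper)
print(reweight(p, 1.2))  # ~[0.13, 0.23, 0.65]      (flatter)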
model.save(\"textGenEpoch\"+str(epoch+1)+\".h5\")","sub_path":"Week13/ch8.1.py","file_name":"ch8.1.py","file_ext":"py","file_size_in_byte":3120,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"582082374","text":"from monte_carlo import monte_carlo_apply\r\nimport numpy as np\r\nfrom population import Population\r\nimport pandas as pd\r\nfrom plot_builder import build_plot\r\n\r\n\r\ndef values_to_statistic(status_distribution_sequences):\r\n infected = np.array(status_distribution_sequences[1][0]) + np.array(status_distribution_sequences[2][0])\r\n examined = np.array(status_distribution_sequences[1][2]) + np.array(status_distribution_sequences[2][2])\r\n treated = np.array(status_distribution_sequences[1][3]) + np.array(status_distribution_sequences[2][3])\r\n result = [np.array(status_distribution_sequences[0][0]) + infected - examined,\r\n np.array(status_distribution_sequences[1][2]),\r\n np.array(status_distribution_sequences[2][2]),\r\n np.array(treated)]\r\n return result\r\n\r\n\r\ndf = pd.read_excel('data_studied.xlsx', usecols=[16, 19, 25, 26, 27, 28, 30], skiprows=[0],\r\n names=['hiv', 'aids', 'population', 'susceptible', 'examined', 'examined%', 'treated%'])\r\n\r\nmonte_carlo_iterations = 500\r\n\r\ntime = 144\r\nstep = [0, 1]\r\n\r\npopulation_statistic = np.array(df['population'].values.tolist()[12:157])\r\nsusceptible_statistic = np.array(df['susceptible'].values.tolist()[12:157])\r\ntreated_statistic = np.array(df['treated%'].values.tolist()[12:157])\r\npopulation_treated = df['treated%'].values.tolist()[:12]\r\nhiv_statistic = np.array(df['hiv'].values.tolist()[12:157])\r\naids_statistic = np.array(df['aids'].values.tolist()[12:157])\r\n\r\ntreated_statistic = np.array([int(np.round(i)) for i in (treated_statistic * (hiv_statistic + aids_statistic))])\r\n\r\nquantifier = 10\r\nagents = 1000 * quantifier\r\ndelimiter = population_statistic[0] / agents\r\n\r\nstatistic_values = [np.array([int(np.round(i / delimiter)) for i in susceptible_statistic]),\r\n np.array([int(np.round(i / delimiter)) for i in hiv_statistic]),\r\n np.array([int(np.round(i / delimiter)) for i in aids_statistic]),\r\n np.array([int(np.round(i / delimiter)) for i in treated_statistic])]\r\n\r\n\r\nsusceptible = 976 * quantifier\r\nhiv = 22 * quantifier\r\naids = 2 * quantifier\r\nsusceptible_examined = 46 * quantifier\r\nhiv_examined = 1 * quantifier\r\nhiv_wrong_examined = 0\r\nhiv_treated = 0\r\naids_examined = 1\r\naids_wrong_examined = 0\r\naids_treated = 0\r\n\r\npopulation_death_rate = [0.00040, 0.00055]\r\n\r\nwrong_examination = [0, 0]\r\n\r\naverage_infected = [0.005, 0.030]\r\naverage_treated_infected = [0.0005, 0.0030]\r\n\r\naverage_infected_vector = [average_infected, average_treated_infected]\r\n\r\nhiv_to_aids = [0.001, 0.01]\r\naids_death = [0.001, 0.01]\r\nhiv_death = [0.0001, 0.001]\r\nhiv_treated_to_aids = 0.0003\r\nhiv_treated_death = 0.00001\r\naids_treated_death = 0.00004\r\n\r\ntransition_matrix_min_max = [[1, [0, 0], [0, 0], [0, 0]],\r\n [[0, 0], 1, hiv_to_aids, hiv_death],\r\n [[0, 0], [0, 0], 1, aids_death]]\r\n\r\ntransition_treated_matrix_min_max = [[[0]],\r\n [[0, 0], 1, [0, hiv_treated_to_aids], [0, hiv_treated_death]],\r\n [[0, 0], [0, 0], 1, [0, aids_treated_death]]]\r\n\r\nexamination = [0.001, 0.01]\r\ntreating_hiv = [0.001, 0.01]\r\ntreating_aids = [0.01, 0.1]\r\ntransition_medical_matrix = [[examination], [treating_hiv, treating_aids]]\r\n\r\npopulation_distribution = [[susceptible, susceptible_examined], [hiv, 
hiv_wrong_examined, hiv_examined, hiv_treated],\r\n [aids, aids_wrong_examined, aids_examined, aids_treated]]\r\n\r\nnames = ['susceptible', 'hiv', 'aids', 'treated']\r\n\r\npopulation = Population(population_distribution)\r\n\r\noptimum_results = monte_carlo_apply(population, transition_matrix_min_max, transition_treated_matrix_min_max,\r\n transition_medical_matrix, population_death_rate,\r\n average_infected_vector, wrong_examination, statistic_values,\r\n time, step, monte_carlo_iterations, values_to_statistic)\r\n\r\ntime_sequence = list([i for i in range(time + 1)])\r\nprint(optimum_results[1], optimum_results[2], optimum_results[3],\r\n optimum_results[4], optimum_results[5])\r\nbuild_plot(optimum_results[0], statistic_values, time_sequence, names)\r\n","sub_path":"diploma_main.py","file_name":"diploma_main.py","file_ext":"py","file_size_in_byte":4247,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"235439284","text":"\"\"\"\n2つの画像を比較し、MSE, SSIM, PSNRを算出する.\n\"\"\"\nimport math\nfrom skimage.measure import compare_ssim as ssim\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport cv2\n\n\ndef mse(image_a, image_b):\n \"\"\"\n 2つの画像の平均二乗誤差を求める (MSE)\n @param image_a 画像1\n @param image_b 画像2\n \"\"\"\n # the 'Mean Squared Error' between the two images is the\n # sum of the squared difference between the two images;\n # NOTE: the two images must have the same dimension\n # Translate: 2つの画像間の'MSE'は、2つの画像の要素の差を2乗したものの平均です。\n # 注意: 2つの画像は同じ次元でなければなりません\n err = np.sum((image_a.astype(\"float\") - image_b.astype(\"float\")) ** 2)\n err /= float(image_a.shape[0] * image_a.shape[1])\n\n # return the MSE, the lower the error, the more \"similar\"\n # the two images are\n # Translate: MSE値を返す、MSEはエラー値が低いほど2つの画像がより'似ている'\n return err\n\ndef psnr(mse_value):\n \"\"\"\n mse値からpsnr値を算出する. 
自作関数\n @param mse_value mse値\n \"\"\"\n if mse_value == 0:\n ans = None\n else:\n ans = 20 * math.log10(255 / math.sqrt(mse_value))\n return ans\n\ndef compare_images(cache, tolerance, image_a, image_b, title):\n \"\"\"\n 2つの画像を比較する関数.\n @param image_a 画像A\n @param image_b 画像B\n @param title 出力画像につけるタイトル\n \"\"\"\n\n # compute the mean squared error and structural similarity\n # index for the images\n # Translate: 平均二乗誤差と構造的類似度の計算をする\n result_mse = mse(image_a, image_b)\n result_psnr = psnr(result_mse)\n result_ssim = ssim(image_a, image_b)\n\n # setup the figure\n # Translate: 図の設定\n\n if result_psnr == None:\n f.write(str(cache) + ',' + str(tolerance) + ',' + str(result_mse) + ',0,' + str(result_ssim) + '\\n')\n else:\n f.write(str(cache) + ',' + str(tolerance) + ',' + str(result_mse) + ',' + str(result_psnr) + ',' + str(result_ssim) + '\\n')\n\n'''\nファイルオープン\n'''\nf = open('acCompareiACT_rx.csv', 'w')\n\n'''\nforループで各画像処理\n'''\ncache_size = [1,2,3,4,5,10,15,20,25,30,35,40,45,50,55,60,65,70,75,80,85,90,95,100]\ntolerance_range = [1,2,3,4,5,10,15,20]\n\nfor c in cache_size:\n for t in tolerance_range:\n print(\"cache, tolerance = \" + str(c) + ', ' + str(t))\n\n # load the images -- the original, the original + suggested,\n # and the original + intel\n # Translate: 画像を読み込む - オリジナル、オリジナルと提案手法、オリジナルとインテルの手法\n # cv2.IMREAD_GRAYSCALEにより、最初から2値画像を読み込むこととする\n original = cv2.imread(\"img/image_without_AC.tif\", cv2.IMREAD_GRAYSCALE)\n proposed = cv2.imread(\"img_x/image_proposedAC_rx_tolerance_\" + str(t) + \"_cache_\" + str(c) + \".tif\", cv2.IMREAD_GRAYSCALE)\n intel = cv2.imread(\"img_x/image_intelAC_rx_tolerance_\" + str(t) + \"_cache_\" + str(c) + \".tif\", cv2.IMREAD_GRAYSCALE)\n\n # convert the images to grayscale\n # Translate: 2値画像へ変換\n # 不要なためコメントアウト\n # original = cv2.cvtColor(original, cv2.COLOR_BGR2GRAY)\n # suggested = cv2.cvtColor(suggested, cv2.COLOR_BGR2GRAY)\n # intel = cv2.cvtColor(intel, cv2.COLOR_BGR2GRAY)\n\n # initialize the figure\n # Translate: 図の初期化\n\n # compare the images\n # Translate: 画像の比較\n\n # compare_images(c, t, original, original, \"Original vs. Original\")\n # compare_images(c, t, original, proposed, \"Original vs. ProposedAC\")\n compare_images(c, t, original, intel, \"Original vs. 
IntelAC\")\n\nf.close()\n","sub_path":"compare_outcsv.py","file_name":"compare_outcsv.py","file_ext":"py","file_size_in_byte":3860,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"574282280","text":"import infoGen\nimport random\nimport csv\n\nclass Person:\n def __init__(self):\n self.name = infoGen.getMaleName()\n self.address = infoGen.getAddress()\n self.ssn = infoGen.getSSN()\n self.dob = infoGen.getDOB()\n self.size = infoGen.getSize()\n self.aptitude = random.randint(1, 99)\n self.salary = random.randint(50000, 120000)\n def printPerson(self):\n print(self.name + \"\\n\" + self.address + '\\n' + self.ssn + '\\n' + self.dob + '\\n' + self.size)\n def addToFile(self, file):\n with open(file, 'a') as myFile:\n myFile.write(self.name + ',' + self.address + ',' + self.ssn + ',' + self.dob + ',' +\n self.size + ',' + str(self.aptitude) + ',' + str(self.salary) + '\\n')\n\n def genFromFile(self, file, index):\n current = 0\n with open(file) as csvfile:\n opened = csv.reader(csvfile, delimiter=',')\n for row in opened:\n if current == index:\n self.name = row[0]\n self.address = row[1]\n self.ssn = row[2]\n self.dob = row[3]\n self.size = row[4]\n self.aptitude = row[5]\n self.salary = row[6]\n current += 1\n","sub_path":"person.py","file_name":"person.py","file_ext":"py","file_size_in_byte":1296,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"352524649","text":"# coding=utf-8\n# author:yundanni\n# create_time:2020/11/9 14:18\nimport requests\nimport json\nfrom util.file_read import YamlHandle\nimport os\nimport sys\nimport ssl\nfrom util.rsa import Decript\nBASE_PATH = os.path.split(os.path.dirname(os.path.abspath(__file__)))[0]#将path分割成目录和文件名二元组返回 D:\\测试\\zhixing_test\n# print ('base_path= '+BASE_PATH)\nUTILS_PATH = os.path.join(BASE_PATH, 'utils')\nsys.path.append(BASE_PATH)\nsys.path.append(UTILS_PATH)\nDATA_PATH = os.path.join(BASE_PATH, r'data')\nDATA_FILE = os.path.join(BASE_PATH, r'data\\data2.yaml')\nfrom util.send_ding import SendMessage\n\n\nclass method():\n global s\n s = requests.Session()\n requests.packages.urllib3.disable_warnings()\n\n def send_get(self, url, data):\n res = s.get(url=url, data=data,verify=False)\n # print (type(res))\n return res\n\n def send_post(self, url, data):\n res = s.post(url=url, data=data,verify=False)\n # SendMessage(res)\n return res\n\n def run_main(self, url, method,data=None):\n res = None\n if method == 'GET':\n res = self.send_get(url, data)\n else:\n res = self.send_post(url,data)\n return res\n#post上传文件\n def run_file(self,url,files,data):\n res=None\n res=s.post(url=url,files=files,data=data).json()\n return res\nif __name__ == '__main__':\n host=YamlHandle(DATA_FILE).get_value('host')\n login_url=YamlHandle(DATA_FILE).get_value('api_path').get('Params')\n url=host+login_url\n data=None\n print (host+login_url)\n a=method()\n a.run_main(url,'GET')","sub_path":"util/request_method.py","file_name":"request_method.py","file_ext":"py","file_size_in_byte":1608,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"265270138","text":"import urllib.request\nimport json\nimport os\n\nv_herolist_url = urllib.request.urlopen(\"http://pvp.qq.com/web201605/js/herolist.json\")\nv_herolist = v_herolist_url.read().decode('utf-8')\nv_hero = v_herolist.encode('utf8')[3:].decode('utf-8')\nhero_json = json.loads(v_hero)\nhero_num = len(hero_json)\n\nhero_dir = './img/'\nif not 
os.path.exists(hero_dir):\n    os.mkdir(hero_dir)\nfor num in range(hero_num):\n    skinsname = hero_json[num]['skin_name'].split(\"|\")\n    print(hero_json[num]['cname'])\n    print(skinsname)\n    input()\n    for i in range(len(skinsname)):\n        skin_name = hero_json[num]['ename']\n        skin_url = 'http://game.gtimg.cn/images/yxzj/img201606/skin/hero-info/'+str(skin_name)+'/'+str(skin_name)+'-bigskin-'+str(i+1)+'.jpg'\n        save_url=hero_dir+str(skin_name)+\"_\"+str(i+1)+\".jpg\"\n        # urllib.request.urlretrieve(skin_url,save_url)","sub_path":"Python/LFR-Spider/4.py","file_name":"4.py","file_ext":"py","file_size_in_byte":858,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"115903937","text":"# Plotting a complete world GDP map\n# Extracting Relevant Data\nimport json\nfrom pygal.maps.world import COUNTRIES, World\nfrom pygal.style import RotateStyle as RS, LightColorizedStyle as LCS\n\nfrom country_codes import get_country_code\n\n# Load the data into a list.\nfilename='gdp.json'\nwith open(filename) as f:\n\tgdp_data = json.load(f)\n#print(gdp_data)\n\n# Build a dictionary of GDP data.\ngdp_countries={}\nfor gdp_dict in gdp_data:\n\tif gdp_dict['Year'] == 2016:\t# select 2016 GDP for each country.\n\t\tcountry_name = gdp_dict['Country Name']\n\t\tgdp = int(float(gdp_dict['Value'])/1000000) # get rid of .0 \n\t\tcode=get_country_code(country_name)\n\t\t\n\t\tif code:\n\t\t\tgdp_countries[code]=gdp\n\t\n# Group the countries into 3 GDP levels.\ncc_pops_1, cc_pops_2, cc_pops_3 = {},{},{}\nfor cc, pop in gdp_countries.items():\n\tif pop < 500000:\n\t\tcc_pops_1[cc] = pop\n\telif pop < 1000000:\n\t\tcc_pops_2[cc] = pop\n\telse:\n\t\tcc_pops_3[cc] = pop\n\t\t\n# Styling world maps in pygal\nwm_style = RS('#1070EE',base_style=LCS)\nwm= World(style=wm_style)\n\n# See how many countries are in each level.\nprint(len(cc_pops_1), len(cc_pops_2), len(cc_pops_3))\n\n#wm=World()\nwm.title='World Gross Domestic Product in 2016, by Country'\nwm.add('0-0.5tn', cc_pops_1)\nwm.add('0.5tn-1tn', cc_pops_2)\nwm.add('>1tn', cc_pops_3)\n\nwm.render_to_file('world_gdp_2016.svg')\n","sub_path":"chapter16/gross_domestic_product.py","file_name":"gross_domestic_product.py","file_ext":"py","file_size_in_byte":1332,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"165302121","text":"import cv2\nfrom matplotlib import pyplot as plt\nimport math\nimport numpy as np\n\ndef saltPepper(imgage, times, kernelWidth):\n    kernelSize = (kernelWidth - 1) // 2\n    img = np.copy(imgage)\n\n    height, width = img.shape[:2]\n\n    for n in range(0, times):\n        newImage = np.copy(img)\n\n        for h in range(kernelSize, 
height - kernelSize):\n            for w in range(kernelSize, width - kernelSize):\n                array = img[h-kernelSize: h+kernelSize+1, w-kernelSize:w+kernelSize+1]\n                median = np.median(array)\n                newImage.itemset((h,w), median)\n\n        img = newImage\n\n    return img\n\ndef averaging(imgage, times, kernelWidth):\n    img = np.copy(imgage)\n    kernelSize = (kernelWidth - 1) // 2\n    img = cv2.copyMakeBorder(img, kernelSize, kernelSize, kernelSize, kernelSize, cv2.BORDER_REPLICATE)\n    height, width = img.shape[:2]\n\n    for n in range(0, times):\n        newImage = np.copy(img)\n        for h in range(kernelSize, height - kernelSize):\n            for w in range(kernelSize, width - kernelSize):\n                newV = 0.0\n\n                for i in range(-1 * kernelSize, kernelSize + 1):\n                    for j in range(-1 * kernelSize, kernelSize + 1):\n                        newV += (img.item(h - i, w - j) / math.pow(kernelWidth, 2))\n\n                newImage.itemset((h, w), newV)\n\n        img = newImage\n\n    return img[kernelSize:height-kernelSize, kernelSize:width-kernelSize]\n\n\nif __name__ == '__main__':\n    orig = cv2.imread('SaltPepper.jpg')\n    #orig = cv2.resize(orig, (0,0), fx=0.3, fy=0.3)\n    orig = cv2.cvtColor(orig, cv2.COLOR_BGR2GRAY)\n\n    sP1 = saltPepper(orig, 1, 7)\n    sP2 = saltPepper(sP1, 1, 7)\n    sP3 = saltPepper(sP2, 1, 7)\n\n    avr1 = averaging(orig, 1, 7)\n    avr2 = averaging(avr1, 1, 7)\n    avr3 = averaging(avr2, 1, 7)\n\n    plt.subplot(241),plt.imshow(orig, cmap='Greys_r'),plt.title('orig')\n    plt.subplot(242),plt.imshow(avr1, cmap='Greys_r'),plt.title('average 1')\n    plt.subplot(243),plt.imshow(avr2, cmap='Greys_r'),plt.title('average 2')\n    plt.subplot(244),plt.imshow(avr3, cmap='Greys_r'),plt.title('average 3')\n    plt.subplot(245),plt.imshow(orig, cmap='Greys_r'),plt.title('orig')\n    plt.subplot(246),plt.imshow(sP1, cmap='Greys_r'),plt.title('median 1')\n    plt.subplot(247),plt.imshow(sP2, cmap='Greys_r'),plt.title('median 2')\n    plt.subplot(248),plt.imshow(sP3, cmap='Greys_r'),plt.title('median 3')\n    plt.show()\n\n","sub_path":"Labo02/opdracht2.py","file_name":"opdracht2.py","file_ext":"py","file_size_in_byte":2427,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"292862368","text":"# -*- coding: utf-8 -*-\nimport scrapy\nimport logging\nimport json\nfrom webscraper.settings import LOG_FILE, LOG_FORMAT, LOG_LEVEL\nfrom webscraper.items import HumbleBundle\n\n\nformatter = logging.Formatter(LOG_FORMAT)\nfh = logging.FileHandler(LOG_FILE)\nfh.setLevel(LOG_LEVEL)\nfh.setFormatter(formatter)\n\nLOGGER = logging.getLogger(\"HumbleLogger\")\nLOGGER.setLevel(LOG_LEVEL)\nLOGGER.addHandler(fh)\n\n\nclass HumbleSpider(scrapy.Spider):\n    name = 'humble'\n    allowed_domains = ['humblebundle.com']\n    custom_settings = {\n        \"ITEM_PIPELINES\": {\n            \"webscraper.pipelines.HumbleNotification\": 300\n        }\n    }\n\n    def start_requests(self):\n        urls = [\"https://www.humblebundle.com\"]\n        for url in urls:\n            yield scrapy.Request(url=url, callback=self.parse)\n\n    def parse(self, response):\n        script_object = response.xpath(\"//script[@id='webpack-json-data']\").extract_first()\n        if script_object is None:\n            LOGGER.error(\"Could not find bundle JSON data\")\n        else:\n            script_object = script_object\\\n                .lstrip(\"<script id=\\\"webpack-json-data\\\" type=\\\"application/json\\\">\")\\\n                .rstrip(\"</script>\")\\\n                .strip()\n            script_json = json.loads(script_object)\n            bundle_objects = script_json[\"mosaic\"][1][\"products\"]\n            for bundle in bundle_objects:\n                if bundle[\"type\"] != \"bundle\":\n                    continue\n                b = HumbleBundle()\n                b[\"title\"] = bundle[\"tile_name\"]\n                b[\"highlights\"] = \" | \".join(\n                    [h for h in 
bundle[\"highlights\"] if h != \"Pay What You Want\" and h != \"Support Charity\"]\n )\n b[\"link\"] = \"https://www.humblebundle.com/\" + bundle[\"product_url\"]\n LOGGER.info(\"Bundle: \" + str(b))\n yield b\n","sub_path":"webscraper/spiders/humble.py","file_name":"humble.py","file_ext":"py","file_size_in_byte":1863,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"230079409","text":"import socket\nimport sys\n\n# Create a TCP/IP socket\nsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\n# Bind the socket to the address given on the command line\nserver_address = ('127.0.0.1', 20000)\nsock.bind(server_address)\n# print >>sys.stderr, 'starting up on %s port %s' % sock.getsockname()\nprint('starting up on {} port {}'.format(*sock.getsockname()), file=sys.stderr)\nsock.listen(2)\n\nwhile True:\n # print >>sys.stderr, 'waiting for a connection'\n print('waiting for a connection', file=sys.stderr)\n connection, client_address = sock.accept()\n try:\n # print >>sys.stderr, 'client connected:', client_address\n print(\"client connected: {}\".format(client_address), file=sys.stderr)\n while True:\n data = connection.recv(16)\n # print >>sys.stderr, 'received \"%s\"' % data\n if data:\n print(\"received: {}\".format(*data), file=sys.stderr)\n connection.sendall(data)\n elif data.decode().strip().upper() in ['END', 'QUIT', 'EXIT']:\n break\n else:\n break\n finally:\n connection.close()","sub_path":"demo_server.py","file_name":"demo_server.py","file_ext":"py","file_size_in_byte":1143,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"277835856","text":"import io\nimport os\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport numpy as np\nfrom subprocess import check_output\n\nTESTNAME = 'lasso_stress_benchmark'\nns = np.array([100, 500, 1000, 2000])\nps = 2**(np.linspace(1, 14, 14)).astype(int)\n\ndef plot(df, fig_dir):\n grouped = df.groupby('n')\n ncols=2\n nrows = int(np.ceil(grouped.ngroups/ncols))\n\n fig, axes = plt.subplots(nrows=nrows, ncols=ncols, figsize=(12,6), sharey=True)\n\n for (key, ax) in zip(grouped.groups.keys(), axes.flatten()):\n print(grouped.get_group(key))\n grouped.get_group(key).plot(ax=ax, x='p', y=['glmnet', 'glmnetpp'],\n logx=True, logy=True)\n ax.set_xscale('log', base=2)\n ax.set_title('N={n}'.format(n=key))\n ax.legend()\n ax.set_yscale('log', base=2)\n ax.set_ylabel('Time (s)')\n\n plt.savefig(os.path.join(fig_dir, TESTNAME + '_fig.png'))\n\n# Run benchmark\n# bench_dir directory to glmnetpp benchmark program (e.g. build/release/benchmark)\n# data_dir directory to store our timing data (e.g. docs/data)\n# ref_dir directory to reference (glmnet) program for comparison (e.g. benchmark/reference)\n# data_scr_dir directory to scripts that generate data (e.g. 
benchmark/data/script)\n# gen boolean whether to generate data or not\ndef run(bench_dir, data_dir, ref_dir, data_scr_dir, gen):\n df = pd.DataFrame()\n\n # save current working directory\n cur_path = os.getcwd()\n\n # generate all the data if gen is true\n if gen:\n os.chdir(data_scr_dir)\n for n in ns:\n for p in ps:\n gen_script = 'gen_random_unif.py'\n args = ('python3', gen_script, '-n', str(n), '-p', str(p))\n check_output(args)\n os.chdir(cur_path)\n\n # change directory to glmnetpp benchmark location\n os.chdir(bench_dir)\n\n bench_path = os.path.join('.', TESTNAME)\n print('Benchmark path: {p}'.format(p=bench_path))\n\n # run our benchmark and get output\n args = (bench_path, \"--benchmark_format=csv\")\n data = io.StringIO(check_output(args).decode(\"utf-8\"))\n df_bench = pd.read_csv(data, sep=',')\n df = df_bench[['p', 'n']]\n df['glmnetpp'] = df_bench['real_time'] * 1e-9\n df.set_index(['p', 'n'], inplace=True)\n\n os.chdir(cur_path)\n\n # now run R script using glmnet\n os.chdir(ref_dir)\n ref_name = TESTNAME + '.R'\n args = ('Rscript', ref_name)\n data = io.StringIO(check_output(args).decode(\"utf-8\"))\n df_bench = pd.read_csv(data, sep=',', header=None)\n df_bench.columns = ['glmnet', 'p', 'n']\n df_bench.set_index(['p', 'n'], inplace=True)\n df_bench['glmnet'] *= 1e-9\n df = pd.concat([df, df_bench], axis=1)\n df['relative'] = df['glmnet'] / df['glmnetpp']\n\n df.reset_index(inplace=True)\n\n # save absolute time\n data_path = os.path.join(data_dir, TESTNAME + \".csv\")\n df.to_csv(data_path)\n\n return df\n\nif __name__ == '__main__':\n import argparse\n import path_names\n\n parser = argparse.ArgumentParser(description='Runs a benchmark against glmnet on uniform(-1,1) data.')\n parser.add_argument('-g', action='store_const', const=True, default=False,\n help='Generate data if set.')\n args = parser.parse_args()\n\n df = run(path_names.bench_dir,\n path_names.data_dir,\n path_names.ref_dir,\n path_names.data_scr_dir,\n gen=args.g)\n plot(df, path_names.fig_dir)\n","sub_path":"glmnetpp/benchmark/analyze/analyze_lasso_stress_benchmark.py","file_name":"analyze_lasso_stress_benchmark.py","file_ext":"py","file_size_in_byte":3468,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"652667719","text":"# -*- coding: utf-8 -*-\n\nimport os,sqlite3\n\ndb_file = os.path.join(os.path.dirname(__file__),'test2.db')\nif os.path.isfile(db_file):\n os.remove(db_file)\n\n#初始数据:\nconn = sqlite3.connect(db_file)\ncursor = conn.cursor()\ncursor.execute('create table user(id varchar(20) primary key,name varchar(20),score int)')\ncursor.execute(r\"insert into user values('A-001','Adam',95)\")\ncursor.execute(r\"insert into user values('A-002','Bart',62)\")\ncursor.execute(r\"insert into user values('A-003','Lisa',78)\")\ncursor.close()\nconn.commit()\nconn.close()\n\ndef get_score_in(low,high):\n g_sco = sqlite3.connect(db_file)\n g_sco_cur = g_sco.cursor()\n g_sco_cur.execute('select name from user where score>=? and score<=? 
ORDER BY score',(low,high))\n name_list_tumple = g_sco_cur.fetchall()\n g_sco_cur.close()\n g_sco.close()\n return list(map(lambda x:x[0],name_list_tumple))\n\n#test:\nassert get_score_in(80,95)==['Adam'],get_score_in(80,95)\nassert get_score_in(60,85)==['Bart','Lisa'],get_score_in(60,80)\nassert get_score_in(60,100)==['Bart','Lisa','Adam'],get_score_in(60,100)\n\n","sub_path":"get_score_from_sqlite.py","file_name":"get_score_from_sqlite.py","file_ext":"py","file_size_in_byte":1086,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"90986659","text":"#!/usr/bin/python3\n\n\"\"\"\nНайти сумму n элементов следующего ряда чисел: 1 -0.5 0.25 -0.125 ...Количество элементов (n) вводится с клавиатуры.\n\n\"\"\"\n\nn = int(input(\"Введите длину последовательности n: \"))\n\nval = 1\nsum = 0\n\nfor i in range(n):\n sum += val\n val *= -0.5\n\nprint(\"Сумма %d элементов последовательности равна %.10f\" % (n, sum))\n","sub_path":"Lesson_02/task_04.py","file_name":"task_04.py","file_ext":"py","file_size_in_byte":484,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"135944378","text":"import datetime\nimport pytest\nfrom django.utils import timezone\nfrom django.test import TestCase\nfrom django.urls import reverse\nfrom utils.models import *\nfrom user.models import *\nfrom core.models import *\n\n\nclass TesteEvento(TestCase):\n\n\n def test_validar_criacao_do_evento(self):\n o_dono = Usuario(nome='Ele')\n endereco_evento = Endereco(pais='Brasil', cidade='Ipanema', bairro='Dirceu', logradouro='Rua das avenidas',\n numero='41',cep='44522-98', estado='Piaui')\n periodo_evento = Periodo(data_inicio='2017-08-14',data_fim='2017-08-18')\n evento = Evento(dono=o_dono, nome=\"Evento inicial\", tipo_evento='padrao',\n endereco=endereco_evento, periodo=periodo_evento)\n self.assertEqual(evento.periodo, periodo_evento)\n self.assertEqual(evento.endereco, endereco_evento)\n self.assertEqual(evento.dono.nome, 'Ele')\n self.assertEqual(evento.nome, 'Evento inicial')\n self.assertEqual(evento.tipo_evento, 'padrao')\n \n def test_evento_criado_com_dono_e_conjunto_vazio_de_atividades(self):\n o_dono = Usuario(nome='Ele')\n evento = Evento(dono=o_dono)\n sem_atividades = evento.get_atividades()\n vazio = len(sem_atividades)\n self.assertEqual(evento.dono.nome, 'Ele')\n self.assertEqual(vazio, 0)\n \n def test_evento_com_lista_vazia_de_relacionamento_com_instituicoes(self):\n evento = Evento()\n sem_instituicoes = evento.get_instituicoes()\n vazio = len(sem_instituicoes)\n self.assertEqual(vazio, 0)\n\n def test_nao_permitir_Atividades_de_Eventos_distintos_na_mesma_Atividade(self):\n atividade_repetida = AtividadePadrao(nome=\"Teste\")\n evento = Evento()\n self.assertFalse(atividade_repetida in evento.get_atividades())\n \n def test_mudanca_de_comportamentos_que_sejam_observados_por_outros_componentes(self):\n o_dono = Usuario(nome='Ele')\n endereco_evento = Endereco(pais='Brasil', cidade='Ipanema', bairro='Dirceu', logradouro='Rua das avenidas',\n numero='41', cep='44522-98', estado='Piaui')\n periodo_evento = Periodo(data_inicio='2017-08-14', data_fim='2017-08-18')\n evento = Evento(dono=o_dono, nome=\"Evento inicial\", tipo_evento='padrao',\n endereco=endereco_evento, periodo=periodo_evento)\n evento_novo = evento.periodo.data_inicio='2017-08-10'\n self.assertFalse(evento != evento_novo, \"Informações de evento foram modificadas.\")\n\n def 
test_apenas_evento_satelite_pode_ser_adicionado_sozinho_a_inscricao(self):\n \"\"\"\n Em caso de Eventos Satélites será possível se inscrever isoladamente em\n eventos que são satélites de eventos principais, porém não será permitido se\n inscrever apenas no evento Principal.\n \"\"\"\n evento_novo = Evento(nome='evento')\n evento_extra = Evento(nome='evento grande')\n evento_satelite = EventoSatelite(eventos=evento_extra)\n usuario = Usuario(nome='Usuario')\n atividade = AtividadePadrao(nome='Atividade')\n trilha = Trilha(id=1, nome = 'trilha', valor = 15.00, evento = evento_novo)\n trilhaSatelite = Trilha(id=2, nome='trilha', valor=15.00, evento=evento_extra)\n inscricao = Inscricao(id=1, status_inscricao = 'ativa', usuario = usuario, evento = evento_novo)\n self.assertFalse(TrilhaInscricao(trilha, inscricao))\n self.assertTrue(TrilhaInscricao(trilhaSatelite, inscricao))\n\n\n\n\n\n\n ","sub_path":"tests/core/test_evento.py","file_name":"test_evento.py","file_ext":"py","file_size_in_byte":3509,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"363593683","text":"import connection\r\nimport numpy as np\r\nfrom numpy import random\r\n\r\ncur = connection.get_cursor()\r\n#cur.execute(\"\"\"ALTER TABLE PROGRAMMERS ADD (FULL_NAME nvarchar(50))\"\"\")\r\n#cur.execute(\"\"\"ALTER TABLE PROGRAMMERS ALTER (FULL_NAME nvarchar(50))\"\"\")\r\ncur.execute(\"\"\"SELECT ID FROM PROGRAMMERS WHERE NAME IS NOT NULL\"\"\")\r\nids = [i[0] for i in cur.fetchall()]\r\n\r\nfirstNameMale = open('firstNameMale.txt', 'r').readlines()\r\nfirstNameFemale = open('firstNameFemale.txt', 'r').readlines()\r\nsurname = open('surname.txt', 'r').readlines()\r\nfirstName = firstNameMale + firstNameFemale\r\n\r\nnames = []\r\nfor i in ids:\r\n name = firstName[random.random_integers(len(firstName)-1)].strip() + ' ' + surname[random.random_integers(len(surname) - 1)].strip()\r\n names.append((str(name), i))\r\n\r\ncur.executemany(\"\"\"UPDATE PROGRAMMERS SET full_name=? WHERE id=?\"\"\", names)\r\ncur.commit()\r\n\r\n\r\nquery = \"\"\"CREATE ROW TABLE \"TOUCHED\" ( \"ID\" INT CS_INT NOT NULL,\r\n \"TRANSACTION_ID\" INT CS_INT NOT NULL,\r\n \"PROGRAMMER_ID\" INT CS_INT NOT NULL,\r\n PRIMARY KEY ( \"ID\" ) ) \"\"\"\r\n\r\ncur.execute(query)\r\ncur.execute(\"\"\"SELECT TRANSACTIONS.ID, PROGRAMMER_ID FROM TRANSACTIONS INNER JOIN PROGRAMMERS ON TRANSACTIONS.PROGRAMMER_ID = PROGRAMMERS.ID WHERE PROGRAMMERS.NAME IS NOT NULL\"\"\")\r\nids = [(i[0], i[1]) for i in cur.fetchall()]\r\ncur.execute(\"\"\"SELECT ID FROM PROGRAMMERS WHERE NAME IS NOT NULL\"\"\")\r\nprogrammers_id = [i[0] for i in cur.fetchall()]\r\n\r\n\r\nnum = 0\r\ntouched = []\r\nfor i in ids:\r\n num_touched = random.random_integers(10)\r\n touched.append((num, i[0], i[1]))\r\n num += 1\r\n for t in range(num_touched - 1):\r\n touched.append((num, i[0], programmers_id[random.random_integers(len(programmers_id)-1)]))\r\n num += 1\r\n\r\ncur.executemany(\"\"\"INSERT INTO TOUCHED (ID, TRANSACTION_ID, PROGRAMMER_ID) VALUES (?,?,?)\"\"\", touched)\r\ncur.commit()\r\n\r\n\r\n\r\n\r\n\r\ncur.execute(\"\"\"ALTER TABLE TRANSACTIONS ADD (MPT DECIMAL(3,2))\"\"\")\r\nMPT = []\r\nfor i in ids:\r\n trans_id = i[0]\r\n MPT.append((random.gamma(2, 0.2), trand_id))\r\n\r\ncur.executemany(\"\"\"INSERT INTO TRANSACTIONS (MPT) VALUES (?) 
WHERE ID = ?\"\"\", MPT)\r\ncur.commit()\r\n\r\n\r\ntran.merge(self, right, how='inner', on=None, left_on=None, right_on=None, left_index=False, right_index=False, sort=False, suffixes=('_x', '_y'), copy=True)\r\n\r\n\r\n","sub_path":"fuzzy_adventure/hana/synthesizeData.py","file_name":"synthesizeData.py","file_ext":"py","file_size_in_byte":2286,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"155371958","text":"from baseRobDriver import BaseRobDriver\nfrom abbTypedef import *\n\nimport socket\n\n\nclass AbbRobDriver(BaseRobDriver):\n _socket = None\n _robInfo = None\n _robTargetInfo = None\n _onConnect = False\n\n def __init__(self):\n self._robInfo = RobInfo()\n self._robTargetInfo = RobTarget()\n\n def connect_to_rob(self, ip: str, port: int):\n self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n if self._socket.connect_ex((ip, port)) is 0:\n self._onConnect = True\n\n def disconnect_from_rob(self):\n self._socket.close()\n self._onConnect = False\n\n @property\n def on_connect(self) -> bool:\n return self._onConnect\n\n @property\n def rob_sys_info(self) -> RobInfo:\n return self._robInfo\n\n @property\n def rob_target_info(self) -> RobTarget:\n return self._robTargetInfo\n\n def require_rob_sys_info(self):\n flag = 1\n self._socket.send(str(flag).encode('utf-8'))\n data = self._socket.recv(1024)\n data = self._socket.recv(1024)\n self._robInfo.decode(data)\n self._socket.send(\"COPY!\".encode('utf-8'))\n\n def require_rob_target_info(self):\n flag = 2\n self._socket.send(str(flag).encode('utf-8'))\n data = self._socket.recv(1024)\n\n data = self._socket.recv(1024)\n self._robTargetInfo.trans.decode(data)\n self._socket.send(\"COPY!\".encode('utf-8'))\n\n data = self._socket.recv(1024)\n self._robTargetInfo.rot.decode(data)\n self._socket.send(\"COPY!\".encode('utf-8'))\n\n data = self._socket.recv(1024)\n self._robTargetInfo.robConf.decode(data)\n self._socket.send(\"COPY!\".encode('utf-8'))\n\n data = self._socket.recv(1024)\n self._robTargetInfo.exTax.decode(data)\n self._socket.send(\"COPY!\".encode('utf-8'))\n\n def move_rob_target_to(self, target: RobTarget):\n flag = 3\n self._socket.send(str(flag).encode('utf-8'))\n data = self._socket.recv(1024)\n\n self._socket.send(target.trans.encode())\n data = self._socket.recv(1024)\n\n self._socket.send(target.rot.encode())\n data = self._socket.recv(1024)\n\n self._socket.send(target.robConf.encode())\n data = self._socket.recv(1024)\n\n self._socket.send(target.exTax.encode())\n data = self._socket.recv(1024)\n","sub_path":"src/Qt/Python/abbRobDriver.py","file_name":"abbRobDriver.py","file_ext":"py","file_size_in_byte":2363,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"596480627","text":"\"\"\"\n以model1为原型,新增real crop\n\"\"\"\nimport math\nimport os\nimport queue\nimport time\n\nimport keras\nfrom keras.layers import Dense, BatchNormalization, Activation\n\nimport config\nfrom util import data_loader\nfrom util import keras_util\nfrom util.keras_util import KerasModelConfig\n\nmodel_config = KerasModelConfig(k_fold_file=\"1.txt\",\n model_path=os.path.abspath(__file__),\n image_resolution=224,\n data_type=[config.DATA_TYPE_ORIGINAL],\n label_position=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12],\n label_color_augment=[0, 1, 3, 5, 6, 7, 9, 10, 11, 12],\n train_batch_size=[16, 16, 16],\n val_batch_size=256,\n predict_batch_size=256,\n epoch=[1, 4, 10],\n lr=[0.0005, 0.00005, 0.000005],\n 
data_visualization=True,\n tta_flip=True,\n freeze_layers=[-1, 0.6, 5])\n\n\ndef get_model(freeze_layers=-1, lr=0.01, output_dim=1, weights=\"imagenet\"):\n base_model = keras.applications.DenseNet201(include_top=False, weights=weights,\n input_shape=model_config.image_shape, pooling=\"avg\")\n\n x = base_model.output\n x = Dense(256, use_bias=False)(x)\n x = BatchNormalization()(x)\n x = Activation(\"relu\")(x)\n predictions = Dense(units=output_dim, activation='sigmoid')(x)\n model = keras.Model(inputs=base_model.input, outputs=predictions)\n\n if freeze_layers == -1:\n print(\"freeze all basic layers, lr=%f\" % lr)\n\n for layer in base_model.layers:\n layer.trainable = False\n else:\n if freeze_layers < 1:\n freeze_layers = math.floor(len(base_model.layers) * freeze_layers)\n for layer in range(freeze_layers):\n base_model.layers[layer].train_layer = False\n print(\"freeze %d basic layers, lr=%f\" % (freeze_layers, lr))\n\n model.compile(loss=\"binary_crossentropy\",\n optimizer=keras.optimizers.Adam(lr=lr))\n # model.summary()\n print(\"basic model have %d layers\" % len(base_model.layers))\n return model\n\n\ndef train():\n evaluate_queue = queue.Queue()\n evaluate_task = keras_util.EvaluateTask(evaluate_queue)\n evaluate_task.setDaemon(True)\n evaluate_task.start()\n checkpoint = keras_util.EvaluateCallback(model_config, evaluate_queue)\n\n start = time.time()\n model_config.save_log(\"####### start train model\")\n\n init_stage = model_config.get_init_stage()\n model_config.save_log(\"####### init stage is %d\" % init_stage)\n\n for i in range(init_stage, len(model_config.epoch)):\n model_config.save_log(\"####### lr=%f, freeze layers=%2f epoch=%d\" % (\n model_config.lr[i], model_config.freeze_layers[i], model_config.epoch[i]))\n clr = keras_util.CyclicLrCallback(base_lr=model_config.lr[i], max_lr=model_config.lr[i] * 5,\n step_size=model_config.get_steps_per_epoch(i) / 2)\n\n train_flow = data_loader.KerasGenerator(model_config=model_config,\n featurewise_center=True,\n featurewise_std_normalization=True,\n width_shift_range=0.15,\n height_shift_range=0.1,\n horizontal_flip=True,\n real_transform=True,\n rescale=1. 
/ 256).flow_from_files(model_config.train_files, mode=\"fit\",\n target_size=model_config.image_size,\n batch_size=\n model_config.train_batch_size[i],\n shuffle=True,\n label_position=model_config.label_position)\n\n if i == 0:\n model_config.save_log(\"####### initial epoch is 0, end epoch is %d\" % model_config.epoch[i])\n model = get_model(freeze_layers=model_config.freeze_layers[i], lr=model_config.lr[i],\n output_dim=len(model_config.label_position))\n model.fit_generator(generator=train_flow,\n steps_per_epoch=model_config.get_steps_per_epoch(i),\n epochs=model_config.epoch[i],\n workers=16,\n verbose=1,\n callbacks=[checkpoint, clr])\n else:\n model = get_model(freeze_layers=model_config.freeze_layers[i], output_dim=len(model_config.label_position),\n lr=model_config.lr[i], weights=None)\n\n if i == init_stage:\n model_config.save_log(\"####### load weight file: %s\" % model_config.get_weights_path(model_config.initial_epoch))\n model.load_weights(model_config.get_weights_path(model_config.initial_epoch))\n\n model_config.save_log(\"####### initial epoch is %d, end epoch is %d\" % (\n model_config.initial_epoch, model_config.epoch[i]))\n model.fit_generator(generator=train_flow,\n steps_per_epoch=model_config.get_steps_per_epoch(i),\n epochs=model_config.epoch[i],\n initial_epoch=model_config.initial_epoch,\n workers=16,\n verbose=1,\n callbacks=[checkpoint, clr])\n else:\n model_config.save_log(\"####### load weight file: %s\" % model_config.get_weights_path(model_config.epoch[i - 1]))\n model.load_weights(model_config.get_weights_path(model_config.epoch[i - 1]))\n\n model_config.save_log(\n \"####### initial epoch is %d, end epoch is %d\" % (model_config.epoch[i - 1], model_config.epoch[i]))\n model.fit_generator(generator=train_flow,\n steps_per_epoch=model_config.get_steps_per_epoch(i),\n epochs=model_config.epoch[i],\n initial_epoch=model_config.epoch[i - 1],\n workers=16,\n verbose=1,\n callbacks=[checkpoint, clr])\n\n model_config.save_log(\"####### train model spend %d seconds\" % (time.time() - start))\n model_config.save_log(\"####### train model spend %d seconds average\" % ((time.time() - start) / model_config.epoch[-1]))\n\n","sub_path":"model/densenet201/model29_val3.py","file_name":"model29_val3.py","file_ext":"py","file_size_in_byte":7175,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"109755001","text":"class Node:\n def __init__(self, l_child, r_child, data):\n self.l_child = l_child\n self.r_child = r_child\n self.data = data\n\n def __repr__(self):\n return self.data\n\n\ndef init_tree():\n d = Node(None, None, 'd')\n e = Node(None, None, 'e')\n f = Node(None, None, 'f')\n g = Node(None, None, 'g')\n b = Node(d, e, 'b')\n c = Node(f, g, 'c')\n a = Node(b, c, 'a')\n return a\n\n\ndef pre_traverse(node):\n if node is None:\n return\n print(node.data, end=' ')\n pre_traverse(node.l_child)\n pre_traverse(node.r_child)\n\n\ndef pre_traverse_loop(node):\n if node is None:\n return\n stack = []\n stack.append(node)\n while len(stack) > 0:\n n = stack.pop()\n print(n.data, end=' ')\n if n.r_child is not None:\n stack.append(n.r_child)\n if n.l_child is not None:\n stack.append(n.l_child)\n\n\ndef in_traverse(node):\n if node is None:\n return\n in_traverse(node.l_child)\n print(node.data, end=' ')\n in_traverse(node.r_child)\n\n\ndef in_traverse_loop(node):\n if node is None:\n return\n stack = []\n stack.append(node)\n traversed = []\n while len(stack) > 0:\n n = stack.pop()\n if n.l_child is not None and n not 
in traversed and n.l_child not in traversed:\n stack.append(n)\n stack.append(n.l_child)\n elif n.r_child is not None and n.r_child not in traversed:\n print(n.data, end=' ')\n stack.append(n.r_child)\n traversed.append(n)\n else:\n print(n.data, end=' ')\n traversed.append(n)\n\n\ndef post_traverse(node):\n if node is None:\n return\n post_traverse(node.l_child)\n post_traverse(node.r_child)\n print(node.data, end=' ')\n\n\ndef level_traverse(node):\n if node is None:\n return\n queue = []\n queue.insert(0, node)\n while len(queue) != 0:\n n = queue.pop()\n print(n, end=' ')\n if n.l_child is not None:\n queue.insert(0, n.l_child)\n if n.r_child is not None:\n queue.insert(0, n.r_child)\n pass\n pass\n\n\nif __name__ == '__main__':\n root = init_tree()\n print('-----pre_traverse----- ')\n pre_traverse(root)\n print()\n print('-----pre_traverse_loop----- ')\n pre_traverse_loop(root)\n print()\n print('-----in_traverse----- ')\n in_traverse(root)\n print()\n print('-----in_traverse_loop----- ')\n in_traverse_loop(root)\n print()\n print('-----post_traverse----- ')\n post_traverse(root)\n print()\n print('-----level_traverse----- ')\n level_traverse(root)\n","sub_path":"src/my/algorithm/BinaryTree.py","file_name":"BinaryTree.py","file_ext":"py","file_size_in_byte":2637,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"641442202","text":"class Solution(object):\n def hammingDistance(self, x, y):\n \"\"\"\n :type x: int\n :type y: int\n :rtype: int\n \"\"\"\n bx = format(x,'b')\n by = format(y, 'b')\n if len(bx) > len(by):\n by = by.zfill(len(bx))\n else:\n bx = bx.zfill(len(by))\n count = 0\n for pos in range(0,len(by)):\n if by[pos] != bx[pos]:\n count += 1\n return count\n","sub_path":"python/hamming_distance.py","file_name":"hamming_distance.py","file_ext":"py","file_size_in_byte":454,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"354793401","text":"import copy\ntest_input = \"\"\"\nnop +0\nacc +1\njmp +4\nacc +3\njmp -3\nacc -99\nacc +1\njmp -4\nacc +6\n\"\"\"\n\ndef read_input8(filename: str):\n with open(filename) as f:\n return f.read()\n\n\ndef parse_input(_input):\n _dict = {}\n _input = _input.split(\"\\n\")\n _input = [i for i in _input if i]\n for index, line in enumerate(_input):\n line = line.split(\" \")\n _dict[index] = {\"op\":line[0], \"value\":line[1]}\n return _dict\n\n\ndef console_game(acc_value=0, input_dict: dict = {}):\n \"\"\"\n console game stops if keys of given dictionary is repeated\n \"\"\"\n keys_travelled = []\n counter = 0\n while counter not in keys_travelled:\n # add counter value to keys_travelled\n keys_travelled.append(counter)\n\n # add value to acc_value\n # print(acc_value, keys_travelled, input_dict[counter]['op'])\n # update the counter\n if input_dict[counter]['op'] != 'jmp':\n if input_dict[counter]['op'] == 'acc':\n acc_value += int(input_dict[counter]['value'])\n counter += 1\n else:\n counter = counter + int(input_dict[counter]['value'])\n return acc_value\n\n\ndef loop_through(acc_value=0, input_dict:dict={}):\n keys_travelled = []\n counter = 0\n final_counter = max(input_dict.keys())\n is_not_infinite_loop = False\n\n while True:\n if counter in keys_travelled:\n # print(\"infinite loop occured\")\n break\n else:\n keys_travelled.append(counter)\n # print(counter, acc_value, input_dict[counter]['op'])\n if input_dict[counter]['op'] != 'jmp':\n if input_dict[counter]['op'] == 'acc':\n acc_value += 
int(input_dict[counter]['value'])\n counter += 1\n else:\n counter = counter + int(input_dict[counter]['value'])\n if counter > final_counter:\n is_not_infinite_loop = True\n # print(is_not_infinite_loop)\n break\n # print(keys_travelled)\n\n if is_not_infinite_loop:\n return acc_value\n else:\n return False\n \n\n\ndef console_game_part2(input_dict: dict = {}):\n \"\"\"\n take into account all the jmp in the given input_dict\n # prepare jmp dictionary\n # loop through every jmp key and change the jmp key to nop and validate\n # if infinite loop occurs break from the loop\n # if last statement of the input is completed\n # return the acc_value\n \"\"\"\n jmp_dictionary = {key: value for key, value in input_dict.items() if value['op']=='jmp'}\n # print(jmp_dictionary)\n\n for key, value in jmp_dictionary.items():\n # deepcopy ensures input_dict is not changed\n changed_dictionary = copy.deepcopy(input_dict)\n changed_dictionary[key]['op'] = 'nop'\n\n # print(changed_dictionary)\n value = loop_through(0, changed_dictionary)\n if value is not False:\n return value\n \n\n","sub_path":"2020/problem_8.py","file_name":"problem_8.py","file_ext":"py","file_size_in_byte":2955,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"561432893","text":"# -*- coding: utf-8 -*-\n\nimport json\nimport numpy as np\nimport pandas as pd\nimport os, re, glob, gc\n\nfrom sshtunnel import SSHTunnelForwarder\nimport mysql.connector\nimport pandas as pd\nimport pymorphy2\nfrom nltk.corpus import stopwords as sw\nimport os\nimport re\nimport numpy as np\nfrom multiprocessing.dummy import Pool as ThreadPool\nfrom multiprocessing import Pool\nfrom pandas.io import sql\nfrom nltk.corpus import stopwords\nfrom sqlalchemy import create_engine\nimport gc\n#import graphlab as gl\n\n\nfrom scipy.sparse import hstack\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.preprocessing import LabelEncoder\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.externals import joblib\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.ensemble import RandomForestClassifier\nimport pickle\n\n\ndef prepare_ids(filename, path_to_save):\n \n #type_obr - 0 - всё, 1 - только процедуры, 2 - только suppliers\n tmp_id = ''\n \n if '44fz' in filename:\n save_name_contracts = '44fz_{}_id.csv'.format(filename[-20:-14])\n else:\n save_name_contracts = '223fz_{}_id.csv'.format(filename[-20:-14])\n \n \n \n with open(filename) as f:\n data = json.load(f)\n gc.collect()\n \n print('load data')\n \n \n #prepare arrays\n new_ids = np.zeros(len(data), dtype = 'object')\n guids = np.zeros(len(data), dtype = 'object')\n ids = np.zeros(len(data), dtype = 'object')\n\n for i in range(len(data)):\n new_ids[i] = ''\n guids[i] = ''\n ids[i] = ''\n\n \n \n\n #parsing\n for i, d in enumerate(data):\n \n if 'regNum' in d.keys():\n new_ids[i] = d['regNum']\n if 'lot' in d.keys():\n guids[i] = d['lot']['guid']\n if 'id' in d.keys():\n ids[i] = d['id'] \n \n\n df = pd.DataFrame(dict({'id':ids,\n 'new_id':new_ids,\n 'guids':guids\n }))\n\n df.to_csv(path_to_save+save_name_contracts, index = False, encoding = 'utf-8')\n\n del df, new_ids, ids, guids, \n gc.collect()\n\n print('end df')\n print('save file {}'.format(path_to_save+save_name_contracts))\n \n \n return 0\n\n\n\ndef prepare_proc(filename, path_to_save, type_obr = 0):\n \n #type_obr - 0 - всё, 1 - только процедуры, 2 - только suppliers\n tmp_id = ''\n \n if '44fz' in filename:\n save_name_contracts = 
'44fz_{}.csv'.format(filename[-20:-14])\n save_name_participate = '44fz_participate_{}.csv'.format(filename[-20:-14])\n else:\n save_name_contracts = '223fz_{}.csv'.format(filename[-20:-14])\n save_name_participate = '223fz_participate_{}.csv'.format(filename[-20:-14])\n \n \n with open(filename) as f:\n data = json.load(f)\n gc.collect()\n \n print('load data')\n \n \n #prepare arrays\n subjects = np.zeros(len(data), dtype = 'object')\n guids = np.zeros(len(data), dtype = 'object')\n ids = np.zeros(len(data), dtype = 'object')\n\n prices = np.zeros(len(data), dtype = 'float')\n currency = np.zeros(len(data), dtype = 'object')\n regions = np.zeros(len(data), dtype = 'object')\n address = np.zeros(len(data), dtype = 'object')\n dates = np.zeros(len(data), dtype = 'object')\n\n part_inn = []\n part_kpp = []\n part_id = []\n part_email = []\n part_phone = []\n part_orgname = []\n part_lastName = []\n part_firstName = []\n part_middleName = []\n\n\n for i in range(len(data)):\n subjects[i] = ''\n guids[i] = ''\n ids[i] = ''\n currency[i] = ''\n regions[i] = ''\n address[i] = ''\n dates[i] = ''\n \n \n if type_obr in [0,1]:\n \n #parsing\n for i, d in enumerate(data):\n #if 'schemaVersion' in d.keys():\n # schemaVersion = d['schemaVersion'] \n # if schemaVersion != '8.3':\n # print(f'schemaVersion = {schemaVersion:}')\n\n\n if 'regNum' in d.keys():\n ids[i] = d['regNum']\n if 'lot' in d.keys():\n subjects[i] = d['lot']['subject']\n guids[i] = d['lot']['guid']\n if 'purchaseInfo' in d.keys():\n if 'name' in d['purchaseInfo'].keys():\n subjects[i] = subjects[i]+' '+d['purchaseInfo']['name']\n if 'products' in d.keys():\n for prod in d['products']:\n if 'name' in prod.keys():\n subjects[i] = subjects[i]+' '+prod['name']\n if 'deliveryPlace' in prod:\n if 'address' in prod['deliveryPlace'].keys():\n address[i] = address[i]+' '+prod['deliveryPlace']['address']\n\n if 'price' in d.keys():\n prices[i] = d['price']\n if 'regionCode' in d.keys():\n regions[i] = d['regionCode']\n\n if 'currency' in d.keys():\n if 'code' in d['currency'].keys():\n currency[i] = d['currency']['code']\n elif 'digitalCode' in d['currency'].keys():\n currency[i] = d['currency']['digitalCode']\n\n if 'createDateTime' in d.keys():\n dates[i] = d['createDateTime']\n\n subjects[i] = re.sub('[\",.!?\\'\\s()]', ' ', subjects[i])\n address[i] = re.sub('[\",.!?\\'\\s()]', ' ', address[i])\n\n print('end cycle')\n\n\n\n\n df = pd.DataFrame(dict({'id':ids,\n 'proc_name':subjects,\n #'guids':guids,\n 'price':prices,\n 'currency':currency,\n 'region':regions,\n 'address':address,\n 'date':dates}))\n\n df.to_csv(path_to_save+save_name_contracts, index = False, encoding = 'utf-8')\n\n del df, subjects, guids, prices, currency, regions, address, dates\n gc.collect()\n\n print('end df')\n print('save file {}'.format(path_to_save+save_name_contracts))\n \n \n \n if type_obr in [0,2]:\n \n #parsing suppliers\n for i, d in enumerate(data):\n \n if 'id' in d.keys():\n tmp_id = d['regNum']\n else:\n tmp_id = ''\n \n if 'suppliers' in d.keys(): \n for sup in d['suppliers']: \n \n if 'inn' in sup.keys():\n part_inn.append(sup['inn'])\n else:\n part_inn.append('')\n\n if 'kpp' in sup.keys():\n part_kpp.append(sup['kpp'])\n else:\n part_kpp.append('') \n\n if 'contactPhone' in sup.keys():\n part_phone.append(sup['contactPhone'])\n else:\n part_phone.append('')\n\n if 'contactEMail' in sup.keys():\n part_email.append(sup['contactEMail'])\n else:\n part_email.append('')\n \n if 'organizationName' in sup.keys():\n 
part_orgname.append(sup['organizationName'])\n else:\n part_orgname.append('') \n \n if 'contactInfo' in sup.keys():\n cont = sup['contactInfo']\n if 'lastName' in cont.keys():\n part_lastName.append(cont['lastName'])\n else:\n part_lastName.append('')\n \n if 'middleName' in cont.keys():\n part_middleName.append(cont['middleName'])\n else:\n part_middleName.append('')\n \n if 'firstName' in cont.keys():\n part_firstName.append(cont['firstName'])\n else:\n part_firstName.append('')\n else:\n part_lastName.append('')\n part_middleName.append('')\n part_firstName.append('')\n \n part_id.append(tmp_id)\n\n\n part = pd.DataFrame(dict({'id':part_id,\n 'inn':part_inn,\n 'kpp':part_kpp,\n 'phone':part_phone,\n 'email':part_email,\n 'orgname':part_orgname,\n 'lastName':part_lastName,\n 'firstName':part_firstName,\n 'middleName':part_middleName\n }))\n\n\n\n part.to_csv(path_to_save+save_name_participate, index = False, encoding = 'utf-8')\n\n del part, part_inn, part_kpp, part_id, part_phone, part_email\n print('end part')\n\n gc.collect()\n \n \n print('save file {}'.format(path_to_save+save_name_participate))\n\n\n return 0\n\n\ndef normalize_text(filename, nrows = None):\n \n with open(filename, encoding = 'utf-8') as f:\n df = pd.read_csv(f)\n gc.collect()\n \n if nrows is not None:\n df = df.head(nrows)\n \n names = df['proc_name'].tolist()\n\n morph = pymorphy2.MorphAnalyzer()\n token_pattern = re.compile(r'(?u)\\w+')\n\n tokenize = lambda doc: list(token_pattern.findall(doc))\n m_parse_tag = lambda word: morph.parse(word)[0].tag.POS\n word_to_normal = lambda word: morph.parse(word)[0].normal_form\n \n t=[]\n for index, i in enumerate(names):\n a = tokenize(i)\n t.append(a)\n #if index%1000 == 0:\n # print(f'{index:} from {len(names):}')\n \n t_normal = []\n r = []\n i = 0\n for i in range(0, len(t)):\n for k in t[i]:\n t_normal.append(word_to_normal(k))\n r.append(t_normal)\n t_normal = []\n #if i%1000 == 0:\n # print(f'{i:} from {len(t):}')\n \n df['proc_name'] = r\n df['proc_name'] = df['proc_name'].apply(lambda x: ' '.join(x))\n \n return df\n\ndef filter_limit(data, limit_dict):\n result_filter = None\n\n for tag, limit in limit_dict.items():\n if result_filter is None:\n result_filter = data.loc[(data['preds'] == tag) & (data['p']>limit)].copy()\n else:\n result_filter = result_filter.append(data.loc[(data['preds'] == tag) & (data['p']>limit)])\n \n return result_filter\n\n\ndef tagging(filename):\n df = pd.read_csv(filename)\n df['proc_name'] = df['proc_name'].astype('str')\n \n X = keys_vectorizer.transform(df['proc_name'])\n \n pred = clf.predict_proba(X)\n pred_int = clf.predict(X)\n\n res = pd.DataFrame(pred)\n res['sphinx_id'] = df['id']\n res['maximum'] = np.max(np.array(res[res.columns[:-1]]), axis=1)\n res['preds'] = pred_int\n res.rename(columns={'maximum':'p'}, inplace=True)\n res['crm_tag'] = le.inverse_transform(res['preds'].values)\n res['proc_name'] = df['proc_name']\n\n res = res[['crm_tag','sphinx_id','proc_name','preds','p']]\n\n print('Before limiting ', res.shape)\n res = filter_limit(res, limit_dict)\n print('After limiting ', res.shape)\n \n return res[['crm_tag','sphinx_id','proc_name']]\n\n\n\n\n\n\n\n ","sub_path":"functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":11901,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"379544769","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\n\"\"\"\nhttps://leetcode.com/problems/find-duplicate-subtrees/description/\n\n\nGiven a 
binary tree, return all duplicate subtrees.\nFor each kind of duplicate subtrees,\nyou only need to return the root node of any one of them.\n\nTwo trees are duplicate if they have the same structure with same node values.\n\nExample 1:\n 1\n / \\\n 2 3\n / / \\\n 4 2 4\n /\n 4\n\nThe following are two duplicate subtrees:\n 2\n /\n 4\n\nand\n 4\n\nTherefore, you need to return above trees' root in the form of a list.\n\n注意!!\ninorder 才能構成獨一無二的 signature 嗎?\n錯! 只要 root node 也 encode 即可, preorder 也行!\n\"\"\"\n\n\n# Definition for a binary tree node.\nclass TreeNode(object):\n def __init__(self, x):\n self.val = x\n self.left = None\n self.right = None\n\n\nclass Solution(object):\n def findDuplicateSubtrees(self, root):\n \"\"\"\n :type root: TreeNode\n :rtype: List[TreeNode]\n Look for signature!\n \"\"\"\n\n from collections import defaultdict as dd\n resultDict= dd(None)\n keySet = set()\n\n def encode(root):\n if not root:\n return \"#\"\n\n code = []\n code.append(str(root.val))\n\n code.append(encode(root.left))\n code.append(encode(root.right))\n\n enc = \"\".join(code)\n if enc in keySet:\n resultDict[enc] = root\n\n keySet.add(enc)\n\n return enc\n\n\n encode(root)\n return resultDict.values()\n\n\ndef build():\n \"\"\"\n 1\n / \\\n 2 3\n / / \\\n 4 2 4\n /\n 4\n \"\"\"\n # root = TreeNode(1)\n # root.right = TreeNode(3)\n # root.left = TreeNode(1)\n # root.left.right = TreeNode(3)\n # return root\n\n root = TreeNode(1)\n root.left = TreeNode(2)\n root.left.left = TreeNode(4)\n root.right = TreeNode(3)\n root.right.right = TreeNode(4)\n root.right.left = TreeNode(2)\n root.right.left.left = TreeNode(4)\n return root\n\ndef pp(node):\n if not node:\n return\n print(node.val)\n pp(node.left)\n pp(node.right)\n\n\nif __name__ == \"__main__\":\n s = Solution()\n [pp(l) for l in s.findDuplicateSubtrees(build())]\n","sub_path":"co_ms/652_Find_Duplicate_Subtrees.py","file_name":"652_Find_Duplicate_Subtrees.py","file_ext":"py","file_size_in_byte":2293,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"102622162","text":"import matplotlib.pyplot as plt\r\nfrom matplotlib import cm\r\nfrom matplotlib.font_manager import FontProperties\r\nimport numpy as np\r\nimport os\r\n\r\n# Control variables: where C4 default is 0.5cm and 64 azimuthal angle\r\ngeometries = ['xml-sample/geometry_1810_1_8.xml',\r\n 'xml-sample/geometry_1810_2_8.xml',\r\n 'xml-sample/geometry_1810_3_8.xml', \r\n 'xml-sample/geometry_1810_4_8.xml',\r\n 'xml-sample/geometry_1810_5_8.xml',\r\n 'xml-sample/geometry_1810_10_8.xml',\r\n 'xml-sample/geometry_1810_12_8.xml']\r\n\r\nmaterials = ['xml-sample/material_1810_1_8g.xml',\r\n 'xml-sample/material_1810_2_8g.xml',\r\n 'xml-sample/material_1810_3_8g.xml',\r\n 'xml-sample/material_1810_4_8g.xml', \r\n 'xml-sample/material_1810_5_8g.xml',\r\n 'xml-sample/material_1810_10_8g.xml',\r\n 'xml-sample/material_1810_12_8g.xml']\r\n\r\nts = [0.4] \r\nna = [16]\r\nfc = [1e-5]\r\n\r\n# Parameters for plotting\r\nls = ['--o', '-s', '-.v', '-<', '-^', '-.>', '--s', '-v', '-o', '-.<', '--<', '-->', '-.s']\r\nfontP = FontProperties()\r\nfontP.set_size('small')\r\nmax_num_lines = 0\r\nnum = 1;\r\ncounter = 0;\r\n\r\nfor i, geometry in enumerate(geometries):\r\n\r\n # Generates the geometry names\r\n geometry_no_slash = geometry.replace(\"/\", \"_\")\r\n geometry_name = geometry_no_slash[20:-4]\r\n\r\n # Runs OpenMOC\r\n for spacing in ts:\r\n for angle in na:\r\n os.system('cd .. 
&& ./bin/openmoc'\r\n + ' -m ' + materials[i]\r\n + ' -g ' + geometry \r\n + ' -na ' + str(angle) \r\n + ' -ts ' + str(spacing) \r\n + ' -fc ' + str(fc[0]) \r\n + ' -wc -nlp')\r\n\r\n os.system('cd .. && ./bin/openmoc'\r\n + ' -m ' + materials[i]\r\n + ' -g ' + geometry\r\n + ' -na ' + str(angle)\r\n + ' -ts ' + str(spacing)\r\n + ' -fc ' + str(fc[0])\r\n + ' -wc -lp')\r\n\r\n os.system('cd .. && ./bin/openmoc'\r\n + ' -m ' + materials[i]\r\n + ' -g ' + geometry \r\n + ' -na ' + str(angle) \r\n + ' -ts ' + str(spacing) \r\n + ' -fc ' + str(fc[0]) \r\n + ' -wc -lp -dc')\r\n\r\n os.system('cd .. && ./bin/openmoc'\r\n + ' -m ' + materials[i]\r\n + ' -g ' + geometry \r\n + ' -na ' + str(angle) \r\n + ' -ts ' + str(spacing) \r\n + ' -fc ' + str(fc[0]) \r\n + ' -wl1 -nlp')\r\n\r\n os.system('cd .. && ./bin/openmoc'\r\n + ' -m ' + materials[i]\r\n + ' -g ' + geometry\r\n + ' -na ' + str(angle)\r\n + ' -ts ' + str(spacing)\r\n + ' -fc ' + str(fc[0])\r\n + ' -wl1 -lp')\r\n\r\n os.system('cd .. && ./bin/openmoc'\r\n + ' -m ' + materials[i]\r\n + ' -g ' + geometry\r\n + ' -na ' + str(angle)\r\n + ' -ts ' + str(spacing)\r\n + ' -fc ' + str(fc[0])\r\n + ' -wl2 -nlp')\r\n\r\n os.system('cd .. && ./bin/openmoc'\r\n + ' -m ' + materials[i]\r\n + ' -g ' + geometry \r\n + ' -na ' + str(angle) \r\n + ' -ts ' + str(spacing) \r\n + ' -fc ' + str(fc[0]) \r\n + ' -wl2 -lp')\r\n\r\n l2_norm_files = []\r\n\r\n # Obtain and sorts l2_norm file names in directory\r\n for file in os.listdir(\"../\"):\r\n if file.startswith(\"%s_%s_%s\"%(geometry_no_slash, na[0], ts[0])) and file.endswith(\".txt\"):\r\n l2_norm_files.append(file)\r\n num = num + 1\r\n l2_norm_files.sort()\r\n \r\n counter = 0;\r\n # parse output files\r\n for file in l2_norm_files:\r\n counter = counter + 1\r\n logfile = open('../'+file, \"r\").readlines()\r\n \r\n method = file[-8:-4]\r\n upscat = file[-15:-9]\r\n update = file[-22:-16]\r\n damp = file[-26:-23]\r\n bi = file[-28:-27]\r\n\r\n print(\"geometry = %s, ts = %s, na = %s, upscat = %s\"%(geometry_name, ts, na, upscat))\r\n\r\n # find number of lines in file\r\n for num_lines, l in enumerate(logfile):\r\n pass\r\n\r\n max_num_lines = max(num_lines, max_num_lines)\r\n\r\n # create numpy arrays\r\n iteration = np.zeros(num_lines)\r\n fsr_l2 = np.zeros(num_lines)\r\n\r\n # collect data together\r\n for k, line in enumerate(logfile):\r\n if k is not 0:\r\n iteration[k-1] = line.split()[0]\r\n fsr_l2[k-1] = line.split()[1]\r\n\r\n var = []\r\n var.append(fsr_l2);\r\n\r\n # plotting :)\r\n for j in range(1): \r\n plt.figure(j)\r\n plt.semilogy(iteration, \r\n var[j], \r\n ls[counter - 1],\r\n color=cm.jet(1.*counter / num),\r\n label = (\"%s %s\"%(method, upscat)), markersize=5)\r\n plt.xlim(0, max_num_lines + 1)\r\n plt.legend(loc='upper center', ncol=3, prop = fontP, shadow=True, \r\n bbox_to_anchor=(0.5,-0.1),fancybox=True)\r\n # end of one file\r\n\r\n # save figure including different configuration of the same geometries.\r\n plt.figure(0)\r\n plt.xlabel('# MOC iteration')\r\n plt.ylabel('L2 Norm on Cell Fission Source Relative Change')\r\n plt.title('Geometry: %s,'%(geometry_name) + ' spacing: %s,'%str(ts[0]) \r\n + ' #angles: %s'%str(na[0]))\r\n plt.savefig(geometry_name + '_l2.png', bbox_inches='tight')\r\n plt.clf()\r\n","sub_path":"Debug/runc5g7_8g.py","file_name":"runc5g7_8g.py","file_ext":"py","file_size_in_byte":5913,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"15914111","text":"#! 
/usr/bin/env python\nimport re\nimport os\n\n\nclass Directories:\n    root_dir = ''\n    files_with_changes = []\n    directories = []\n\n    def __init__(self, files_with_changes, root_dir=''):\n        self.root_dir = root_dir\n\n        self.set_only_files(files_with_changes)\n\n    # If files_with_changes contains directories,\n    # extract their files and remove those (directory) entries\n    def set_only_files(self, files_with_changes=None):\n        for key in files_with_changes:\n            self.process_directory(re.sub(r\"\" + os.sep + '$', '', key))\n\n    # Returns the full path of the given file\n    def full_path(self, path=''):\n        return self.root_dir + os.sep + path\n\n    # Walks a directory and its subdirectories looking for files;\n    # the files found are appended to the files_with_changes list\n    def process_directory(self, file=''):\n        is_dir = 0\n\n        for root, dirs, files in os.walk(file):\n            is_dir = 1\n            for dir in dirs:\n                self.process_directory(file + os.sep + dir)\n\n            for archivo in files:\n                if os.path.exists(file + os.sep + archivo):\n                    self.files_with_changes.append(file + os.sep + archivo)\n\n        root, ext = os.path.splitext(file)\n        if is_dir == 0 and ext and os.path.exists(file):\n            self.files_with_changes.append(file)\n\n        return\n\n    def set_directories_list(self):\n        for arch in self.files_with_changes:\n            dirs = arch.split(os.sep)\n            dir_acum = ''\n            count_aux = 0\n\n            for dir in dirs:\n                count_aux += 1\n\n                dir_acum = dir_acum + os.sep + dir\n                if count_aux == len(dirs):\n                    if not self.file_exists_in_directories(dir_acum):\n                        self.directories.append(dir_acum)\n                else:\n                    if not self.file_exists_in_directories(dir_acum + os.sep):\n                        self.directories.append(dir_acum + os.sep)\n\n    def file_exists_in_directories(self, path=''):\n        if path in self.directories:\n            return True\n        else:\n            return False\n\n    def print_directories_list(self):\n        print('')\n        for directory in self.directories:\n            print(directory)\n","sub_path":"app/tools/Directories.py","file_name":"Directories.py","file_ext":"py","file_size_in_byte":2279,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"120383009","text":"# pylint: disable=E1101,E1103\n\nfrom datetime import datetime\nimport operator\n\nimport numpy as np\n\nfrom pandas.core.index import Index\nimport pandas.core.datetools as datetools\n\n__all__ = ['DateRange']\n\n#-------------------------------------------------------------------------------\n# DateRange class\n\ndef _bin_op(op):\n    def f(self, other):\n        return op(self.view(np.ndarray), other)\n\n    return f\n\n_CACHE_START = datetime(1950, 1, 1)\n_CACHE_END = datetime(2030, 1, 1)\n\nclass DateRange(Index):\n    \"\"\"\n    Fixed frequency date range according to input parameters.\n\n    Input dates satisfy:\n        begin <= d <= end, where d lies on the given offset\n\n    Parameters\n    ----------\n    start : {datetime, None}\n        left boundary for range\n    end : {datetime, None}\n        right boundary for range\n    periods : int\n        Number of periods to generate.\n    offset : DateOffset, default is 1 BusinessDay\n        Used to determine the dates returned\n    timeRule : timeRule to use\n    \"\"\"\n    _cache = {}\n    def __new__(cls, start=None, end=None, periods=None,\n                offset=datetools.bday, timeRule=None, **kwds):\n\n        # Allow us to circumvent hitting the cache\n        index = kwds.get('index')\n        if index is None:\n            if timeRule is not None:\n                offset = datetools.getOffset(timeRule)\n\n            if timeRule is None:\n                if offset in datetools._offsetNames:\n                    timeRule = datetools._offsetNames[offset]\n\n            # Cachable\n            if not start:\n                start = kwds.get('begin')\n            if not end:\n                end = 
kwds.get('end')\n if not periods:\n periods = kwds.get('nPeriods')\n\n start = datetools.to_datetime(start)\n end = datetools.to_datetime(end)\n\n # inside cache range\n fromInside = start is not None and start > _CACHE_START\n toInside = end is not None and end < _CACHE_END\n\n useCache = fromInside and toInside\n\n if (useCache and offset.isAnchored() and\n isinstance(offset, datetools.CacheableOffset)):\n\n index = cls._cached_range(start, end, periods=periods,\n offset=offset, timeRule=timeRule)\n\n else:\n xdr = generate_range(start=start, end=end, periods=periods,\n offset=offset, timeRule=timeRule)\n\n index = np.array(list(xdr), dtype=object, copy=False)\n\n index = index.view(cls)\n index.offset = offset\n else:\n index = index.view(cls)\n\n return index\n\n def __reduce__(self):\n \"\"\"Necessary for making this object picklable\"\"\"\n a, b, state = Index.__reduce__(self)\n aug_state = state, self.offset\n\n return a, b, aug_state\n\n def __setstate__(self, aug_state):\n \"\"\"Necessary for making this object picklable\"\"\"\n state, offset = aug_state[:-1], aug_state[-1]\n\n self.offset = offset\n Index.__setstate__(self, *state)\n\n @property\n def _allDates(self):\n return True\n\n @classmethod\n def _cached_range(cls, start=None, end=None, periods=None, offset=None,\n timeRule=None):\n\n # HACK: fix this dependency later\n if timeRule is not None:\n offset = datetools.getOffset(timeRule)\n\n if offset is None:\n raise Exception('Must provide a DateOffset!')\n\n if offset not in cls._cache:\n xdr = generate_range(_CACHE_START, _CACHE_END, offset=offset)\n arr = np.array(list(xdr), dtype=object, copy=False)\n\n cachedRange = DateRange.fromIndex(arr)\n cachedRange.offset = offset\n\n cls._cache[offset] = cachedRange\n else:\n cachedRange = cls._cache[offset]\n\n if start is None:\n if end is None:\n raise Exception('Must provide start or end date!')\n if periods is None:\n raise Exception('Must provide number of periods!')\n\n assert(isinstance(end, datetime))\n\n end = offset.rollback(end)\n\n endLoc = cachedRange.indexMap[end] + 1\n startLoc = endLoc - periods\n elif end is None:\n assert(isinstance(start, datetime))\n start = offset.rollforward(start)\n\n startLoc = cachedRange.indexMap[start]\n if periods is None:\n raise Exception('Must provide number of periods!')\n\n endLoc = startLoc + periods\n else:\n start = offset.rollforward(start)\n end = offset.rollback(end)\n\n startLoc = cachedRange.indexMap[start]\n endLoc = cachedRange.indexMap[end] + 1\n\n indexSlice = cachedRange[startLoc:endLoc]\n\n return indexSlice\n\n @classmethod\n def fromIndex(cls, index):\n index = cls(index=index)\n return index\n\n def __array_finalize__(self, obj):\n if self.ndim == 0: # pragma: no cover\n return self.item()\n\n self.offset = getattr(obj, 'offset', None)\n\n __lt__ = _bin_op(operator.lt)\n __le__ = _bin_op(operator.le)\n __gt__ = _bin_op(operator.gt)\n __ge__ = _bin_op(operator.ge)\n __eq__ = _bin_op(operator.eq)\n\n def __getslice__(self, i, j):\n return self.__getitem__(slice(i, j))\n\n def __getitem__(self, key):\n \"\"\"Override numpy.ndarray's __getitem__ method to work as desired\"\"\"\n result = self.view(np.ndarray)[key]\n\n if isinstance(key, (int, np.integer)):\n return result\n elif isinstance(key, slice):\n newIndex = result.view(DateRange)\n\n if key.step is not None:\n newIndex.offset = key.step * self.offset\n else:\n newIndex.offset = self.offset\n\n return newIndex\n else:\n return Index(result)\n\n def __repr__(self):\n output = str(self.__class__) + '\\n'\n 
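# keep the repr compact: show offset, endpoints and length instead of every date\n        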
output += 'offset: %s\\n' % self.offset\n output += '[%s, ..., %s]\\n' % (self[0], self[-1])\n output += 'length: %d' % len(self)\n return output\n\n __str__ = __repr__\n\n def shift(self, n, offset=None):\n \"\"\"\n Specialized shift which produces a DateRange\n\n Parameters\n ----------\n n : int\n Periods to shift by\n offset : DateOffset or timedelta-like, optional\n\n Returns\n -------\n shifted : DateRange\n \"\"\"\n if offset is not None and offset != self.offset:\n return Index.shift(self, n, offset)\n\n if n == 0:\n # immutable so OK\n return self\n\n start = self[0] + n * self.offset\n end = self[-1] + n * self.offset\n return DateRange(start, end, offset=self.offset)\n\n def union(self, other):\n \"\"\"\n Specialized union for DateRange objects. If combine\n overlapping ranges with the same DateOffset, will be much\n faster than Index.union\n\n Parameters\n ----------\n other : DateRange or array-like\n\n Returns\n -------\n y : Index or DateRange\n \"\"\"\n if not isinstance(other, DateRange) or other.offset != self.offset:\n return Index.union(self.view(Index), other)\n\n offset = self.offset\n\n # to make our life easier, \"sort\" the two ranges\n if self[0] <= other[0]:\n left, right = self, other\n else:\n left, right = other, self\n\n left_start, left_end = left[0], left[-1]\n right_start, right_end = right[0], right[-1]\n\n # Only need to \"adjoin\", not overlap\n if (left_end + offset) >= right_start:\n return DateRange(left_start, max(left_end, right_end),\n offset=offset)\n else:\n return Index.union(self, other)\n\ndef generate_range(start=None, end=None, periods=None,\n offset=datetools.BDay(), timeRule=None):\n \"\"\"\n Generates a sequence of dates corresponding to the specified time\n offset. Similar to dateutil.rrule except uses pandas DateOffset\n objects to represent time increments\n\n Parameters\n ----------\n start : datetime (default None)\n end : datetime (default None)\n periods : int, optional\n\n Note\n ----\n * This method is faster for generating weekdays than dateutil.rrule\n * At least two of (start, end, periods) must be specified.\n * If both start and end are specified, the returned dates will\n satisfy start <= date <= end.\n\n Returns\n -------\n dates : generator object\n\n See also\n --------\n DateRange, dateutil.rrule\n \"\"\"\n\n if timeRule is not None:\n offset = datetools.getOffset(timeRule)\n\n if timeRule is None:\n if offset in datetools._offsetNames:\n timeRule = datetools._offsetNames[offset]\n\n start = datetools.to_datetime(start)\n end = datetools.to_datetime(end)\n\n if start and not offset.onOffset(start):\n start = offset.rollforward(start)\n\n if end and not offset.onOffset(end):\n end = offset.rollback(end)\n\n if periods is None and end < start:\n end = None\n periods = 0\n\n if end is None:\n end = start + (periods - 1) * offset\n\n if start is None:\n start = end - (periods - 1) * offset\n\n cur = start\n if offset._normalizeFirst:\n cur = datetools.normalize_date(cur)\n\n while cur <= end:\n yield cur\n\n # faster than cur + offset\n cur = offset.apply(cur)\n","sub_path":"pandas/core/daterange.py","file_name":"daterange.py","file_ext":"py","file_size_in_byte":9583,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"356695684","text":"\"\"\"This is the shell module.\n\n\"\"\"\nimport subprocess\nimport re\n\n\n\n\nclass Shell:\n \"\"\"Class that encapsulates a runner for simple shell commands \n\n Description:\n\n This class analyzes a valid shell commands, executes 
and returns the output and error values\n    using the 'subprocess' module.\n\n    Example:\n    \n        # Import Shell class\n        from utility.shell.runner import Shell\n\n        # Instance\n        shell = Shell()\n\n        # Executing a simple command \n        (code, out, error) = shell.run('ls -l')\n\n        # Executing multiple commands using | or &&\n        (code, out, error) = shell.run('ls -l |grep spark') # It will raise a RunnerException\n        \n        shell = Shell(allowMultiCmd=True)  # WARNING - You MUST trust your inputs before turning on this flag!\n\n        (code, out, error) = shell.run('ls -l |grep spark') # This works!\n\n        (code, out, error) = shell.run('lss -l |grep spark') # This works but returns an error that \n                                                               you must handle using the 'error' variable\n\n    Attributes:\n\n        _errorCode: \n\n        _output:\n\n        _rawError:\n\n        _allowMultiCmd: (bool) : WARNING - be careful with this option. It is extremely insecure if you don't know what you're doing!\n\n    \"\"\"\n    _errorCode = 0\n    _output = ''\n    _rawError = ''\n    _allowMultiCmd = False\n\n\n    def __init__(self, allowMultiCmd=False):\n        \"\"\"Constructor\n\n        \"\"\"\n        self._setAllowMultiCmd(allowMultiCmd)\n\n\n    def _setAllowMultiCmd(self, flag=False):\n        \"\"\"Private method to change the _allowMultiCmd flag\n\n        \"\"\"\n        self._allowMultiCmd = flag\n\n\n    def run(self,argsStr=''):\n        \"\"\"Runs a shell command using the subprocess module and returns a tuple of\n        (status code, stdout, stderr).\n\n        \"\"\"\n        # Normalizing spaces\n        argsStr = re.sub(r'\\s{2,}',' ',argsStr)\n        argsStr = re.sub(r'^\\s+','',argsStr)\n        argsStr = re.sub(r'\\s+$','',argsStr)\n        \n        # Building args as a list\n        argsList = re.split(r'\\s+',argsStr)\n\n        \"\"\"Tries to execute a shell command. If a Popen problem occurs, an exception is raised. But\n        if the error comes from the OS's stderr, the error is stored in _rawError and the method\n        still returns the status code, stdout and stderr.\n        \"\"\"\n        try:\n            proc = subprocess.Popen(argsList, stdout=subprocess.PIPE, \n                                    stderr=subprocess.PIPE, \n                                    shell=self._allowMultiCmd )\n            output, errStr = proc.communicate()\n            statusCode = proc.returncode\n        except Exception as e:\n            errStr = str(e)\n            raise RunnerException(e)\n\n        self._output = output\n        if(errStr):\n            self._rawError = errStr\n\n        return statusCode, output, errStr \n\n\n# Exception classes\n\nclass RunnerException(Exception):\n    def __init__(self, *args, **kwargs):\n        Exception.__init__(self, *args, **kwargs)\n\n","sub_path":"mlbase/utility/shell/runner.py","file_name":"runner.py","file_ext":"py","file_size_in_byte":3000,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"637110966","text":"import json, pytest\nfrom mock import patch\n\nclass FakeResponse(object):\n    # default response attributes\n    status_code = 200\n    def json(self):\n        return {\n            \"posts\": [\n                { \"title\": 'R.I.P. Ruby on Rails. Thanks for everything.' }, \n                {\"title\": 'The fraud behind the C.A. of digital certificates is over'}, \n                {\"title\": 'Manteniendo las raices'}, \n                {\"title\":'DF Tech Meetup, ya es una realidad'}\n            ]\n        }\n\n@pytest.mark.it(\"Make sure you are returning the titles of all the posts\")\ndef test_array_content(app):\n    with patch('requests.get') as mock_request:\n        from app import get_titles\n        mock_request.return_value = FakeResponse()\n        titles = ['R.I.P. Ruby on Rails. Thanks for everything.', 'The fraud behind the C.A. 
of digital certificates is over', 'Manteniendo las raices', 'DF Tech Meetup, ya es una realidad']\n assert titles == get_titles()\n","sub_path":"exercises/09-array-of-blog-titles/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":969,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"324782234","text":"# Copyright 2019 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\nfrom dataclasses import dataclass\nfrom typing import Tuple\n\nfrom pants.backend.python.lint.isort.subsystem import Isort\nfrom pants.backend.python.lint.python_fmt import PythonFmtRequest\nfrom pants.backend.python.target_types import PythonSources\nfrom pants.backend.python.util_rules import pex\nfrom pants.backend.python.util_rules.pex import (\n PexInterpreterConstraints,\n PexRequest,\n PexRequirements,\n VenvPex,\n VenvPexProcess,\n)\nfrom pants.core.goals.fmt import FmtResult\nfrom pants.core.goals.lint import LintRequest, LintResult, LintResults\nfrom pants.core.util_rules import stripped_source_files\nfrom pants.core.util_rules.source_files import SourceFiles, SourceFilesRequest\nfrom pants.engine.fs import (\n Digest,\n GlobExpansionConjunction,\n GlobMatchErrorBehavior,\n MergeDigests,\n PathGlobs,\n)\nfrom pants.engine.process import FallibleProcessResult, Process, ProcessResult\nfrom pants.engine.rules import Get, MultiGet, collect_rules, rule\nfrom pants.engine.target import FieldSet\nfrom pants.engine.unions import UnionRule\nfrom pants.util.logging import LogLevel\nfrom pants.util.strutil import pluralize\n\n\n@dataclass(frozen=True)\nclass IsortFieldSet(FieldSet):\n required_fields = (PythonSources,)\n\n sources: PythonSources\n\n\nclass IsortRequest(PythonFmtRequest, LintRequest):\n field_set_type = IsortFieldSet\n\n\n@dataclass(frozen=True)\nclass SetupRequest:\n request: IsortRequest\n check_only: bool\n\n\n@dataclass(frozen=True)\nclass Setup:\n process: Process\n original_digest: Digest\n\n\ndef generate_args(*, source_files: SourceFiles, isort: Isort, check_only: bool) -> Tuple[str, ...]:\n # NB: isort auto-discovers config files. There is no way to hardcode them via command line\n # flags. 
So long as the files are in the Pex's input files, isort will use the config.\n args = []\n if check_only:\n args.append(\"--check-only\")\n args.extend(isort.args)\n args.extend(source_files.files)\n return tuple(args)\n\n\n@rule(level=LogLevel.DEBUG)\nasync def setup_isort(setup_request: SetupRequest, isort: Isort) -> Setup:\n isort_pex_request = Get(\n VenvPex,\n PexRequest(\n output_filename=\"isort.pex\",\n internal_only=True,\n requirements=PexRequirements(isort.all_requirements),\n interpreter_constraints=PexInterpreterConstraints(isort.interpreter_constraints),\n main=isort.main,\n ),\n )\n\n config_digest_request = Get(\n Digest,\n PathGlobs(\n globs=isort.config,\n glob_match_error_behavior=GlobMatchErrorBehavior.error,\n conjunction=GlobExpansionConjunction.all_match,\n description_of_origin=\"the option `--isort-config`\",\n ),\n )\n\n source_files_request = Get(\n SourceFiles,\n SourceFilesRequest(field_set.sources for field_set in setup_request.request.field_sets),\n )\n\n source_files, isort_pex, config_digest = await MultiGet(\n source_files_request, isort_pex_request, config_digest_request\n )\n source_files_snapshot = (\n source_files.snapshot\n if setup_request.request.prior_formatter_result is None\n else setup_request.request.prior_formatter_result\n )\n\n input_digest = await Get(Digest, MergeDigests((source_files_snapshot.digest, config_digest)))\n\n process = await Get(\n Process,\n VenvPexProcess(\n isort_pex,\n argv=generate_args(\n source_files=source_files, isort=isort, check_only=setup_request.check_only\n ),\n input_digest=input_digest,\n output_files=source_files_snapshot.files,\n description=f\"Run isort on {pluralize(len(setup_request.request.field_sets), 'file')}.\",\n level=LogLevel.DEBUG,\n ),\n )\n return Setup(process, original_digest=source_files_snapshot.digest)\n\n\n@rule(desc=\"Format with isort\", level=LogLevel.DEBUG)\nasync def isort_fmt(request: IsortRequest, isort: Isort) -> FmtResult:\n if isort.skip:\n return FmtResult.skip(formatter_name=\"isort\")\n setup = await Get(Setup, SetupRequest(request, check_only=False))\n result = await Get(ProcessResult, Process, setup.process)\n return FmtResult.from_process_result(\n result,\n original_digest=setup.original_digest,\n formatter_name=\"isort\",\n strip_chroot_path=True,\n )\n\n\n@rule(desc=\"Lint with isort\", level=LogLevel.DEBUG)\nasync def isort_lint(request: IsortRequest, isort: Isort) -> LintResults:\n if isort.skip:\n return LintResults([], linter_name=\"isort\")\n setup = await Get(Setup, SetupRequest(request, check_only=True))\n result = await Get(FallibleProcessResult, Process, setup.process)\n return LintResults(\n [LintResult.from_fallible_process_result(result, strip_chroot_path=True)],\n linter_name=\"isort\",\n )\n\n\ndef rules():\n return [\n *collect_rules(),\n UnionRule(PythonFmtRequest, IsortRequest),\n UnionRule(LintRequest, IsortRequest),\n *pex.rules(),\n *stripped_source_files.rules(),\n ]\n","sub_path":"src/python/pants/backend/python/lint/isort/rules.py","file_name":"rules.py","file_ext":"py","file_size_in_byte":5205,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"9427822","text":"# -*- coding: utf-8 -*-\n#\n# Copyright (C) 2009-2011 Bart Ogryczak\n# Copyright (C) 2012 Ryan J Ollos <ryan.j.ollos@gmail.com>\n# All rights reserved.\n#\n# This software is licensed as described in the file COPYING, which\n# you should have received as part of this distribution.\n\nimport sys, traceback\n\nHARD_DEADLINE_FIELD = 
'hard_deadline1'\nIMPACT_FIELD = 'impact'\nNO_BACKLOG = 'no backlog'\n\nclass BacklogException(Exception): pass\n\nclass Backlog(object):\n \"Class representing an Backlog\"\n\n def __init__(self, env, id=None, name=None):\n \"\"\"\n Constructor\n @param env: Trac environment\n @param id: numeric id of the backlog\n @param name: textual name of the backlog\n \n Retrieves backlog either by id or name;\n Initializes empty object is nither is set.\n \"\"\"\n self.env = env\n self.db = self.env.get_db_cnx()\n\n if name == NO_BACKLOG: name = None\n if id:\n self._fetch_by_id(id)\n elif name:\n self._fetch_by_name(name)\n\n def create(self, name):\n \"\"\"\n Creates new backlog entry in DB.\n @param name: textual name of the backlog \n \"\"\"\n if not name or not name.strip():\n raise BacklogException(\"Backlog needs to have non-empty name\")\n\n self.name = name\n self.id = self._get_free_id()\n if(not self.id): return None\n return self._create()\n\n def _create(self):\n \"Performs actual insert in DB\"\n try:\n cursor = self.db.cursor()\n id = self._get_free_id()\n sql = \"INSERT INTO backlog (id, name) VALUES (%s, %s)\"\n cursor.execute(sql, (self.id, self.name,))\n self.db.commit()\n except:\n self.env.log.error(traceback.format_exc())\n raise BacklogException(\"Failed to create new backlog\")\n\n return self\n\n def _get_free_id(self):\n \"\"\"\n Auto-increment emulation, as some DBs don't have it.\n \"\"\"\n try:\n cursor = self.db.cursor()\n sql = \"SELECT max(id) FROM backlog\"\n cursor.execute(sql) \n id = cursor.fetchone()[0] + 1\n except:\n self.env.log.error(traceback.format_exc())\n raise BacklogException(\"Failed to obtain next free id\") \n return id\n \n def _fetch_by_id(self, id):\n \"\"\"\n Retrieves backlog data by id\n @param id: numeric id of the backlog \n \"\"\"\n assert id, 'id not set'\n self.id = int(id)\n try:\n cursor = self.db.cursor()\n sql = \"SELECT name, owner, description FROM backlog WHERE id = %s\"\n cursor.execute(sql, (self.id,))\n self.name, self.owner, self.description = cursor.fetchone()\n except:\n self.env.log.error(traceback.format_exc())\n raise BacklogException(\"Failed to retrieve backlog with id=%s\" % self.id) \n \n def _fetch_by_name(self, name):\n \"\"\"\n Retrieves backlog data by name\n @param name: textual name of the backlog \n \"\"\"\n try:\n cursor = self.db.cursor()\n sql = \"SELECT id FROM backlog WHERE name = %s\" \n cursor.execute(sql, (name,))\n self.id = cursor.fetchone()[0] \n except:\n self.env.log.error(traceback.format_exc())\n raise BacklogException(\"Failed to retrieve backlog with name=%s\" % name)\n self._fetch_by_id(self.id)\n \n def get_tickets(self, all_in_one=False):\n assert self.id, 'id not set'\n \"\"\"\n Retrieves relevant data for tickets in backlog\n By default returns two list: prioritized and unprioritized tickets \n @param all_in_one: should all tickets be returned in one list \n \"\"\"\n try:\n cursor = self.db.cursor()\n #get name\n columns = ['id', 'summary', 'component', 'description', 'version', 'type', 'milestone', 'owner', 'status', 'time', 'tkt_order', 'keywords']\n sql = \"\"\"SELECT %s,tc.value as hard_deadline, tc2.value as impact\n FROM backlog_ticket b, ticket t \n LEFT OUTER JOIN ticket_custom tc \n ON t.id = tc.ticket\n AND tc.name = '%s'\n LEFT OUTER JOIN ticket_custom tc2 \n ON t.id = tc2.ticket\n AND tc2.name = '%s'\n WHERE t.id = b.tkt_id \n AND b.bklg_id = %%s \n AND (b.tkt_order IS NULL OR b.tkt_order > -1)\n ORDER BY b.tkt_order, t.time DESC\"\"\" % (','.join(columns), HARD_DEADLINE_FIELD, 
IMPACT_FIELD)\n columns.extend(('hard_deadline', 'impact'))\n self.env.log.info('GET_TICKETS sql = \"\"\"%s\"\"\"' % sql) \n cursor.execute(sql, (self.id,)) \n all_tickets = [dict(zip(columns, ticket)) for ticket in cursor] #creating list of column:value dictionaries\n #self.env.log.info('ALL_TICKETS = %s'%all_tickets)\n \n except:\n self.env.log.error(traceback.format_exc())\n raise BacklogException(\"Failed to retrieve ticket data for backlog %s\" % self.name) \n\n if all_in_one:\n return all_tickets\n\n #splitting ordered and unordered\n ordered_tickets, unordered_tickets = [], []\n for ticket in all_tickets:\n if ticket['tkt_order'] is not None:\n ordered_tickets.append(ticket)\n else:\n unordered_tickets.append(ticket)\n return ordered_tickets, unordered_tickets\n \n def set_ticket_order(self, order):\n \"\"\"\n saves ticket priorities in the DB\n @param order: sequence of ticket IDs \n \"\"\"\n assert self.id, 'id not set'\n order = [ (self.id, int(tkt_id), tkt_order + 1) for (tkt_order, tkt_id) in enumerate(order)] \n # print 'order', order \n try:\n cursor = self.db.cursor() \n cursor.executemany('REPLACE INTO backlog_ticket (bklg_id, tkt_id, tkt_order) VALUES (%s, %s, %s)', order)\n self.db.commit()\n except:\n self.env.log.error(traceback.format_exc())\n raise BacklogException(\"Failed to save order for backlog %s\" % self.name) \n \n def add_ticket(self, tkt_id):\n \"\"\" \n adds the ticket to this backlog, also removing it from previous backlog if any\n @param tkt_id: ticket's ID \n \"\"\"\n try:\n cursor = self.db.cursor() \n cursor.execute('DELETE FROM backlog_ticket WHERE tkt_id = %s', (tkt_id,)) \n cursor.execute('INSERT INTO backlog_ticket (bklg_id, tkt_id) VALUES (%s, %s)', (self.id, tkt_id))\n self.db.commit()\n except:\n self.env.log.error(traceback.format_exc())\n raise BacklogException(\"Failed to add ticket %s to backlog %s\" % (tkt_id, self.name)) \n\n def reset_priority(self, tkt_id, only_if_deleted=False):\n \"\"\"\n resets the ticket's priority to NULL (unordered)\n @param tkt_id: ID or sequence of IDs of ticket(s)\n @param only_if_deleted: reset the priority only if ticket was deleted as closed (archived)\n \"\"\"\n try:\n cursor = self.db.cursor()\n sql = 'UPDATE backlog_ticket SET tkt_order = NULL WHERE tkt_id = %s'\n if(only_if_deleted):\n sql += ' AND tkt_order = -1' \n try:\n tkt_ids = [(id,) for id in tkt_id] # trying to iterate to see if it's a list\n cursor.executemany(sql, tkt_ids)\n except TypeError: #single id\n cursor.execute(sql, (tkt_id,)) \n self.db.commit()\n except:\n self.env.log.error(traceback.format_exc())\n raise BacklogException(\"Failed to reset priority for ticket(s) %s \" % (tkt_id,)) \n \n def delete_ticket(self, tkt_id):\n \"\"\" \n removes ticket from this backlog\n @param tkt_id: ID of ticket\n \"\"\"\n if(not getattr(self, 'id')): \n self.env.log.warn('trying to delete ticket from uninitialized backlog')\n return\n try:\n cursor = self.db.cursor() \n cursor.execute('DELETE FROM backlog_ticket WHERE bklg_id = %s AND tkt_id = %s', (self.id, tkt_id)) \n self.db.commit()\n except:\n self.env.log.error(traceback.format_exc())\n raise BacklogException(\"Failed to delete ticket %s from backlog\" % (tkt_id,)) \n\n def remove_closed_tickets(self):\n \"\"\"\n hides (archives) all closed tickets in the current backlog\n \"\"\"\n assert self.id, 'id not set'\n sql = \"\"\"UPDATE backlog_ticket SET tkt_order = -1\n WHERE bklg_id = %s\n AND tkt_id IN (SELECT id FROM ticket\n WHERE status = 'closed')\"\"\"\n try:\n cursor = self.db.cursor()\n 
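# a negative tkt_order marks the ticket as archived; the single %s binds this backlog's id\n            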
cursor.execute(sql, (self.id,))\n            self.db.commit()\n        except:\n            self.env.log.error(traceback.format_exc())\n            raise BacklogException(\"Failed to clean up closed tickets in backlog %s\" % self.name)\n    \n    def name2perm(self):\n        \"\"\"\n        creates string appropriate for Trac's permission system from current backlog's name \n        \"\"\"\n        import re\n        return re.sub('[^A-Z0-9]', '_', self.name.upper())\n\n    \nclass BacklogList(object):\n    \"Class representing a sequence of all backlogs available in Trac\"\n\n    def __init__(self, env):\n        \"\"\"Initializes object with data fetched from the DB\n        @param env: Trac environment\n        \"\"\"\n        self.env = env\n        try:\n            db = env.get_db_cnx()\n            cursor = db.cursor()\n            cursor.execute(\"SELECT id FROM backlog ORDER BY id\")\n            self.backlogs = (Backlog(env, row[0]) for row in cursor)\n        except:\n            self.env.log.error(traceback.format_exc())\n            self.backlogs = ()\n\n    def __iter__(self):\n        \"Returns iterator\"\n        return self.backlogs.__iter__()\n\n","sub_path":"backlogplugin/trunk/backlog/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":10304,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"471170595","text":"from os import environ\n\nfrom freshen import *\n\nfrom embedly.client import Embedly\n\n@Given(\"an embedly endpoint( [^\\s]+)?( with key)?$\")\ndef init_api(domain, key_enabled):\n    opts = {}\n\n    if domain:\n        opts['domain'] = domain\n\n    if key_enabled:\n        if not environ.get('EMBEDLY_KEY'):\n            raise RuntimeError('Please set env variable $EMBEDLY_KEY')\n        opts['key'] = environ[\"EMBEDLY_KEY\"]\n\n    scc.api = Embedly(**opts)\n\n@When(\"(\\w+) is called with the (.*) URLs?( and ([^\\s]+) flag)?$\")\ndef call_urls(method, urls, _, flag):\n    urls = urls.split(',')\n    opts = {}\n    if len(urls) == 1:\n        opts['url_or_urls'] = urls[0]\n    else:\n        opts['url_or_urls'] = urls\n\n    if flag:\n        opts[flag] = 'true'\n\n    scc.result = getattr(scc.api, method)(**opts)\n\n@Then(\"(the )?([^\\s]+) should be ([^\\s]+)\")\ndef check_value(_, key, value):\n    if type(scc.result) == list:\n        r = []\n        for o in scc.result:\n            r.append(str(o.get(key)))\n        assert_equal(','.join(r), value)\n\n    else:\n        assert_equal(str(scc.result.get(key)), value)\n","sub_path":"features/steps.py","file_name":"steps.py","file_ext":"py","file_size_in_byte":1017,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"108638188","text":"# -*- coding: utf-8 -*-\nfrom datetime import datetime\nimport traceback\nfrom libcnmc.core import MultiprocessBased\n\n\nclass F20(MultiprocessBased):\n    def __init__(self, **kwargs):\n        super(F20, self).__init__(**kwargs)\n        self.year = kwargs.pop('year', datetime.now().year - 1)\n        self.codi_r1 = kwargs.pop('codi_r1')\n        self.report_name = 'F20 - CTS'\n        self.base_object = 'CTS'\n\n    def get_sequence(self):\n        data_ini = '%s-01-01' % (self.year + 1)\n        # data_baixa = '%s-12-31' % self.year\n        search_params = [('active', '=', True),\n                         '|',\n                         ('create_date', '<', data_ini),\n                         ('create_date', '=', False)]\n        return self.connection.GiscedataCupsPs.search(\n            search_params, 0, 0, False, {'active_test': False})\n\n    def get_cini(self, et):\n        o = self.connection\n        valor = ''\n        if et:\n            cts = o.GiscedataCts.search([('name', '=', et)])\n            if cts:\n                cini = o.GiscedataCts.read(cts[0], ['cini'])\n                valor = cini['cini']\n        return valor\n\n    def consumer(self):\n        o = self.connection\n        fields_to_read = [\n            'name', 'et'\n        ]\n        while True:\n            try:\n                # generate the output lines\n                item = self.input_q.get()\n                
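# report progress for this item before reading its CUPS fields\n                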
self.progress_q.put(item)\n                cups = o.GiscedataCupsPs.read(\n                    item, fields_to_read\n                )\n                o_codi_r1 = \"R1-\"+self.codi_r1\n                o_cups = cups['name']\n                o_cini = self.get_cini(cups['et'])\n                if not o_cini:\n                    o_cini = 'False'\n                o_codi_ct = cups['et']\n                self.output_q.put([\n                    o_codi_r1,\n                    o_cups,\n                    o_cini,\n                    o_codi_ct\n                ])\n            except:\n                traceback.print_exc()\n                if self.raven:\n                    self.raven.captureException()\n            finally:\n                self.input_q.task_done()\n","sub_path":"libcnmc/cir_4_2015/F20.py","file_name":"F20.py","file_ext":"py","file_size_in_byte":2118,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"505977226","text":"\"\"\"\nWrite tests (pytest) for the previous 3 tasks; by running them, one should be able to verify their correctness\n\"\"\"\n\nimport os\nimport pytest\nimport task1, task2, task3\nfrom task1 import PrintableFile, PrintableFolder\nfrom task2 import Graph\nfrom task3 import CaesarCipher\n\n\ndef test_printable():\n    work_path = os.path.abspath(r\"..\\\\..\\\\06-advanced-python\\\\hw_test1\")\n    pfolder = PrintableFolder(work_path)\n    pfile1 = PrintableFile(\"file1.txt\")\n    pfile3 = PrintableFile(\"file3.txt\")\n    result = \"V hw_test1\\\\n\" \\\\\n             \"|-> V hw_test0\\\\n\" \\\\\n             \"|\\\\t|-> file3.txt\\\\n\" \\\\\n             \"|-> file1.txt\\\\n\" \\\\\n             \"|-> file2.txt\\\\n\"\n    assert str(pfolder) == result\n    assert pfile1 in pfolder\n    assert pfile3 not in pfolder\n\n\ndef test_graph():\n    E = {'A': ['B', 'C', 'D'], 'C': ['F'], 'D': ['A'], 'E': ['F'],\n         'F': ['G'], 'G': [], 'B': ['C', 'E']}\n    out = list(\"ABCDEFG\")\n    graph = Graph(E)\n    for vertex, value in zip(graph, out):\n        assert vertex == value\n\n\n@pytest.mark.parametrize('mes, text, expected', [\n    ('message', 'abc', 'efg'),\n    ('another_message', 'hello', 'olssv'),\n    ('out_message', 'xyz', 'abc')\n])\ndef test_cipher(mes, text, expected):\n    a = CaesarCipher()\n    setattr(a, mes, text)\n    assert getattr(a, mes) == expected\n\n\n","sub_path":"06-advanced-python/hw/test_tasks.py","file_name":"test_tasks.py","file_ext":"py","file_size_in_byte":1368,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"212541476","text":"from Env.CustomEnv.DynamicMaze.DynamicMaze import DynamicMaze\nimport random\nimport matplotlib.pyplot as plt\nimport os\n\nimport numpy as np\nmat = np.genfromtxt('simpleMap.txt')\n\n#\n# import os\n#\n#\nconfig = {}\nconfig['mazeFileName'] = 'simpleMapSmall.txt'\nconfig['numCircObs'] = 2\nconfig['dynamicObsFlag'] = False\nconfig['agentReceptHalfWidth'] = 5\nconfig['obstacleMapPaddingWidth'] = 5\nconfig['targetState'] = (1, 1)\nconfig['dynamicTargetFlag'] = False\n\n\n#\n# #directory = config['mazeFileName'].split('.')[0]\n# #if not os.path.exists(directory):\n# #    os.makedirs(directory)\n#\n# print(os.getcwd())\n#\n#\nenv = DynamicMaze(config)\nenv.reset()\nfor i in range(20):\n    action = random.randint(0, env.nbActions - 1)\n    state, reward, done, info = env.step(action)\n    print('step ' + str(i))\n    print(state)\n    print(reward)\n    print(done)\n    print(info)\n# #maze.renderMapAndObs(directory +'/' + config['mazeFileName'].split(',')[0] + str(i) + '.png')","sub_path":"Env/Test/DynamicMazeTest/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":959,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"123283224","text":"# -*- coding: utf-8 -*-\nAPI_VERSION = \"0.0.2\"\nSESSION_COOKIE_NAME = \"upush_sessid\"\nDATABASE_TYPE = \"DBRedis\"\nMAX_CONTENT_LENGTH = 512 * 1024\n\nMAIL_SERVER = \"localhost\"\nMAIL_PORT 
= 25\n\n# Log level values:\n# CRITICAL=50\n# ERROR=40\n# WARNING=30\n# INFO=20\n# DEBUG=10\n#\n# NOTE: check your system syslog level as it might be less\n# verbose than you think\nLOG_LEVEL = 20\nDEBUG = True\nTESTING = False\n","sub_path":"upush/api/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":419,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"242886423","text":"import json\nimport pickle\n\nwith open(\"db.p\", \"rb\") as picklefile:\n    db_dict = pickle.load(picklefile)\n\nwith open(\"db.json\", \"w+\") as jsonfile:\n    for key,val in db_dict.items():\n        json.dump(val, jsonfile)\n        jsonfile.write(\"\\\\n\")\n\n    \n","sub_path":"arxivdb_to_json.py","file_name":"arxivdb_to_json.py","file_ext":"py","file_size_in_byte":246,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"35133162","text":"# create matrix from user input\ndef get_matrix(size):\n    matrix = []\n    for x in range(size[0]):\n        row = input().split()\n        for y in range(size[1]):\n            row[y] = float(row[y])\n        matrix.append(row)\n    return matrix\n\n\n# multiply matrix by constant\ndef mult_const():\n    size = [int(a) for a in input('Enter size of matrix: ').split()]\n    print('Enter matrix:')\n    matrix = get_matrix(size)\n    multiplier = float(input('Enter constant: '))\n    prod_matrix = [[matrix[x][y] * multiplier for y in range(size[1])] for x in range(size[0])]\n    print('The result is:')\n    print_matrix(prod_matrix)\n\n\n# multiply two matrices together\ndef mult_matrices():\n    a_size = [int(a) for a in input('Enter size of first matrix: ').split()]\n    print('Enter first matrix:')\n    a_matrix = get_matrix(a_size)\n    b_size = [int(b) for b in input('Enter size of second matrix: ').split()]\n    print('Enter second matrix:')\n    b_matrix = get_matrix(b_size)\n    prod_matrix = [[sum(a*b for a,b in zip(a_row,b_col)) for b_col in zip(*b_matrix)] for a_row in a_matrix]\n    print_matrix(prod_matrix)\n\n\n# add two matrices together\ndef add_matrices():\n    a_size = [int(a) for a in input('Enter size of first matrix: ').split()]\n    print('Enter first matrix:')\n    a_matrix = get_matrix(a_size)\n    b_size = [int(b) for b in input('Enter size of second matrix: ').split()]\n    print('Enter second matrix:')\n    b_matrix = get_matrix(b_size)\n    if a_size[0] != b_size[0] or a_size[1] != b_size[1]:\n        print('ERROR')\n    else:\n        sum_matrix = [[a_matrix[x][y] + b_matrix[x][y] for y in range(a_size[1])] for x in range(a_size[0])]\n        print_matrix(sum_matrix)\n\n\n# transpose a matrix along the main diagonal\ndef main_trans():\n    size = [int(a) for a in input('Enter size of matrix: ').split()]\n    print('Enter matrix:')\n    matrix = get_matrix(size)\n    new_matrix = map(list, zip(*matrix))\n    print('The result is:')\n    print_matrix(new_matrix)\n\n\n# transpose a matrix along the side diagonal\ndef side_trans():\n    size = [int(a) for a in input('Enter size of matrix: ').split()]\n    print('Enter matrix:')\n    matrix = get_matrix(size)\n    for row in matrix:\n        row.reverse()\n    matrix = matrix[::-1]\n    new_matrix = map(list, zip(*matrix))\n    print('The result is:')\n    print_matrix(new_matrix)\n\n\n# transpose a matrix along the vertical axis\ndef vert_trans():\n    size = [int(a) for a in input('Enter size of matrix: ').split()]\n    print('Enter matrix:')\n    matrix = get_matrix(size)\n    for row in matrix:\n        row.reverse()\n    print('The result is:')\n    print_matrix(matrix)\n\n\n# transpose a matrix along the horizontal axis\ndef hori_trans():\n    size = [int(a) for a in input('Enter size of 
matrix: ').split()]\n print('Enter matrix:')\n matrix = get_matrix(size)\n matrix = matrix[::-1]\n print('The result is:')\n print_matrix(matrix)\n\n\n# print a matrix\ndef print_matrix(matrix):\n for row in matrix:\n print(*row)\n\n\n# main code\nwhile True:\n print(r'''1. Add matrices\n 2. Multiply matrix by a constant\n 3. Multiply matrices\n 4. Transpose matrix\n 0. Exit''')\n choice = int(input('Your choice: '))\n if choice == 0:\n exit()\n elif choice == 1:\n add_matrices()\n elif choice == 2:\n mult_const()\n elif choice == 3:\n mult_matrices()\n elif choice == 4:\n print(r'''1. Main diagonal\n 2. Side diagonal\n 3. Vertical line\n 4. Horizontal line''')\n tr_choice = int(input('Your choice: '))\n if tr_choice == 1:\n main_trans()\n elif tr_choice == 2:\n side_trans()\n elif tr_choice == 3:\n vert_trans()\n elif tr_choice == 4:\n hori_trans()\n","sub_path":"(med) numeric matrix processor/04_transpose.py","file_name":"04_transpose.py","file_ext":"py","file_size_in_byte":3785,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"646305354","text":"import os\r\nimport sys\r\nimport re\r\nimport requests\r\nimport pandas as pd\r\nimport json\r\nimport psycopg2\r\nfrom datetime import datetime\r\n\r\n\r\ndef main():\r\n\r\n url = 'https://www.52cp.cn/pk10/trend/index/from/1/type/0/period/30/date/all.html'\r\n page = requests.get(url).content\r\n raw_html = str(page)\r\n\r\n try:\r\n table = re.search('var output = (.+?);', raw_html).group(1)\r\n table = table.replace('\\\\', '').strip('\\'')\r\n data = json.loads(table)\r\n except AttributeError:\r\n table = ''\r\n\r\n for i in data['list']:\r\n issue_val = str(i['expect'])\r\n \r\n open_time = i['opentime']\r\n Time_division = datetime.fromtimestamp(open_time)\r\n \r\n all_num = i['all_num']\r\n Lottery_number = ','.join(map(str, all_num)) \r\n \r\n\r\n con = None\r\n try:\r\n con = psycopg2.connect(\"host='localhost' dbname='beijing_racing' user='developer' password='developer'\") \r\n cur = con.cursor()\r\n cur.execute(\"CREATE TABLE IF NOT EXISTS trend_chart(Id serial PRIMARY KEY, Issue VARCHAR(20), Time_division DATE NOT NULL DEFAULT CURRENT_DATE, Lottery_number VARCHAR(20))\")\r\n cur = con.cursor()\r\n cur.execute(\"SELECT Issue FROM public.trend_chart where Issue = %s;\",[issue_val])\r\n records = cur.fetchall() \r\n if len(records) != 0:\r\n continue\r\n else:\r\n insert_db(cur,con,issue_val,Time_division,Lottery_number)\r\n \r\n except psycopg2.DatabaseError as e:\r\n if con:\r\n con.rollback()\r\n print('Error %s' %e)\r\n sys.exit(1)\r\n finally:\r\n if con:\r\n con.close()\r\n if cur:\r\n cur.close()\r\n \r\n \r\ndef insert_db(cur,con,issue_val,Time_division,Lottery_number):\r\n postgres_insert_query = \"\"\" INSERT INTO trend_chart(Issue,Time_division,Lottery_number) VALUES (%s,%s,%s)\"\"\"\r\n record_to_insert = (issue_val,Time_division,Lottery_number)\r\n cur.execute(postgres_insert_query, record_to_insert)\r\n con.commit()\r\n\r\nif __name__ == '__main__':\r\n main()","sub_path":"crawler.py","file_name":"crawler.py","file_ext":"py","file_size_in_byte":2140,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"340332704","text":"#!/usr/bin/env python3\nimport os\n\nimport tornado.ioloop\nimport tornado.web\nimport tornado.log\n\nimport markdown2\nimport queries\n\nfrom jinja2 import \\\n Environment, PackageLoader, select_autoescape\n\nENV = Environment(\n loader=PackageLoader('blog', 'templates'),\n 
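# autoescape HTML/XML output so template variables are escaped by default\n    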
autoescape=select_autoescape(['html', 'xml'])\n)\n\nclass TemplateHandler(tornado.web.RequestHandler):\n def initialize(self):\n self.session = queries.Session(\n 'postgresql://postgres@localhost:5432/blog')\n \n def render_template (self, tpl, context):\n template = ENV.get_template(tpl)\n self.write(template.render(**context))\n \nclass MainHandler(TemplateHandler):\n def get (self):\n posts = self.session.query('SELECT * FROM post')\n self.render_template(\"home.html\", {'posts': posts})\n \nclass BlogPostHandler(TemplateHandler):\n def get (self, slug):\n posts = self.session.query(\n 'SELECT * FROM post WHERE slug = %(slug)s',\n {'slug': slug}\n )\n \n html = markdown2.markdown(posts[0]['body'])\n context = {\n 'post': posts[0],\n 'html': html\n }\n self.render_template(\"post.html\", context)\n \nclass CommentHandler(TemplateHandler):\n def get (self, slug):\n posts = self.session.query(\n 'SELECT * FROM post WHERE slug = %(slug)s',\n {'slug': slug}\n )\n self.render_template(\"comment.html\", {'post': posts[0]})\n \n def post (self, slug):\n comment = self.get_body_argument('comment')\n posts = self.session.query(\n 'SELECT * FROM post WHERE slug = %(slug)s',\n {'slug': slug}\n )\n # Save Comment Here\n self.redirect('/post/' + slug)\n \ndef make_app():\n return tornado.web.Application([\n (r\"/\", MainHandler),\n (r\"/post/(.*)/comment\", CommentHandler),\n (r\"/post/(.*)\", BlogPostHandler),\n (r\"/static/(.*)\", \n tornado.web.StaticFileHandler, {'path': 'static'}),\n ], autoreload=True)\n \nif __name__ == \"__main__\":\n tornado.log.enable_pretty_logging()\n app = make_app()\n app.listen(int(os.environ.get('PORT', '8080')))\n tornado.ioloop.IOLoop.current().start()","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2046,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"148073067","text":"\"\"\"\nVarious functions for molecular geometry transformations\n\"\"\"\nimport math\nimport numpy as np\nfrom itertools import combinations\n\ndef unit_vector(coords1, coords2):\n \"\"\"\n Calculate the unit vector between two cartesian coordinates\n \"\"\"\n distance = np.linalg.norm(coords2 - coords1)\n unit_vec = [0.0 for p in range(3)]\n for p in range(3):\n unit_vec[p] = (coords2[p] - coords1[p]) / distance \n return unit_vec\n\ndef unit_cross_product(uvec1, uvec2):\n \"\"\"\n Returns unit cross product between two unit vectors\n Ensures the result is itself a unit vector\n \"\"\"\n cos = np.dot(uvec1, uvec2)\n sin = math.sqrt(1 - cos**2)\n # if the number of atoms is > 3 and there are 3 colinear atoms this will fail\n csc = sin**-1\n return np.cross(uvec1, uvec2) * csc\n\n\ndef get_local_axes(coords1, coords2, coords3):\n u12 = unit_vector(coords1, coords2)\n u23 = unit_vector(coords2, coords3)\n if (abs(np.dot(u12, u23)) >= 1.0):\n print('\\nError: Co-linear atoms in an internal coordinate definition')\n u23_x_u12 = unit_cross_product(u23, u12)\n u12_x_u23_x_u12 = unit_cross_product(u12, u23_x_u12)\n z = u12\n y = u12_x_u23_x_u12\n x = unit_cross_product(y, z)\n local_axes = np.array([x, y, z])\n return local_axes\n\n# calculate vector of bond in local axes of internal coordinates\ndef get_bond_vector(r, a, d):\n x = r * math.sin(a) * math.sin(d)\n y = r * math.sin(a) * math.cos(d)\n z = r * math.cos(a)\n bond_vector = np.array([x, y, z])\n return bond_vector\n\ndef get_interatom_distances(cart):\n n = len(cart)\n matrix = np.zeros((n,n))\n for i,j in combinations(range(len(cart)),2):\n R = 
np.linalg.norm(cart[i]-cart[j])\n #create lower triangle matrix\n matrix[j,i] = R\n return matrix\n","sub_path":"MLChem/geometry_transform_helper.py","file_name":"geometry_transform_helper.py","file_ext":"py","file_size_in_byte":1787,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"535207730","text":"#!/usr/bin/env python3\n################################################################\n## Pygame template, following Harris' IDEA/ALTER approach. ##\n## ## \n## Author : Nick Efford ##\n## Updated : 2010-11-12 ##\n################################################################\n\n# I: Importing & initialising\n\nimport pygame\n\npygame.init()\n\n# D: Display configuration\n\nsize = (640, 480)\nscreen = pygame.display.set_mode(size)\npygame.display.set_caption('Pygame Example :o')\n\n# E: Entity creation\n\nbackground = pygame.Surface(size).convert()\nbackground.fill((128, 0, 0))\ni = 0\n\n# A: Action, broken down as ALTER steps...\n\n# A: Assign values to key variables\nclock = pygame.time.Clock()\nrunning = True\n\n# L: Loop\nwhile running:\n # T: Timing, to control frame rate\n clock.tick(30)\n\n i -= 1\n\n # E: Event handling\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n running = False\n\n # R: Refresh display\n screen.blit(background, (200+i, 0))\n #\n # Code to redraw any moving/changing objects would go here\n #\n pygame.display.flip()\n","sub_path":"year1/python/week9/pgweek9/ex1.py","file_name":"ex1.py","file_ext":"py","file_size_in_byte":1226,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"18998840","text":"# -*- coding: utf-8 -*-\nimport json, datetime\nfrom django.http import HttpResponse\nfrom wxapp.models import Product\nfrom wxapp.constants import MEDIA_URL\nfrom api.decorator import signature\n\n\n@signature\ndef getProductList(request):\n result_dict = {'status': 1, 'msg': []}\n\n kwargs = {}\n\n kwargs.setdefault('enable_flag', '0')\n kwargs.setdefault('begin_date__lte', datetime.datetime.now())\n kwargs.setdefault('end_date__gte', datetime.datetime.now())\n\n products = Product.objects.filter(**kwargs)\n msg = []\n\n if products:\n for item in products:\n vardict = {}\n vardict['product_id'] = str(item.id)\n vardict['product_code'] = str(item.product_code)\n vardict['product_name'] = str(item.product_name)\n vardict['price'] = str(item.price)\n vardict['stock'] = str(item.stock)\n vardict['type_flag'] = str(item.type_flag)\n vardict['product_weight'] = str(item.product_weight)\n vardict['begin_date'] = str(item.begin_date.strftime(\"%Y-%m-%d\"))\n vardict['end_date'] = str(item.end_date.strftime(\"%Y-%m-%d\"))\n vardict['product_image'] = MEDIA_URL + str(item.product_image)\n msg.append(vardict)\n\n result_dict['status'] = 0\n result_dict['msg'] = msg\n\n return HttpResponse(json.dumps(result_dict), content_type=\"application/json\")\n\n\n@signature\ndef getProductInfo(request):\n result_dict = {'status': 1, 'msg': []}\n\n product_id = request.GET.get('product_id', '')\n\n product = Product.objects.get(pk=product_id, enable_flag='0')\n msg = {}\n\n if product:\n msg['id'] = str(product.id)\n msg['product_code'] = str(product.product_code)\n msg['product_name'] = str(product.product_name)\n msg['price'] = str(product.price)\n msg['stock'] = str(product.stock)\n msg['type_flag'] = str(product.type_flag)\n msg['product_weight'] = str(product.product_weight)\n msg['begin_date'] = str(product.begin_date.strftime(\"%Y-%m-%d\"))\n 
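# dates are serialized as YYYY-MM-DD strings for the JSON payload\n        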
msg['end_date'] = str(product.end_date.strftime(\"%Y-%m-%d\"))\n msg['product_image'] = MEDIA_URL + str(product.product_image)\n\n result_dict['status'] = 0\n result_dict['msg'] = msg\n\n return HttpResponse(json.dumps(result_dict), content_type=\"application/json\")\n","sub_path":"apps/api/views/wxapp/product.py","file_name":"product.py","file_ext":"py","file_size_in_byte":2334,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"485247089","text":"#-*- encoding: utf-8 -*-\n'''\nnew_seed.py.py\nCreated on 2017/10/19 11:21\nCopyright (c) 2017/10/19, 海牛学院版权所有.\n@author: 青牛\n'''\nimport sys\nsys.path.append('/home/hadoop/hainiu_crawler')\nfrom commons.util.log_util import LogUtil\nfrom commons.util.db_util import DBUtil\nfrom commons.util.html_util import HtmlUtil\nfrom configs import config\nfrom tld import get_fld\nfrom commons.util.util import Util\nimport sys\n\ndef create_seed():\n url = \"https://www.autohome.com.cn/all\"\n catetory = \"汽车\"\n sql = \"\"\"\n insert into hainiu_web_seed (url,md5,domain,host,category,status) values\n (%s,%s,%s,%s,%s,%s);\n \"\"\"\n hu = HtmlUtil()\n domain = get_fld(url)\n host = hu.get_url_host(url)\n u = Util()\n md5 = u.get_md5(url)\n\n rl = LogUtil().get_base_logger()\n try:\n d = DBUtil(config._HAINIU_DB)\n sql_param = [url,md5,domain,host,catetory,0]\n d.execute(sql,sql_param)\n except:\n rl.exception()\n d.rollback()\n finally:\n d.close()\n\n\nif __name__ == '__main__':\n reload(sys)\n sys.setdefaultencoding('utf-8')\n create_seed()","sub_path":"download_page/util/new_seed.py","file_name":"new_seed.py","file_ext":"py","file_size_in_byte":1111,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"196910076","text":"#!/usr/bin/env python\n# coding=utf-8\n\nfrom bs4 import BeautifulSoup\nfrom selenium import webdriver\nimport requests\nimport time\nimport urllib.request\nimport re\n\n\npath = 'H:/huabanwang/imgs/'\n\npin_ids = []\ngaoqing_links = []#图片的详情页列表\npicture_links = []#图片的下载链接列表\nhuaban_urls = ['https://huaban.com/favorite/beauty/']#每一大页的链接\n'''\n#获取每个图片详情页链接\ndef get_picture_info(huaban_url='https://huaban.com/favorite/beauty/'):\n wb_data = requests.get(huaban_url)\n soup = BeautifulSoup(wb_data.text,'lxml')\n #每张图片有对应的pin_id,从源码里看到这些图片并不是一种匹配就能全部包含,因此用两个正则来匹配,最后列表相加\n pin_id_one = re.findall('\"extra\":null}}, \\{\"pin_id\":(.*?), \"user_id\":',soup.decode('utf-8'),re.S)\n pin_id_two = re.findall('\\\"pin_id\\\":\\\"(.*?)\\\"',soup.decode('utf-8'),re.S)\n pin_id = pin_id_one + pin_id_two\n # pin_id = list(set(pin_id)) #去掉重复的图片\n pin_ids.append(pin_id)\n for i in pin_id:\n gaoqing_url = 'http://huaban.com/pins/{}/'.format(i) #图片详情链接\n global gaoqing_links\n gaoqing_links.append(gaoqing_url)\n gaoqing_links = list(set(gaoqing_links))\n print(gaoqing_links)\n return pin_ids\n return gaoqing_links\n\n#从图片详情页中找到图片下载的链接\ndef get_picture_links():\n global i\n i += 1\n for gaoqing_link in gaoqing_links:\n wb_data = requests.get(gaoqing_link)\n soup = BeautifulSoup(wb_data.text,'lxml')\n picture_key = re.findall('\"hbimg\", \"key\":\"(.*?)\", \"type\"',soup.decode('utf-8'),re.S)[0]\n #高清大图下载链接\n picture_url = 'http://img.hb.aicdn.com/' + picture_key\n print(picture_url)\n picture_links.append(picture_url)\n print('第{}页链接完成'.format(i))\n return picture_links\n\n#下载图片\ndef download_picture():\n x = 0\n for picture_link in picture_links:\n xiazaitupian = urllib.request.urlretrieve(picture_link,path + '%s.jpg' % str(x))\n x += 1\n print('download 
{}'.format(x))\n\n\n\n# #异步加载页面,获取更多页面\n# def get_more_page_picture():\n# get_picture_info()\n# if True:\n# for a in pin_ids[0]:\n# print(a)\n# huaban_url = 'http://huaban.com/favorite/beauty/?ixkhwcmb&max={}&limit=20&wfl=1'.format(a)\n# get_picture_info(huaban_url)\n# get_picture_links()\n# download_picture()\n\nif __name__ == '__main__':\n i = 0\n x = 0\n get_picture_info()\n get_picture_links()\n download_picture()\n # get_more_page_picture()\n'''\n\n#下面这种方法是selenium + phantomjs + beautifulsoup,速度可能较慢\ndef get_picture_info(huaban_url='https://huaban.com/favorite/beauty/'):\n driver = webdriver.PhantomJS()\n driver.get(huaban_url)\n wb_data = driver.page_source\n soup = BeautifulSoup(wb_data,'lxml')\n pictures = soup.select('#waterfall > div > a > img')\n pin_ids = soup.select('#waterfall > div > a')\n\n for picture in pictures:\n picture = 'http:' + picture.get('src')[:-6]\n picture_links.append(picture)\n f = open('H:/huabanwang/links/img_urls.txt','a')\n f.write(picture + '\\n')\n f.close()\n print(picture)\n print('='*32,'第' + str(len(huaban_urls)) + '页加载完毕','='*32)\n\n\n if len(huaban_urls) < a:\n last_pin_id = pin_ids[-1].get('href').split('/')[2] # 每一页的最后一张图片的pin_id,刚好是下一页url里的\n huaban_url = 'https://huaban.com/favorite/beauty/?izhxd6qg&max=' + last_pin_id + '&limit=20&wfl=1'\n huaban_urls.append(huaban_url)\n f = open(r'H:/huabanwang/links/page_urls.txt','a')\n f.write(huaban_url + '\\n')\n f.close()\n get_picture_info(huaban_url)\n else:\n print('%d页加载完成' % a,'准备开始下载,请稍后...')\n\ndef downlosd_picture():\n i = 1\n f = open('H:/huabanwang/links/img_urls.txt', 'r')\n lines = f.readlines()\n for line in lines:\n download = urllib.request.urlretrieve(line, path + '%d.jpg' % i)\n print('download...%d' % i)\n i += 1\n print('%d页全部下载完毕,总共'%a + str(len(lines)) + '张图片')\n\n\nif __name__ == '__main__':\n print('由于使用的是selenium + phantomjs + beautifulsoup,脚本前期解析速度可能较慢')\n a = int(input('输入要下载的页数,按回车确定:'))\n get_picture_info()\n downlosd_picture()\n\n","sub_path":"huoqu_tupian.py","file_name":"huoqu_tupian.py","file_ext":"py","file_size_in_byte":4507,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"53542629","text":"import os, sys, json, yaml, subprocess, logging\nimport socket\nimport requests\n\nfrom dku_utils.access import _safe_get_value\n\nGCLOUD_INFO = None\nMETADATA_SERVER_BASE_URL=\"http://metadata/computeMetadata/v1/\"\n\ndef _get_gcloud_info():\n global GCLOUD_INFO\n if GCLOUD_INFO is None:\n logging.info(\"Retrieving gcloud info\")\n try:\n gcloud_info_str = subprocess.check_output([\"gcloud\", \"info\", \"--format\", \"json\"])\n GCLOUD_INFO = json.loads(gcloud_info_str)\n except:\n raise ValueError(\"gcloud CLI not found, check if Google Cloud SDK is properly installed and configured.\")\n return GCLOUD_INFO\n\n\ndef get_sdk_root():\n sdk_root = _safe_get_value(_get_gcloud_info(), [\"installation\", \"sdk_root\"], None)\n return sdk_root\n\n\ndef get_access_token_and_expiry(config={}):\n logging.info(\"Retrieving gcloud access token and expiry\")\n cmd_path = config.get(\"cmd-path\", os.path.join(get_sdk_root(), \"bin\", \"gcloud\"))\n cmd_args = config.get(\"cmd-args\", \"config config-helper --format=json\")\n info_str = subprocess.check_output(\"%s %s\" % (cmd_path, cmd_args), shell=True)\n info = json.loads(info_str)\n token_key_chunks = config.get(\"token-key\", \"{.credential.access_token}\")[2:-1].split('.')\n expiry_key_chunks = config.get(\"expiry-key\", \"{.credential.token_expiry}\")[2:-1].split('.')\n return 
_safe_get_value(info, token_key_chunks), _safe_get_value(info, expiry_key_chunks)\n\n\ndef get_account():\n    account = _safe_get_value(_get_gcloud_info(), [\"config\", \"account\"], None)\n    return account\n\n\ndef _run_cmd(cmd=None):\n    \"\"\"\n    Run command via subprocess. Clean retrieval of error message if fails. Trims any trailing space.\n    \"\"\"\n\n    logging.info(\"Running CMD {}\".format(cmd))\n    try:\n        out = subprocess.check_output(cmd).rstrip()\n    except subprocess.CalledProcessError as e:\n        print(e.output)\n        out = None  # set a value so the return below does not raise an unbound-local error\n    return out\n\n\ndef get_instance_info():\n    \"\"\"\n    Retrieve the instance name, project, region and zone by calling the local\n    metadata server.\n    \"\"\"\n\n    metadata_flavor = {\"Metadata-Flavor\": \"Google\"}\n    instance_info = {}\n    instance_info[\"project\"] = requests.get(\"/\".join([METADATA_SERVER_BASE_URL,\n                                                      \"project\",\n                                                      \"project-id\"]),\n                                            headers=metadata_flavor).text\n    zone_full = requests.get(\"/\".join([METADATA_SERVER_BASE_URL,\n                                       \"instance\",\n                                       \"zone\"]),\n                             headers=metadata_flavor).text\n    zone = zone_full.split(\"/\")[-1] \n    instance_info[\"zone\"] = zone\n    instance_info[\"region\"] = \"-\".join(zone.split(\"-\")[:-1])  # e.g. \"us-east1-b\" -> \"us-east1\"; the slice alone would leave a list\n    instance_info[\"vm_name\"] = requests.get(\"/\".join([METADATA_SERVER_BASE_URL,\n                                                      \"instance\",\n                                                      \"name\"]),\n                                            headers=metadata_flavor).text\n    return instance_info\n\n\ndef get_instance_network():\n    \"\"\"\n    Retrieve the network and subnetwork of the DSS host.\n    \"\"\"\n\n    instance_info = get_instance_info()\n    cmd = [\"gcloud\", \"compute\", \"instances\", \"describe\"]\n    cmd += [\n        instance_info[\"vm_name\"],\n        \"--project\",\n        instance_info[\"project\"],\n        \"--zone\",\n        instance_info[\"zone\"],\n        \"--format=json\"\n    ] \n    instance_full_info = json.loads(_run_cmd(cmd))\n    network_interfaces = instance_full_info[\"networkInterfaces\"]\n    default_nic = network_interfaces[0]\n    if len(network_interfaces) > 1:\n        logging.info(\"WARNING! 
Multiple NICs detected, will use {}\".format(default_nic))\n network = default_nic[\"network\"]\n subnetwork = default_nic[\"subnetwork\"]\n return network, subnetwork\n\n\ndef get_instance_service_account():\n \"\"\"\n Retrieve the active service account of the DSS host\n \"\"\"\n\n logging.info(\"Retrieving gcloud auth info\")\n cmd = [\"gcloud\", \"auth\", \"list\", \"--format=json\"]\n instance_auth_info = json.loads(_run_cmd(cmd))\n for identity in instance_auth_info:\n if identity[\"status\"] == \"ACTIVE\":\n instance_active_sa = identity[\"account\"]\n logging.info(\"Active service account on DSS host is {}\".format(instance_active_sa))\n return instance_active_sa\n","sub_path":"python-lib/dku_google/gcloud.py","file_name":"gcloud.py","file_ext":"py","file_size_in_byte":4436,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"543144512","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.6 (62161)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.linux-i686/egg/Toolserver/ClientMachinery.py\n# Compiled at: 2010-03-01 05:50:02\n\"\"\"\nToolserver Framework for Python - client machinery for remote tools\n\nCopyright (c) 2002, Georg Bauer <gb@rfc1437.de>, except where the file\nexplicitly names other copyright holders and licenses.\n\nPermission is hereby granted, free of charge, to any person obtaining a copy of \nthis software and associated documentation files (the \"Software\"), to deal in \nthe Software without restriction, including without limitation the rights to \nuse, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of\n \nthe Software, and to permit persons to whom the Software is furnished to do so, \nsubject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all \ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR \nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS\n \nFOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR \nCOPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER \nIN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN \nCONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\"\"\"\nimport re, urllib, httplib\ntry:\n from Crypto.Hash import SHA256\n from Crypto.Util.randpool import RandomPool\n hasCrypto = 1\nexcept ImportError:\n hasCrypto = 0\n\nfrom base64 import decodestring, encodestring\nfrom zlib import decompress\ntry:\n from cPickle import load, dumps, loads\nexcept:\n from pickle import load, dumps, loads\n\nfrom Toolserver.ClientRegistry import protocols\ndocumentEncoding = 'iso-8859-1'\nurlre = re.compile('^(https?)://([a-zA-Z0-9\\\\-\\\\.]*)(:[0-9]*)?(/.*)$')\n\nclass Method:\n\n def __init__(self, name, client):\n self._name = name\n self._client = client\n\n def __str__(self):\n return '<Toolserver.Client.Method %s on %s>' % (self._name, self._client._url)\n\n def __getattr__(self, name):\n return Method(self._name + '.' 
+ name, self._client)\n\n    def _rsaheaders(self, r, data):\n        if hasCrypto and self._client._srv.privkey and self._client._srv.localname:\n            crc = SHA256.new()\n            crc.update(data)\n            hash = crc.hexdigest()\n            signature = self._client._srv.privkey.sign(hash, '')\n            signature = str(signature[0])\n            r.putheader('X-TooFPy-Hash', hash)\n            r.putheader('X-TooFPy-Signature', signature)\n            r.putheader('X-TooFPy-Signer', self._client._srv.localname)\n\n    def __call__(self, *args, **kw):\n        data = self._client.build_request(self._name, args, kw)\n        obj = None\n        if type(data) == type(()):\n            obj = data[1]\n            data = data[0]\n        r = self._client.connect()\n        self._client.output_header_hook(r, data, obj)\n        self._rsaheaders(r, data)\n        if not self._client._already_compressed:\n            r.putheader('Accept-encoding', 'deflate')\n        r.putheader('Content-length', str(len(data)))\n        r.endheaders()\n        r.send(data)\n        (code, msg, headers) = r.getreply()\n        if code == 401:\n            sendheaders = []\n            wwwauth = headers.getheader('WWW-Authenticate').split()\n            if wwwauth[0] == 'Basic':\n                if hasattr(self._client._srv, 'basic'):\n                    sendheaders.append(('Authorization', 'Basic %s' % encodestring('%s:%s' % self._client._srv.basic)))\n                else:\n                    raise ValueError('No credentials for basic auth available')\n            else:\n                raise ValueError('Auth method %s not supported' % wwwauth[0])\n            r = self._client.connect()\n            self._client.output_header_hook(r, data, obj)\n            self._rsaheaders(r, data)\n            for (header, value) in sendheaders:\n                r.putheader(header, value)\n\n            r.putheader('Content-length', str(len(data)))\n            r.endheaders()\n            r.send(data)\n            (code, msg, headers) = r.getreply()\n        if code < 200 or code > 299:\n            raise ValueError(msg)\n        data = r.getfile().read()\n        content_encoding = headers.getheader('Content-encoding')\n        if content_encoding == 'deflate':\n            data = decompress(data)\n        obj = self._client.input_header_hook(headers, data)\n        if hasCrypto and self._client._srv.pubkey:\n            hash = headers.getheader('X-TooFPy-Hash')\n            signature = headers.getheader('X-TooFPy-Signature')\n            if not hash or not signature:\n                raise ValueError('either hash or signature not provided (or both)')\n            signature = (\n             long(signature),)\n            if not self._client._srv.pubkey.verify(hash, signature):\n                raise ValueError(\"signature can't be verified\")\n            crc = SHA256.new()\n            crc.update(data)\n            if crc.hexdigest() != hash:\n                raise ValueError(\"hash can't be verified\")\n        res = self._client.parse_response(data, obj)\n        if self._client._srv.simplify:\n            res = self._client.simplify_value(res)\n        encryption = headers.getheader('X-PickleRPC-Encryption')\n        if self._client.is_exception(res):\n            raise res\n        else:\n            return res\n        return\n\n\nclass AbstractClient:\n    _prefix = 'AbstractRPC'\n    _name = 'Abstract RPC Client'\n    _mimetype = 'text/xml'\n    _already_compressed = 0\n\n    def __init__(self, srv, url, *args):\n        self._srv = srv\n        self._url = url  # keep the full URL; connect() needs it when a proxy is used and Method.__str__ prints it\n        m = urlre.match(url)\n        assert m, 'Wrong format for URL - only http/https allowed'\n        self._proto = m.group(1)\n        assert self._proto in ('http', 'https'), 'Only http and https allowed'\n        self._host = m.group(2)\n        if self._proto == 'http':\n            self._port = 80\n        elif self._proto == 'https':\n            self._port = 443\n        if m.group(3):\n            self._port = int(m.group(3)[1:])\n        self._path = m.group(4)\n        self._proxyhost = getattr(self._srv, 'proxyhost', None)\n        self._proxyport = getattr(self._srv, 'proxyport', None)\n        return\n\n    def __str__(self):\n        return '<%s Client>' % self._prefix\n\n    def connect(self):\n        path = self._path\n        if self._proxyhost:\n            r = httplib.HTTP(self._proxyhost)\n            path = self._url\n        elif self._proto == 
'https':\n            r = httplib.HTTPS(self._host, self._port)\n        elif self._proto == 'http':\n            r = httplib.HTTP(self._host, self._port)\n        r.putrequest('POST', path)\n        r.putheader('Host', self._host)\n        r.putheader('User-agent', 'TooFPy %s Client' % self._name)\n        r.putheader('Content-type', self._mimetype)\n        return r\n\n    def __getattr__(self, name):\n        return Method(name, self)\n\n    def simplify_value(self, value):\n        return value\n\n    def is_exception(self, value):\n        return isinstance(value, Exception)\n\n    def build_request(self, method, args, kw):\n        raise NotImplementedError()\n\n    def parse_response(self, data, obj):\n        raise NotImplementedError()\n\n    def output_header_hook(self, request, data, obj):\n        pass\n\n    def input_header_hook(self, headers, data):\n        return\n\n\nclass RemoteToolserver:\n    \"\"\"\n    This class factors out the connection handling for remote tools\n    and the used protocol. Just instantiate a RemoteToolserver instance\n    and fetch tool proxies with the getTool method. Typical keywords\n    are proxyhost and proxyport to specify the proxy or secret to specify\n    a shared secret for protocols relying on this. Another one is simplify,\n    which can be used to simplify results of calls (especially useful\n    for SOAP). Its default is off. If you want to use RSA authentication\n    you can specify privkey and pubkey to point to files where your private\n    key and the public key of the server reside. You need to specify the\n    localname option, too. If you want to use basic authentication, set\n    the option basic to a tuple (user, password).\n    \"\"\"\n\n    def __init__(self, baseurl, protocol, **kw):\n        assert protocols.has_key(protocol), 'unknown protocol ' + protocol\n        if hasCrypto:\n            self._randpool = RandomPool(500)\n        self._baseurl = baseurl\n        while self._baseurl and self._baseurl.endswith('/'):\n            self._baseurl = self._baseurl[:-1]\n\n        self._protocol = protocol\n        if kw.has_key('pubkey'):\n            kw['pubkey'] = load(open(kw['pubkey']))\n        else:\n            kw['pubkey'] = None\n        if kw.has_key('privkey'):\n            kw['privkey'] = load(open(kw['privkey']))\n        else:\n            kw['privkey'] = None\n        self.simplify = 0\n        self.localname = ''\n        for k in kw.keys():\n            setattr(self, k, kw[k])\n\n        return\n\n    def getTool(self, path, protocol=''):\n        \"\"\"\n        This builds a proxy for a remote tool. 
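For example, srv.getTool('weather.report') (a hypothetical tool name) returns a proxy that posts to <baseurl>/<protocol>/weather/report. 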
You can override the\n        protocol name if the server uses different access points than\n        you use.\n        \"\"\"\n        if not protocol:\n            protocol = self._protocol\n        if type(path) in (type(''), type(u'')):\n            path = path.split('.')\n        assert type(path) in (type(()), type([])), 'path must be either string or sequence'\n        self.url = '%s/%s/%s' % (\n            self._baseurl, protocol,\n            ('/').join(path))\n        r = protocols[protocol]\n        args = []\n        for i in range(1, len(r)):\n            args.append(getattr(self, r[i]))\n\n        return r[0](self, *args)","sub_path":"pycfiles/Toolserver-0.4.1-py2.6/ClientMachinery.py","file_name":"ClientMachinery.py","file_ext":"py","file_size_in_byte":9876,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"334371762","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nfrom django.utils.timezone import utc\nimport datetime\n\n\nclass Migration(migrations.Migration):\n\n    dependencies = [\n    ]\n\n    operations = [\n        migrations.CreateModel(\n            name='TalkInUser',\n            fields=[\n                ('id', models.AutoField(verbose_name='ID', primary_key=True, serialize=False, auto_created=True)),\n                ('password', models.CharField(max_length=128, verbose_name='password')),\n                ('last_login', models.DateTimeField(null=True, verbose_name='last login', blank=True)),\n                ('username', models.CharField(max_length=40, unique=True)),\n                ('first_name', models.CharField(max_length=20, blank=True)),\n                ('second_name', models.CharField(max_length=20, blank=True)),\n                ('email', models.EmailField(max_length=254, unique=True)),\n                ('is_staff', models.BooleanField(default=False)),\n                ('is_active', models.BooleanField(default=True)),\n                ('about', models.CharField(max_length=1500, default='')),\n                ('date_joined', models.DateTimeField(default=datetime.datetime(2015, 5, 11, 2, 13, 11, 248214, tzinfo=utc))),\n            ],\n            options={\n                'abstract': False,\n            },\n        ),\n    ]\n","sub_path":"login_app/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":1383,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"343697461","text":"from django.shortcuts import render, redirect\nimport urllib.request\nimport random\n\n\n\n# Create your views here.\ndef index(request):\n\tif 'guess_count' in request.session:\n\t\trequest.session['guess_count']+=1\n\telse:\n\t\trequest.session['guess_count']=0\n\tword_url = \"http://svnweb.freebsd.org/csrg/share/dict/words?view=co&content-type=text/plain\"\n\tresponse = urllib.request.urlopen(word_url)\n\tlong_txt = response.read().decode()\n\twords = long_txt.splitlines()\n\tupper_words = [word for word in words if word[0].isupper()]\n\tname_words = [word for word in upper_words if not word.isupper()]\n\trequest.session['random_word'] = ' '.join([name_words[random.randint(0, len(name_words) - 1)] for i in range(1)])  # randint is inclusive on both ends, so cap the index at len - 1\n\treturn render(request, \"random_word_generator/index.html\")\n\ndef newCount(request):\n\tdel request.session['guess_count']\n\treturn redirect('/random_word')\n","sub_path":"python_stack/django/django_intro/project_one_django/apps/random_word_generator/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":847,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"315623552","text":"import lib_agent\nimport sys\nimport time\n\n\ndef main():\n    ip = sys.argv[1]\n    port = sys.argv[2]\n    platform = lib_agent.PlatformWrapper(ip, port)\n    while True:\n        functions = 
platform.get_all_functions()\n print(functions)\n time.sleep(10)\n\nif __name__ == '__main__':\n main()","sub_path":"examples/registeredFunctions.py","file_name":"registeredFunctions.py","file_ext":"py","file_size_in_byte":300,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"341334038","text":"#!/usr/bin/python3\n'''\nA remote service to display a worm moving randomly in an Adafruit \n8x8 LED matrix connected to a Raspberry Pi computer.\n'''\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n# Copyright (C) 2013 Ron Niessen\n#\n# This program is released under the MIT License, which is available\n# in file COPYING.MIT and at: http://opensource.org/licenses/MIT\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n\nimport sys, os, getopt\nimport time\nimport datetime\nimport random\nimport queue\nimport traceback\n#from Adafruit_LED_Backpack import Matrix8x8\nimport Matrix8x8\n#sys.path.append('../peer') # msgkit access relative to command-line\nfrom msgkit import *\n\n\n\n# define the four directions\nNorth = 'n'\nSouth = 's'\nEast = 'e'\nWest = 'w'\n\n\nclass Worm8x8():\n ''' This class encapsulates all methods necessary to control \n the worm on the 8x8 LED display.\n '''\n\n def __init__(self, delay, lenworm):\n self.grid = Matrix8x8.Matrix8x8() # Matrix8x8(address=0x70)\n self.speed = float(delay)\n self.newspeed = self.speed\n if int(lenworm) > 8:\n lw = 8 # max length to start\n else:\n lw = int(lenworm)\n # define an initial worm\n worm1 = [(0,0), (1,0), (2,0), (3,0), (4,0), (5,0), (6,0), (7,0)]\n self.worm = []\n for i in range(lw):\n self.worm.append(worm1[i])\n # controls for changing worm length\n self.bLonger = False # when true, lengthen the worm\n self.bShorter = False # when true, shorten the worm, if possible\n self.bNewSpeed = False # when true, set speed from newspeed\n self.bRun = True # when false, the app will exit\n\n\n def Run(self):\n ''' This is the mainline code for this remote service.\n '''\n # init display\n for pix in self.worm:\n self.grid.set_pixel(pix[0], pix[1], 1)\n self.grid.write_display()\n\n time.sleep(self.speed)\n while(self.bRun == True):\n if self.bNewSpeed == True and self.newspeed >= 0.01:\n self.speed = self.newspeed\n self.bNewSpeed = False\n nxt = self.RandForward() # move the worm forward, or if stuck then reverse\n self.grid.set_pixel(nxt[0], nxt[1], 1)\n self.worm.append(nxt) # append an item at the head end\n if self.bLonger == False:\n self.worm.reverse()\n gone = self.worm.pop() # pop off the item item at the tail end\n self.grid.set_pixel(gone[0], gone[1], 0)\n if self.bShorter == True:\n if len(self.worm) > 1:\n gone = self.worm.pop()\n self.grid.set_pixel(gone[0], gone[1], 0)\n self.bShorter = False\n self.worm.reverse()\n else:\n self.bLonger = False\n self.grid.write_display()\n time.sleep(self.speed)\n \n time.sleep(1)\n self.grid.clear()\n\n\n def RandForward(self):\n ''' This routine has two parts: (a) referring to the head of the\n worm, it builds a list of possible NSEW directions the worm could \n move without exiting the 8x8 matrix or crossing over/under itself \n (b) given the list of possible head move directions, one move \n direction is randomly chosen from the list and its corresponding \n coordinates returned.\n '''\n stuck = True\n while(stuck):\n ref = self.worm[-1] # extract the location of the head of the worm\n choices = []\n # build a list of possible directions to go forward\n goN = (ref[0], ref[1]-1)\n if goN not 
in self.worm[0:-1] and goN[1] >= 0:\n                choices.append(North)\n            goE = (ref[0]+1, ref[1])\n            if goE not in self.worm[0:-1] and goE[0] < 8:\n                choices.append(East)\n            goS = (ref[0], ref[1]+1)\n            if goS not in self.worm[0:-1] and goS[1] < 8:\n                choices.append(South)\n            goW = (ref[0]-1, ref[1])\n            if goW not in self.worm[0:-1] and goW[0] >= 0:\n                choices.append(West)\n            # print 'choices at %s: %s' % (ref, choices)\n\n            if len(choices) == 0:\n                print('worm is stuck at (%d,%d), reversing' % (ref[0], ref[1]))\n                self.worm.reverse()\n            else:\n                stuck = False\n            # end of while\n\n        go = random.choice(choices)\n        if go == 'n':\n            return goN\n        elif go == 'e':\n            return goE\n        elif go == 's':\n            return goS\n        else:\n            return goW\n\n\n    def SetDelay(self, delay):\n        ''' Set a new delay between worm moves.\n        '''\n        print('setting new delay to %s seconds' % delay)\n        self.newspeed = delay\n        self.bNewSpeed = True\n\n\n    def ChgWormLength(self, chg):\n        ''' Handle the worm length change command.\n        '''\n        if chg == 'inc':\n            self.__LengthenWorm()\n        else:\n            if len(self.worm) > 1:\n                self.__ShortenWorm()\n\n\n    def __LengthenWorm(self):\n        ''' Tell the rem app to lengthen the worm by one pixel (ie. LED).\n        '''\n        l = len(self.worm)\n        print('increasing worm length from %d to %d' % (l, l+1))\n        self.bLonger = True\n\n\n    def __ShortenWorm(self):\n        ''' Command to shorten the worm by one pixel (ie. LED).\n        '''\n        l = len(self.worm)\n        if l == 1:\n            print('cannot decrease worm length: worm length is 1')\n        else:\n            print('decreasing worm length from %d to %d' % (l, l-1))\n            self.bShorter = True\n\n    # end of Worm8x8 class\n\n\n\n# ===========================================================================\n# 8x8 LED Matrix Demo Remote Application (r-app)\n# ===========================================================================\n\nclass LED8x8:\n    ''' This class is the service that displays a \"worm\" on an 8x8 LED display.\n    '''\n\n    def __init__(self, execname, speed=0.5, wormlen=1, debug=False):\n        \n        self.execname = execname\n        self.speed = speed\n        self.wormlen = wormlen\n        self.bRun = True\n        self.debug = debug\n        self.quit = False # a flag to signal termination\n\n        self.worm = Worm8x8(self.speed, self.wormlen)\n        self.msgq = queue.Queue(10)  # the Python 3 module imported above is the lowercase 'queue'\n\n    def HandleAppMessages(self, peerconn, msgdata):\n        ''' Message handler receives APPS messages and dispatches commands.\n        '''\n        print('led8x8 msg received: |%s|' % msgdata)\n        # format of msgdata: <appcode> <options>\n        # this service app has appcode=led8x8\n        \n        cmd = msgdata.strip().lower().split()\n        nItems = len(cmd)\n        method = ''\n        param = ''\n        appcode = cmd[0]\n        if nItems > 1:\n            method = cmd[1]\n        if nItems > 2:\n            param = cmd[2]\n\n        if method == 'start':\n            self.worm.bRun = True\n            peerconn.senddata(REPLY, 'starting ...')\n            self.worm.Run()\n\n        elif appcode == QUIT:\n            peerconn.senddata(REPLY, 'quitting ...')\n            self.worm.bRun = False\n        \n        elif method == 'delay':\n            peerconn.senddata(REPLY, 'setting new delay ...')\n            self.worm.SetDelay(float(param))\n\n        elif method == 'length':\n            if param == 'inc' or param == 'dec':\n                peerconn.senddata(REPLY, 'changing length: %s ...' % param)\n                self.worm.ChgWormLength(param)\n                self.msgq.put(msgdata, True, None)\n            else:\n                peerconn.senddata(REPLY, 'invalid length change: %s ...' 
% param)\n\n        else:\n            peerconn.senddata(ERROR, 'unknown message: %s' % msgdata)\n\n    # ~~~~~~~~~~~ end of LED8x8 class ~~~~~~~~~~~~~\n\n\n\n\n# setup and run r-app\nif __name__=='__main__':\n\n    os.chdir(sys.path[0]) # set the current working directory\n    svcname = os.path.basename(sys.argv[0])\n    remapp = LED8x8(svcname, 0.5, 1)\n    try:\n        mk = msgkit('app', execname=svcname, \n                msghandler=remapp.HandleAppMessages,\n                access='public')\n        print('%s is now running msgkit' % svcname)\n        '''\n        peeraddr, peerport, peername = mk.getPeerID()\n        print 'PeerID: addr=%s, port=%d, name=%s' % (\n                peeraddr, peerport, peername) \n        appaddr, appport, appname = mk.getAppID()\n        print 'AppID: addr=%s, port=%d, name=%s' % (appaddr, appport, appname)\n        '''\n        mk.StartServer()\n        print('%s is ready to roll ...' % svcname)\n\n        while remapp.worm.bRun == True:\n            #print remapp.msgq.get(False, None)\n            time.sleep(1)\n\n        print('%s is quitting ...' % svcname)\n        mk.StopServer()\n        time.sleep(1)\n    except:\n        traceback.print_exc()\n\n\n","sub_path":"led8x8svc.py","file_name":"led8x8svc.py","file_ext":"py","file_size_in_byte":8974,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"151241463","text":"#! /usr/bin/env python\r\n\r\n\"\"\"\r\n    Program:  ch08_01_file_read.py\r\n    Function: First of several scripts to explore \r\n              opening and reading from a file.\r\n\"\"\"\r\n\r\nimport sys\r\n\r\nwork_file = input( \"Enter file to read: \" )  # take the filename as a plain string; the original eval(...) would try to execute the input\r\nif work_file == \"\":\r\n    print(\"Could not read from\", work_file, file=sys.stderr)\r\n    sys.exit(1)\r\n\r\nfile_read = open( work_file, \"r\")\r\n\r\nfor line in file_read:\r\n    line = line[:-1]\r\n    print (line)\r\n\r\nfile_read.close()\r\n\r\nprint (\"That's all folks!\") \r\nsys.exit(0)\r\n","sub_path":"Ch08_io/ch08_01_file_read.py","file_name":"ch08_01_file_read.py","file_ext":"py","file_size_in_byte":502,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"503484194","text":"#!/usr/bin/env python\n\nimport os\nimport sys\nimport glob\nimport distutils\n\nfrom setuptools import setup, Extension\n\nif sys.platform == 'win32':\n    sys.exit('error: this module is not meant to work on Windows')\n\nhere = os.path.abspath(os.path.dirname(__file__))\nREADME = open(os.path.join(here, 'README.rst')).read()\nNEWS = open(os.path.join(here, 'NEWS.rst')).read()\n\nVERSION = '6.2.4.1'\nDESCRIPTION = 'The standard Python readline extension statically linked against the GNU readline library.'\nLONG_DESCRIPTION = README + '\\n\\n' + NEWS\nCLASSIFIERS = [\n    'Environment :: Console',\n    'Intended Audience :: Developers',\n    'Intended Audience :: End Users/Desktop',\n    'License :: OSI Approved :: GNU General Public License (GPL)',\n    'Operating System :: MacOS :: MacOS X',\n    'Operating System :: POSIX',\n    'Programming Language :: C',\n    'Programming Language :: Python :: 2',\n    'Programming Language :: Python :: 3',\n    'Topic :: Software Development :: Libraries :: Python Modules',\n]\n\n# If we are on Mac OS 10.5 or later, attempt a universal binary, which is the way\n# the original system version of readline.so was compiled. 
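(A universal binary packs code for several CPU architectures into one file, chosen with -arch compiler flags.) 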
Set up flags here.\nUNIVERSAL = ''\nplatform = distutils.util.get_platform()\nif platform.startswith('macosx'):\n osx_version = platform.split('-')[1]\n SDK = ''\n if osx_version == '10.5':\n SDK = '/Developer/SDKs/MacOSX10.5.sdk'\n UNIVERSAL = '-arch i386 -arch ppc -arch x86_64 -arch ppc64'\n elif osx_version == '10.6':\n # Starting with 10.6 (Snow Leopard), only Intel architecture is supported\n SDK = '/Developer/SDKs/MacOSX10.6.sdk'\n UNIVERSAL = '-arch i386 -arch x86_64'\n elif osx_version > '10.6':\n # Starting with 10.7 (Lion) and Xcode 4.3, the developer sysroot is inside the Xcode.app - ignore it\n UNIVERSAL = '-arch i386 -arch x86_64'\n\n if os.path.exists(SDK):\n # only add sysroot if it exists:\n UNIVERSAL = \"-isysroot %s %s\" % (SDK, UNIVERSAL)\n\n# Since we have the latest readline (post 4.2), enable all readline functionality\n# These macros can be found in pyconfig.h.in in the main directory of the Python tarball\nDEFINE_MACROS = [\n ('HAVE_RL_CALLBACK', None),\n ('HAVE_RL_CATCH_SIGNAL', None),\n ('HAVE_RL_COMPLETION_APPEND_CHARACTER', None),\n ('HAVE_RL_COMPLETION_DISPLAY_MATCHES_HOOK', None),\n ('HAVE_RL_COMPLETION_MATCHES', None),\n ('HAVE_RL_COMPLETION_SUPPRESS_APPEND', None),\n ('HAVE_RL_PRE_INPUT_HOOK', None),\n]\n\n# Check if any of the distutils commands involves building the module,\n# and check for quiet vs. verbose option\nbuilding = False\nverbose = True\nfor s in sys.argv[1:]:\n if s.startswith('bdist') or s.startswith('build') or s.startswith('install'):\n building = True\n if s in ['--quiet', '-q']:\n verbose = False\n if s in ['--verbose', '-v']:\n verbose = True\n \n# Build readline first, if it is not there and we are building the module\nif building and not os.path.exists('readline/libreadline.a'):\n if verbose:\n print(\"\\n============ Building the readline library ============\\n\")\n os.system('cd rl && /bin/bash ./build.sh')\n print(\"\\n============ Building the readline extension module ============\\n\")\n else:\n os.system('cd rl && /bin/bash ./build.sh > /dev/null 2>&1') \n # Add symlink that simplifies include and link paths to real library\n if not (os.path.exists('readline') or os.path.islink('readline')):\n os.symlink(os.path.join('rl','readline-lib'), 'readline')\n\nsetup(\n name=\"readline\",\n version=VERSION,\n description=DESCRIPTION,\n long_description=LONG_DESCRIPTION,\n classifiers=CLASSIFIERS,\n maintainer=\"Ludwig Schwardt; Sridhar Ratnakumar\",\n maintainer_email=\"ludwig.schwardt@gmail.com; github@srid.name\",\n url=\"http://github.com/ludwigschwardt/python-readline\",\n license=\"GNU GPL\",\n platforms=['MacOS X', 'Posix'],\n include_package_data=True,\n ext_modules=[\n Extension(name=\"readline\",\n sources=[\"Modules/%s.x/readline.c\" % (sys.version_info[0],)],\n include_dirs=['.'],\n define_macros=DEFINE_MACROS,\n extra_compile_args=['-Wno-strict-prototypes'] + UNIVERSAL.split(),\n extra_link_args=UNIVERSAL.split(),\n extra_objects=['readline/libreadline.a', 'readline/libhistory.a'], \n libraries=['ncurses']\n ),\n ],\n zip_safe=False,\n)\n","sub_path":"lib/readline-6.2.4.1/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":4460,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"426712122","text":"# Boltons strutils Exercise: Repl.it Usernames :cyclone:\n# From the given text file, transform the camelCased words into snake_cased words\n# and vice versa. 
Shuffle the list and print it on the console with its index in\n# ordinal form that starts at 1.\n# Output:\n# 1st FancyAndPowerful\n# 2nd serving_glorious_error\n# 3rd complaining_bitter_marketing\n# 4th smoking_poised_solution\n# 5th ScaryYetDiligent\n# 6th MentalNotCheap\n# 7th VerdantYetResonant\n# 8th FoolishYetTedious\n# 9th embracing_magical_event\n# 10th disappearing_maniacal_library\n# 11th dismissing_unhealthy_cancer\n# 12th SqualidButSqueamish\n# 13th EnchantedButVisible\n# 14th governing_standing_hotel\n# 15th MajesticNotRecondite\n# 16th appearing_impartial_death\n# 17th SuccinctButPushy\n# 18th translating_literate_revolution\n# 19th OmniscientAndBlack\n# 20th biting_severe_cousin\n\nfrom boltons.strutils import camel2under, under2camel, ordinalize\nimport random\n\nwith open(\"elijah_file.txt\", \"r\") as f:\n    phrases = []\n    for line in f:\n        line = line.rstrip('\\n')  # drop the trailing newline so the case checks and the printed output stay clean\n        if line != line.lower() and line != line.upper() and \"_\" not in line:\n            phrases.append(camel2under(line))\n        else:\n            phrases.append(under2camel(line))\n\n    random.shuffle(phrases)\n    for idx, phrase in enumerate(phrases, start=1):\n        print(ordinalize(idx), phrase)\n\n\n\n","sub_path":"Modules/boltons_module/dev_exercise/elijah.py","file_name":"elijah.py","file_ext":"py","file_size_in_byte":1305,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"135046521","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom __future__ import division\nimport math\nimport numpy as np\nimport scipy as sp\nimport pandas\nimport matplotlib.pyplot as plt\nfrom progressbar import ProgressBar\nfrom scipy.sparse import csr_matrix\nfrom scipy.sparse import linalg as sparse_linalg\nimport sys\nfile_dir = '/localhome/pykb/physics_code/Exact_Diagonalization/Classes/'\nsys.path.append(file_dir)\nfile_dir = '/localhome/pykb/physics_code/Exact_Diagonalization/functions/'\nsys.path.append(file_dir)\n\nfrom Hamiltonian_Classes import Hamiltonian,H_table,clock_Hamiltonian,spin_Hamiltonian\nfrom System_Classes import unlocking_System,U1_system\nfrom Symmetry_Classes import translational,parity,model_sym_data,charge_conjugation\n# from Plotting_Classes import eig_overlap,fidelity,entropy,energy_basis\nfrom Non_observables import zm\nfrom Construction_functions import bin_to_int_base_m,int_to_bin_base_m,cycle_bits_state\nfrom Search_functions import find_index_bisection\nfrom State_Classes import zm_state,sym_state,prod_state,bin_state,ref_state\nfrom rw_functions import save_obj,load_obj\nfrom Calculations import level_stats,fidelity,eig_overlap,entropy,site_precession,site_projection,time_evolve_state\nimport numpy as np\nimport scipy as sp\nimport math\n\nfrom matplotlib import rc\nrc('font',**{'family':'sans-serif','sans-serif':['Computer Modern'],'size':26})\n## for Palatino and other serif fonts use:\n#rc('font',**{'family':'serif','serif':['Palatino']})\nrc('text', usetex=True)\n# matplotlib.rcParams['figure.dpi'] = 400\n\ndef find_hamming_sectors(state_bits,system):\n    #organize states via hamming distance from Neel\n    hamming_sectors = dict()\n    for n in range(0,system.N+1):\n        hamming_sectors[n] = []\n    for n in range(0,system.dim):\n        h = 0\n        for m in range(0,system.N,1):\n            if system.basis[n][m] != state_bits[m]:\n                h = h+1\n        hamming_sectors[int(h)] = np.append(hamming_sectors[int(h)],system.basis_refs[n])\n    return hamming_sectors\n\n\nimport operator as op\nfrom functools import reduce\ndef ncr(n, r):\n    r = min(r, n-r)\n    numer = reduce(op.mul, range(n, n-r, -1), 1)\n    denom = reduce(op.mul, range(1, r+1), 1)\n    
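# binomial coefficient n choose r from running products, avoiding large factorial intermediates\n    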
return numer / denom\n\ndef W(N):\n choose = ncr(int(N/2),2)\n return 1/np.power(choose,0.5)*1/np.power(int(N/2)-1,0.5)*N/2\n\ndef power(H,n,e,u):\n diag = np.power(e,n)\n return np.dot(u,np.dot(np.diag(diag),np.conj(np.transpose(u))))\n\n#find roots from root sector to target sector\ndef find_root_refs(root_sector,target_sector,H,sector_refs,from_sector,system,exclude):\n #find excluded states in target sector\n exclude_sector = []\n for n in range(0,np.size(exclude,axis=0)):\n if (from_sector[exclude[n]] == target_sector).all():\n exclude_sector = np.append(exclude_sector,exclude[n])\n target_refs = sector_refs[perm_key(target_sector,system)]\n to_del=[]\n for n in range(0,np.size(target_refs,axis=0)):\n if target_refs[n] in exclude_sector:\n to_del = np.append(to_del,n)\n for n in range(np.size(to_del,axis=0)-1,-1,-1):\n target_refs=np.delete(target_refs,to_del[n])\n \n no_actions = np.sum(target_sector-root_sector)\n if no_actions > 1:\n H_power = power(H.sector.matrix(),no_actions,H.sector.eigvalues(),H.sector.eigvectors())\n else:\n H_power = H.sector.matrix()\n\n root_refs = sector_refs[perm_key(root_sector,system)]\n roots_kept = []\n for m in range(0,np.size(root_refs,axis=0)):\n maps_to = []\n for u in range(0,system.dim):\n if np.abs(H_power[system.keys[root_refs[m]],u])>1e-5:\n maps_to = np.append(maps_to,system.basis_refs[u])\n\n for u in range(0,np.size(target_refs,axis=0)):\n if target_refs[u] in maps_to:\n roots_kept = np.append(roots_kept,root_refs[m])\n break\n \n # for u in range(0,np.size(maps_to,axis=0)):\n # if (from_sector[int(maps_to[u])] == target_sector).all():\n # roots_kept = np.append(roots_kept,root_refs[m])\n # break\n\n return roots_kept\n\ndef perm_key(sector,system):\n return bin_to_int_base_m(sector,int(system.N/2)+1)\n\ndef cube_fsa(root_sector,sublattice_parity,sector_refs,system):\n refs = sector_refs[perm_key(root_sector,system)]\n # #find root refs, those with two neighbouring 1->0 from Neel\n root_refs = []\n for n in range(0,np.size(refs,axis=0)):\n bits = system.basis[system.keys[refs[n]]]\n for m in range(0,np.size(bits,axis=0)):\n if m == np.size(bits)-1:\n mp1 = 0\n mp2 = 1\n mp3 = 2\n elif m == np.size(bits)-2:\n mp1 = m + 1\n mp2 = 0\n mp3 = 1\n elif m == np.size(bits)-3:\n mp1 = m + 1\n mp2 = m + 2\n mp3 = 0\n else:\n mp1 = m + 1\n mp2 = m + 2\n mp3 = m + 3\n\n if bits[m] == 0 and bits[mp1] == 0 and bits[mp2] == 0 and bits[mp3] == 0:\n root_refs = np.append(root_refs,refs[n])\n break\n\n root_bits = np.zeros((np.size(root_refs),system.N))\n for n in range(0,np.size(root_refs,axis=0)):\n root_bits[n] = system.basis[system.keys[root_refs[n]]]\n\n fsa_min_bit_loc = np.zeros((np.size(root_bits,axis=0),int(system.N/2)-2))\n fsa_plus_bit_loc = np.zeros(np.size(root_bits,axis=0))\n\n for n in range(0,np.size(root_bits,axis=0)):\n c=0\n for m in range(0,np.size(root_bits[n],axis=0)):\n if root_bits[n,m] == 1:\n fsa_min_bit_loc[n,c] = m\n c = c+1\n\n\n if sublattice_parity == \"L\":\n if m % 2 != 0:\n if m == system.N-1:\n mp1 = 0\n else:\n mp1 = m + 1\n if m == 0:\n mm1 = system.N-1\n else:\n mm1 = m - 1\n if root_bits[n,mm1] == 0 and root_bits[n,m] == 0 and root_bits[n,mp1] == 0:\n fsa_plus_bit_loc[n] = m\n\n elif sublattice_parity == \"R\":\n if m % 2 == 0:\n if m == system.N-1:\n mp1 = 0\n else:\n mp1 = m + 1\n if m == 0:\n mm1 = system.N-1\n else:\n mm1 = m - 1\n if root_bits[n,mm1] == 0 and root_bits[n,m] == 0 and root_bits[n,mp1] == 0:\n fsa_plus_bit_loc[n] = m\n\n fsa_plus = dict()\n fsa_min = dict()\n for n in 
range(0,np.size(fsa_plus_bit_loc,axis=0)):\n fsa_plus[n] = np.zeros((system.dim,system.dim))\n #scan basis + sites\n for m in range(0,np.size(system.basis_refs,axis=0)):\n for k in range(0,system.N):\n #sp\n if np.abs(k - fsa_plus_bit_loc[n])<1e-5:\n bits = np.copy(system.basis[m])\n if k == system.N-1:\n kp1 = 0\n else:\n kp1 = k +1\n if k == 0:\n km1 = system.N-1\n else:\n km1 = k - 1\n\n if bits[kp1] == 0 and bits[km1] == 0 and bits[k] == 0:\n bits[k] = 1\n new_ref = bin_to_int_base_m(bits,system.base)\n fsa_plus[n][m,system.keys[new_ref]] = 1\n #sm\n if k in fsa_min_bit_loc[n]:\n bits = np.copy(system.basis[m])\n if k == system.N-1:\n kp1 = 0\n else:\n kp1 = k +1\n if k == 0:\n km1 = system.N-1\n else:\n km1 = k - 1\n\n if bits[kp1] == 0 and bits[km1] == 0 and bits[k] == 1:\n bits[k] = 0\n new_ref = bin_to_int_base_m(bits,system.base)\n fsa_plus[n][m,system.keys[new_ref]] = 1\n\n for n in range(0,len(fsa_plus)):\n fsa_min[n] = np.conj(np.transpose(fsa_plus[n]))\n\n fsa_basis = dict()\n fsa_dim = int(system.N/2-2)\n for n in range(0,np.size(root_refs,axis=0)):\n fsa_basis[n] = ref_state(root_refs[n],system).prod_basis()\n current_state = fsa_basis[n]\n for m in range(0,fsa_dim):\n new_state = np.dot(fsa_min[n],current_state)\n new_state = new_state / np.power(np.vdot(new_state,new_state),0.5)\n fsa_basis[n] = np.vstack((fsa_basis[n],new_state))\n current_state = new_state\n fsa_basis[n] = np.transpose(fsa_basis[n])\n\n basis = fsa_basis[0]\n for n in range(1,len(fsa_basis)):\n basis = basis + fsa_basis[n]\n for n in range(0,np.size(basis,axis=1)):\n basis[:,n] = basis[:,n] / np.power(np.vdot(basis[:,n],basis[:,n]),0.5)\n return basis\n\n","sub_path":"projects/approximate_su2/subcube_functions.py","file_name":"subcube_functions.py","file_ext":"py","file_size_in_byte":9066,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"50204280","text":"\"\"\" Setup for cupyopt \"\"\"\nimport os\nimport pathlib\n\nimport setuptools\n\n# The directory containing this file\nHERE = pathlib.Path(__file__).parent\n\n# The text of the README file\nREADME = (HERE / \"README.rst\").read_text()\n\n# Pull requirements from the text file\nREQUIREMENT_PATH = HERE / \"requirements.txt\"\nINSTALL_REQUIRES = []\nif os.path.isfile(REQUIREMENT_PATH):\n with open(REQUIREMENT_PATH) as f:\n INSTALL_REQUIRES = f.read().splitlines()\n\n# This call to setup() does all the work\nsetuptools.setup(\n name=\"cupyopt\",\n version=\"1.1.0.2\",\n description=\"CU Python Opinionated Prefect Tasks\",\n long_description=README,\n long_description_content_type=\"text/x-rst\",\n author=\"CU Boulder, OIT\",\n author_email=\"stta9820@colorado.edu\",\n license=\"Apache License 2.0\",\n classifiers=[\n \"License :: OSI Approved :: Apache Software License\",\n \"Operating System :: OS Independent\",\n ],\n packages=setuptools.find_packages(where=\"src\"),\n python_requires=\">=3.8\",\n package_dir={\"\": \"src\"},\n install_requires=INSTALL_REQUIRES,\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1075,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"480012924","text":"def fib(n):\n if n == 0:\n return 0\n if n == 1:\n return 1\n # table for tabulation\n table = [None] * (n+1) \n table[0] = 0 # base case 1, fib(0) = 0\n table[1] = 1 # base case 2, fib(1) = 1\n # filling up tabulation table starting from 2 and going upto n\n for i in range(2,n+1): \n # we have result of i-1 and i-2 available 
because these had been evaluated already\n        table[i] = table[i-1] + table[i-2] \n    # return the value of n in tabulation table\n    return table[n] \n\nprint(fib(100000))","sub_path":"prepa/TD7/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":518,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"418800608","text":"\n\"\"\"\nc = 0\nfor number in range(1, 10 + 1):\n    print(number)\n    c = c + number\nprint(c)\n\"\"\"\n\n\ndef add_numbers(start,end):\n    \"\"\"\n    write the body of this\n    function, similar to the block\n    of code we just saw. Hint:\n    don’t forget to use return\n    \"\"\"\n\n\n    c = 0\n    for number in range(start, end + 1):\n        print(number)\n        c = c + number\n    return(c)\n\n'''\ntest = add_numbers(333,777)\nprint(test)\n'''\n'''\ntest2 = add_numbers(1,100)\nprint(test2)\n'''\n'''\ntest3 = add_numbers(1000, 5000)\nprint(test3) \n'''\n","sub_path":"fun1.py","file_name":"fun1.py","file_ext":"py","file_size_in_byte":528,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"56440895","text":"from django.http import HttpResponse, HttpResponseRedirect\nfrom django.shortcuts import render, render_to_response\nfrom django.template import RequestContext, loader\nfrom ngo.forms import NgoAddForm\nfrom django.contrib.auth.models import User \nfrom ngo.models import Ngo, Address, NameForm\nfrom django.contrib.contenttypes.models import ContentType\n\ndef add(request):\n    if request.method == 'POST':\n        form = NgoAddForm(request.POST, request.FILES)\n        exist = User.objects.filter(email = (request.POST.get('email')))\n        if form.is_valid():  # is_valid must be called; the bare attribute is always truthy\n            if not exist:\n                create(request.POST, request.FILES)\n                added = True\n                return render(request, 'ngo/addngo.html', {'added': added})\n            else:\n                error = \"Email Already Exists.\"\n                return render(request, 'ngo/addngo.html', {'error': error, 'form': form})\n        else:\n            return render(request, 'ngo/addngo.html', {'form': form})\n    else:\n        form = NgoAddForm()\n        return render(request, 'ngo/addngo.html', {'form': form})\n\ndef create(data, files):\n#Create User...................................\n    u = User.objects.create_user(  # create_user hashes the password; a plain create() would store it in clear text\n        email = data.get('email'),\n        first_name = data.get('first_name'),\n        last_name = data.get('last_name'),\n        password = data.get('password'),\n        username = data.get('email'),\n        )\n    u.save()\n    \n#Create ngo.....................................\n    ngo = Ngo.objects.create(\n        name = data.get('name'),\n        description = data.get('description'),\n        image = files.get('image'),\n        user = u,\n        )\n    ngo.save()\n    for i in data.getlist('project_article'):\n        ngo.project_article.add(i)\n    ngo.save()\n\n#Create Address................................\n    Address.objects.create(\n        address1 = data.get('address1'),\n        address2 = data.get('address2'),\n        state = data.get('state'),\n        country = data.get('country'),\n        content_type = ContentType.objects.get(model = 'ngo'),\n        object_id = ngo.id,\n        )\n\ndef index(request):\n    return render(request, 'ngo/table.html', {'ngos': Ngo.objects.all()})\n\ndef name(request):\n    if request.method == 'POST':\n        NameForm.objects.create(name = request.POST.get('name'))\n        return render(request, 'ngo/name.html', {'added': True})\n    else:\n        return render(request, 'ngo/name.html')\n\ndef see(request):\n    names = \"\"\n    for i in NameForm.objects.all():\n        names = names + i.name + \"--\"\n    return HttpResponse 
(names)\n","sub_path":"mcms/ngo/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2556,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"68912488","text":"import numpy as np\nimport argparse\nimport cv2\n\nap = argparse.ArgumentParser()\nap.add_argument(\"-i\", \"--image\", required = True,\nhelp = \"Path to the image\")\nargs = vars(ap.parse_args())\n\nimage=cv2.imread(args[\"image\"])\nimage=cv2.cvtColor(image,cv2.COLOR_BGR2GRAY)\n#blurred=cv2.GaussianBlur(image,(5,5),0) #not necessary\ncv2.imshow(\"Image\",image)\n\nlap=cv2.Laplacian(image,cv2.CV_64F)\nlap=np.uint8(np.absolute(lap))\ncv2.imshow(\"Laplacian\",lap)\n\nsobelX=cv2.Sobel(image,cv2.CV_64F,1,0)\nsobelY=cv2.Sobel(image,cv2.CV_64F,0,1)\n\nsobelX=np.uint8(np.absolute(sobelX))\nsobelY=np.uint8(np.absolute(sobelY))\n#taking absolute will result in edges being shown white irrespective of whether they are postive slope or negative slope\nsobelCombined=cv2.bitwise_or(sobelX,sobelY)\n\ncv2.imshow(\"Sobel X\", sobelX)\ncv2.imshow(\"Sobel Y\", sobelY)\ncv2.imshow(\"Sobel Combined\", sobelCombined)\ncv2.waitKey(0)\n","sub_path":"codes_opencv/sobel_and_laplacian.py","file_name":"sobel_and_laplacian.py","file_ext":"py","file_size_in_byte":880,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"599549398","text":"# /usr/bin/env python3\n\n# pylint: disable=C0413,C0411,C0116\n\n\"\"\"Code inside the chapter for the book PySpark in Action (chapter 8).\"\"\"\n\n# tag::ch08-simple-rdd[]\n\nfrom pyspark.sql import SparkSession\n\nspark = SparkSession.builder.getOrCreate()\n\ncollection = [1, \"two\", 3.0, (\"four\", 4), {\"five\": 5}] # <1>\n\nsc = spark.sparkContext # <2>\n\ncollection_rdd = sc.parallelize(collection) # <3>\n\nprint(collection_rdd)\n# ParallelCollectionRDD[0] at parallelize at PythonRDD.scala:195 <4>\n\n# end::ch08-simple-rdd[]\n\n# tag::ch08-rdd-map[]\n\n\ndef add_one(value):\n return value + 1 # <1>\n\n\ncollection_rdd = collection_rdd.map(add_one) # <2>\n\nprint(collection_rdd.collect()) # <3>\n# Stack trace galore! 
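The map is lazy, so the mixed-type elements only fail once collect() materializes the RDD. 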
The important bit, you'll get one of the following:\n# TypeError: can only concatenate str (not \"int\") to str\n# TypeError: unsupported operand type(s) for +: 'dict' and 'int'\n# TypeError: can only concatenate tuple (not \"int\") to tuple\n\n# end::ch08-rdd-map[]\n\n# tag::ch08-rdd-map2[]\n\ncollection_rdd = sc.parallelize(collection) # <1>\n\n\ndef safer_add_one(value):\n try:\n return value + 1\n except TypeError:\n return value # <2>\n\n\ncollection_rdd = collection_rdd.map(safer_add_one)\n\nprint(collection_rdd.collect())\n# [2, 'two', 4.0, ('four', 4), {'five': 5}] <3>\n\n# end::ch08-rdd-map2[]\n\n# tag::ch08-rdd-filter[]\n\ncollection_rdd = collection_rdd.filter(lambda elem: isinstance(elem, (float, int)))\n\nprint(collection_rdd.collect())\n# [2, 4.0]\n\n# end::ch08-rdd-filter[]\n\n# tag::ch08-rdd-reduce[]\n\nfrom operator import add # <1>\n\ncollection_rdd = sc.parallelize([4, 7, 9, 1, 3])\n\nprint(collection_rdd.reduce(add)) # 24\n# end::ch08-rdd-reduce[]\n\n# tag::ch08-df-to-rdd[]\n\ndf = spark.createDataFrame([[1], [2], [3]], schema=[\"column\"])\n\nprint(df.rdd)\n# MapPartitionsRDD[22] at javaToPython at NativeMethodAccessorImpl.java:0\n\nprint(df.rdd.collect())\n# [Row(column=1), Row(column=2), Row(column=3)]\n\n# end::ch08-df-to-rdd[]\n\n# tag::ch08-fraction-df[]\nimport pyspark.sql.functions as F\nimport pyspark.sql.types as T\n\nfractions = [[x, y] for x in range(100) for y in range(1, 100)] # <1>\n\nfrac_df = spark.createDataFrame(fractions, [\"numerator\", \"denominator\"])\n\nfrac_df = frac_df.select(\n F.array(F.col(\"numerator\"), F.col(\"denominator\")).alias(\"fraction\"), # <2>\n)\n\nfrac_df.show(5, False)\n# +--------+\n# |fraction|\n# +--------+\n# |[0, 1] |\n# |[0, 2] |\n# |[0, 3] |\n# |[0, 4] |\n# |[0, 5] |\n# +--------+\n# only showing top 5 rows\n# end::ch08-fraction-df[]\n\n# tag::ch08-udf-python[]\n\nfrom fractions import Fraction # <1>\nfrom typing import Tuple, Optional # <2>\n\nFrac = Tuple[int, int] # <3>\n\n\ndef py_reduce_fraction(frac: Frac) -> Optional[Frac]: # <4>\n \"\"\"Reduce a fraction represented as a 2-tuple of integers.\"\"\"\n num, denom = frac\n if denom:\n answer = Fraction(num, denom)\n return answer.numerator, answer.denominator\n return None\n\n\nassert py_reduce_fraction((3, 6)) == (1, 2) # <5>\nassert py_reduce_fraction((1, 0)) is None\n\n\ndef py_fraction_to_float(frac: Frac) -> Optional[float]:\n \"\"\"Transforms a fraction represented as a 2-tuple of integers into a float.\"\"\"\n num, denom = frac\n if denom:\n return num / denom\n return None\n\n\nassert py_fraction_to_float((2, 8)) == 0.25\nassert py_fraction_to_float((10, 0)) is None\n# end::ch08-udf-python[]\n\n# tag::ch08-udf1[]\nSparkFrac = T.ArrayType(T.LongType()) # <1>\n\nreduce_fraction = F.udf(py_reduce_fraction, SparkFrac) # <2>\n\nfrac_df = frac_df.withColumn(\n \"reduced_fraction\", reduce_fraction(F.col(\"fraction\")) # <3>\n)\n\nfrac_df.show(5, False)\n# +--------+----------------+\n# |fraction|reduced_fraction|\n# +--------+----------------+\n# |[0, 1] |[0, 1] |\n# |[0, 2] |[0, 1] |\n# |[0, 3] |[0, 1] |\n# |[0, 4] |[0, 1] |\n# |[0, 5] |[0, 1] |\n# +--------+----------------+\n# only showing top 5 rows\n\n# end::ch08-udf1[]\n\n# tag::ch08-udf2[]\n@F.udf(T.DoubleType()) # <1>\ndef fraction_to_float(frac: Frac) -> Optional[float]:\n \"\"\"Transforms a fraction represented as a 2-tuple of integers into a float.\"\"\"\n num, denom = frac\n if denom:\n return num / denom\n return None\n\n\nfrac_df = frac_df.withColumn(\n \"fraction_float\", 
fraction_to_float(F.col(\"reduced_fraction\"))\n)\n\nfrac_df.select(\"reduced_fraction\", \"fraction_float\").distinct().show(5, False)\n# +----------------+-------------------+\n# |reduced_fraction|fraction_float |\n# +----------------+-------------------+\n# |[3, 50] |0.06 |\n# |[3, 67] |0.04477611940298507|\n# |[7, 76] |0.09210526315789473|\n# |[9, 23] |0.391304347826087 |\n# |[9, 25] |0.36 |\n# +----------------+-------------------+\n# only showing top 5 rows\nassert fraction_to_float.func((1, 2)) == 0.5 # <2>\n# end::ch08-udf2[]\n\n# This to read the data from BQ\n\n# Restart your Spark. If launching from the command line `pyspark --jars`\n# Or set\nimport os\n\nos.environ.setdefault(\"ARROW_PRE_0_15_IPC_FORMAT\", \"1\")\n\nimport pyspark.sql.functions as F\nimport pyspark.sql.types as T\n\n# tag::ch08-python-bq[]\nfrom pyspark.sql import SparkSession\n\nspark = SparkSession.builder.config(\n \"spark.jars.packages\",\n\"com.google.cloud.spark:spark-bigquery-with-dependencies_2.12:0.17.3\"\n).getOrCreate()\n\n# Ivy Default Cache set to: /Users/jonathan_rioux/.ivy2/cache\n# The jars for the packages stored in: /Users/jonathan_rioux/.ivy2/jars\n# :: loading settings :: url = jar:file:/usr/local/Cellar/apache-spark/2.4.5/libexec/jars/ivy-2.4.0.jar!/org/apache/ivy/core/settings/ivysettings.xml\n# com.google.cloud.spark#spark-bigquery-with-dependencies_2.11 added as a dependency\n# :: resolving dependencies :: org.apache.spark#spark-submit-parent-035f1392-cda4-4935-a62b-969bda5449d5;1.0\n# \tconfs: [default]\n# \tfound com.google.cloud.spark#spark-bigquery-with-dependencies_2.11;0.15.1-beta in central\n# :: resolution report :: resolve 134ms :: artifacts dl 2ms\n# \t:: modules in use:\n# \tcom.google.cloud.spark#spark-bigquery-with-dependencies_2.11;0.15.1-beta from central in [default]\n# \t---------------------------------------------------------------------\n# \t| | modules || artifacts |\n# \t| conf | number| search|dwnlded|evicted|| number|dwnlded|\n# \t---------------------------------------------------------------------\n# \t| default | 1 | 0 | 0 | 0 || 1 | 0 |\n# \t---------------------------------------------------------------------\n# :: retrieving :: org.apache.spark#spark-submit-parent-035f1392-cda4-4935-a62b-969bda5449d5\n# \tconfs: [default]\n# \t0 artifacts copied, 1 already retrieved (0kB/4ms)\n# [...]\n# end::ch08-python-bq[]\n\n# TODO: Explain unionbyname\n# TODO: https://jaceklaskowski.gitbooks.io/mastering-spark-sql/spark-sql-Dataset-typed-transformations.html#unionByName\n\n# tag::ch08-read-bq[]\n\nfrom functools import reduce\nfrom pyspark.sql import DataFrame\n\n\ndef read_df_from_bq(year): # <1>\n return (\n spark.read.format(\"bigquery\") # <2>\n .option(\"table\", f\"bigquery-public-data.noaa_gsod.gsod{year}\") # <3>\n .option(\"credentialsFile\", \"bq-key.json\") # <4>\n .load()\n )\n\n\ngsod = (\n reduce(\n DataFrame.union, [read_df_from_bq(year) for year in range(2010, 2020)] # <5>\n )\n .dropna(subset=[\"year\", \"mo\", \"da\", \"temp\"])\n .where(F.col(\"temp\") != 9999.9)\n)\n\n# end::ch08-read-bq[]\n\n# tag::ch08-read-bq-alternate[]\ngsod_alt = read_df_from_bq(2010) # <1>\nfor year in range(2011, 2020):\n gsod_alt = gsod_alt.union(read_df_from_bq(year))\n\n# end::ch08-read-bq-alternate[]\n\n# tag::ch08-scalar-udf[]\n\nimport pandas as pd\n\n\n@F.pandas_udf(T.DoubleType(), F.PandasUDFType.SCALAR) # <1>\ndef f_to_c(degrees):\n \"\"\"Transforms Farhenheit to Celcius.\"\"\"\n return (degrees - 32) * 5 / 9\n\n\nf_to_c.func(pd.Series(range(32, 213))) # <2>\n# 0 
0.000000\n# 1 0.555556\n# 2 1.111111\n# 3 1.666667\n# 4 2.222222\n# ...\n# 176 97.777778\n# 177 98.333333\n# 178 98.888889\n# 179 99.444444\n# 180 100.000000\n# Length: 181, dtype: float64\n\ngsod = gsod.withColumn(\"temp_c\", f_to_c(F.col(\"temp\")))\ngsod.select(\"temp\", \"temp_c\").distinct().show(5)\n\n# +-----+-------------------+\n# | temp| temp_c|\n# +-----+-------------------+\n# | 37.2| 2.8888888888888906|\n# | 85.9| 29.944444444444443|\n# | 53.5| 11.944444444444445|\n# | 71.6| 21.999999999999996|\n# |-27.6|-33.111111111111114|\n# +-----+-------------------+\n# only showing top 5 rows\n\n# end::ch08-scalar-udf[]\n\n# \"year string, mo string, da string, temp double, temp_norm double\",\n\n# tag::ch08-grouped-map-udf-verbose[]\n@F.pandas_udf(\n T.StructType(\n [\n T.StructField(\"stn\", T.StringType()),\n T.StructField(\"year\", T.StringType()),\n T.StructField(\"mo\", T.StringType()),\n T.StructField(\"da\", T.StringType()),\n T.StructField(\"temp\", T.DoubleType()),\n T.StructField(\"temp_norm\", T.DoubleType()),\n ]\n ),\n F.PandasUDFType.GROUPED_MAP,\n)\ndef scale_temperature(temp_by_day):\n \"\"\"Returns a simple normalization of the temperature for a site.\n\n If the temperature is constant for the whole window, defaults to 0.5.\"\"\"\n temp = temp_by_day.temp\n answer = temp_by_day[[\"stn\", \"year\", \"mo\", \"da\", \"temp\"]]\n if temp.min() == temp.max():\n return answer.assign(temp_norm=0.5)\n return answer.assign(temp_norm=(temp - temp.min()) / (temp.max() - temp.min()))\n\n\n# end::ch08-grouped-map-udf-verbose[]\n\n# tag::ch08-grouped-map-udf[]\n\ngsod = gsod.where(F.col(\"year\") == \"2018\") # <1>\ngsod = gsod.groupby(\"stn\", \"year\", \"mo\").apply(scale_temperature)\n\ngsod.show(5, False)\n# +------+----+---+---+-------------------+-------------------+\n# |stn |year|mo |da |temp_c |temp_norm |\n# +------+----+---+---+-------------------+-------------------+\n# |010250|2018|12 |08 |-5.666666666666667 |0.06282722513088991|\n# |010250|2018|12 |27 |-2.0555555555555554|0.40314136125654443|\n# |010250|2018|12 |31 |-1.6111111111111103|0.4450261780104712 |\n# |010250|2018|12 |19 |-2.4444444444444438|0.3664921465968586 |\n# |010250|2018|12 |04 |2.5555555555555562 |0.8376963350785341 |\n# +------+----+---+---+-------------------+-------------------+\n# only showing top 5 rows\n\n# end::ch08-grouped-map-udf[]\n\n# tag::ch08-grouped-aggregate-udf[]\nfrom sklearn.linear_model import LinearRegression # <1>\n\n\n@F.pandas_udf(T.DoubleType(), F.PandasUDFType.GROUPED_AGG)\ndef rate_of_change_temperature(day, temp):\n \"\"\"Returns the slope of the daily temperature for a given period of time.\"\"\"\n return (\n LinearRegression() # <2>\n .fit(X=day.astype(\"int\").values.reshape(-1, 1), y=temp) # <3>\n .coef_[0] # <4>\n )\n\n# end::ch08-grouped-aggregate-udf[]\n\n\n# tag::ch08-agg[]\n\nresult = gsod.groupby(\"stn\", \"year\", \"mo\").agg(\n rate_of_change_temperature(gsod[\"da\"], gsod[\"temp_norm\"]).alias( # <1>\n \"rt_chg_temp\"\n )\n)\n\nresult.show(5, False)\n# +------+----+---+---------------------+\n# |stn |year|mo |rt_chg_temp |\n# +------+----+---+---------------------+\n# |010250|2018|12 |-0.01014397905759162 |\n# |011120|2018|11 |-0.01704736746691528 |\n# |011150|2018|10 |-0.013510329829648423|\n# |011510|2018|03 |0.020159116598556657 |\n# |011800|2018|06 |0.012645501680677372 |\n# +------+----+---+---------------------+\n# only showing top 5 rows\n\nresult.groupby(\"stn\").agg(\n F.sum(F.when(F.col(\"rt_chg_temp\") > 0, 1).otherwise(0)).alias(\"temp_increasing\"),\n 
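# denominator for the ratio below: how many monthly slopes each station contributed\n    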
F.count(\"rt_chg_temp\").alias(\"count\"),\n).where(F.col(\"count\") > 6).select(\n    F.col(\"stn\"),\n    (F.col(\"temp_increasing\") / F.col(\"count\")).alias(\"temp_increasing_ratio\"),\n).orderBy(\n    \"temp_increasing_ratio\"\n).show(\n    5, False\n)\n# +------+---------------------+ <2>\n# |stn   |temp_increasing_ratio|\n# +------+---------------------+\n# |681115|0.0                  |\n# |384572|0.0                  |\n# |682720|0.0                  |\n# |672310|0.0                  |\n# |654530|0.08333333333333333  |\n# +------+---------------------+\n# only showing top 5 rows\n# end::ch08-agg[]\n\n# tag::ch08-local[]\ngsod_local = gsod.where(\"year = '2018' and mo = '08' and stn = '710920'\").toPandas()\n\n\nprint(rate_of_change_temperature.func(gsod_local[\"da\"], gsod_local[\"temp_norm\"]))\n# -0.007830974115511494\n\n# end::ch08-local[]\n","sub_path":"code/Ch08/book_code.py","file_name":"book_code.py","file_ext":"py","file_size_in_byte":12290,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"545647677","text":"#import boto3\n#import botocore\nimport sys\nimport os\nimport shutil\nimport json\nimport pickle\nimport lmdb\n#import uuid\nimport subprocess\nimport argparse\nimport time\nimport S3\nfrom concurrent.futures import ThreadPoolExecutor\n#import asyncio\nimport requests\n\nmap_size = 100 * 1024 * 1024 * 1024 \n\nendpoint_pdf = 'http://localhost:8060/annotateSoftwarePDF'\nendpoint_txt = 'http://localhost:8060/annotateSoftwareText'\n\nclass software_mention_client(object):\n    \"\"\"\n    Client for using the GROBID software mention service. \n    \"\"\"\n\n    def __init__(self, config_path='./config.json'):\n        self.config = None\n        \n        # standard lmdb environment for storing processed biblio entry uuid\n        self.env = None\n\n        # lmdb environment for keeping track of PDF annotation failures\n        self.env_fail = None\n\n        self._load_config(config_path)\n        self._init_lmdb()\n\n        if self.config['bucket_name'] is not None and len(self.config['bucket_name']) > 0:\n            self.s3 = S3.S3(self.config)\n\n    def _load_config(self, path='./config.json'):\n        \"\"\"\n        Load the json configuration \n        \"\"\"\n        config_json = open(path).read()\n        self.config = json.loads(config_json)\n\n    def _init_lmdb(self):\n        # open in write mode\n        envFilePath = os.path.join(self.config[\"data_path\"], 'entries')\n        self.env = lmdb.open(envFilePath, map_size=map_size)\n\n        envFilePath = os.path.join(self.config[\"data_path\"], 'fail')\n        self.env_fail = lmdb.open(envFilePath, map_size=map_size)\n\n    def annotation(self, file_in, file_out):\n        the_file = {'input': open(file_in, 'rb')}\n        response = requests.post(endpoint_pdf, files=the_file)\n        jsonStr = None\n        if response.status_code >= 500:\n            print('[{0}] Server Error'.format(response.status_code))\n        elif response.status_code == 404:\n            print('[{0}] URL not found: [{1}]'.format(response.status_code, endpoint_pdf))\n        elif response.status_code == 401:\n            print('[{0}] Authentication Failed'.format(response.status_code))\n        elif response.status_code >= 400:\n            print('[{0}] Bad Request'.format(response.status_code))\n            print(response.content )\n        elif response.status_code >= 300:\n            print('[{0}] Unexpected redirect.'.format(response.status_code))\n        elif response.status_code == 200:\n            jsonStr = response.json()\n        else:\n            print('Unexpected Error: [HTTP {0}]: Content: {1}'.format(response.status_code, response.content))\n\n        if jsonStr is not None:\n            print(jsonStr)\n\n\n    def annotation_collection(self):\n        # init lmdb transactions\n        txn = self.env.begin(write=True)\n        txn_fail = 
self.env_fail.begin(write=True)\n # processing of the collection entries still needs to be implemented here\n\n def reprocess_failed(self):\n \"\"\"\n Reprocess the PDF documents previously marked as failed (placeholder, not implemented yet)\n \"\"\"\n pass\n\n\n def reset(self):\n \"\"\"\n Remove the local lmdb keeping track of the state of advancement of the annotation and\n of the failed entries\n \"\"\"\n # close environments\n self.env.close()\n self.env_fail.close()\n\n envFilePath = os.path.join(self.config[\"data_path\"], 'entries')\n shutil.rmtree(envFilePath)\n\n envFilePath = os.path.join(self.config[\"data_path\"], 'fail')\n shutil.rmtree(envFilePath)\n\n # re-init the environments\n self._init_lmdb()\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description = \"GROBID Software Mention recognition client\")\n parser.add_argument(\"--data-path\", default=None, help=\"path to the JSON dump file created by biblio-glutton-harvester\") \n parser.add_argument(\"--config\", default=\"./config.json\", help=\"path to the config file, default is ./config.json\") \n parser.add_argument(\"--reprocess\", action=\"store_true\", help=\"Reprocess failed PDF\") \n parser.add_argument(\"--reset\", action=\"store_true\", help=\"Ignore previous processing states, and re-init the annotation process from the beginning\") \n parser.add_argument(\"--file-in\", default=None, help=\"A PDF input file to be processed by the GROBID software mention recognizer\") \n parser.add_argument(\"--file-out\", default=None, help=\"Path to output the software mentions in JSON format, extracted from the PDF file-in\") \n \n args = parser.parse_args()\n\n data_path = args.data_path\n config_path = args.config\n reprocess = args.reprocess\n reset = args.reset\n file_in = args.file_in\n file_out = args.file_out\n\n client = software_mention_client(config_path=config_path)\n\n if reset:\n client.reset()\n\n if reprocess:\n client.reprocess_failed()\n elif data_path is not None: \n client.annotation_collection()\n elif file_in is not None:\n client.annotation(file_in, file_out)\n\n","sub_path":"python/software_mention_client.py","file_name":"software_mention_client.py","file_ext":"py","file_size_in_byte":4818,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"293293128","text":"\"\"\"\nVFS Demo App\nCopyright (C) 2014 Kibble Games Inc. In cooperation with Vancouver Film School All Rights Reserved. \n\n\"\"\"\nimport logging\n\nfrom app.models.user import User\n\n# this is the parent class of all pages that need to respond to AJAX messages\nfrom app.views.page_controller import PageController \n\nfrom app.views.sub import SubPage\n\n\n\"\"\"\nHome Page handler\n\n\"\"\" \nclass IndexPage( PageController ):\n \n def get(self): \n\n # use a sub page partial to render some HTML to use within this page. Optional. 
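The markup\n # comes from SubPage.get_markup() and is passed to the template below as 'current_panel'.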
\n panel = SubPage()\n markup = panel.get_markup()\n \n tValues = {\n 'msg': \"Scott is the Winner\",\n 'current_panel': markup\n }\n\n logging.debug( \"rendering main page\" )\n self.send_html( '../templates/index.html', tValues ) \n return\n \n \n def error(self, cmd, return_code):\n \"\"\" \n invalid command handler \n \n \"\"\" \n logging.warning('MainPage.post() unrecognized command['+cmd+']')\n self.send_json( {'returnCode': return_code} )\n return\n \n \n def do_add_user(self, params):\n \n # initialize the result, set the value to indicate an error\n result = { 'returnCode': -1 }\n \n # Get player data from self.request\n pName = params['PlayerName']\n dName = params['PersonaName']\n \n # Create and save the persona so it has a key\n # should really check for an existing persona here first\n newUser = User( name = dName )\n \n try:\n # try blocks should be limited just to calls that may fail \n userKey = newUser.put()\n \n except ValueError:\n logging.error( 'Attempt to save a Player/Driver failed' )\n self.send_json( result )\n return\n \n result['keySafe'] = userKey.urlsafe()\n result['playerName'] = pName\n result['driverName'] = dName\n result['returnCode'] = 0\n \n self.send_json( result )\n return\n \n\n def do_get_player_data(self, params):\n \"\"\"\n Command handler for 'get_player_data' command\n \n \"\"\"\n result = { 'returnCode': 0 }\n \n #result['playerList'] = self.get_player_list()\n self.send_json( result )\n return\n \n \n def get_player_list( self ):\n \"\"\"\n Returns player list from datastore\n \n \"\"\"\n # get players\n query = User.query() \n player_list = None # query.fetch( self.MAX_PLAYERS ) \n return player_list\n ","sub_path":"Telemetry/app/views/index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":2687,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"100731139","text":"from transitions.extensions import GraphMachine\nfrom utils import *\nimport json\nimport firebase_admin\nfrom firebase_admin import credentials\nfrom firebase_admin import firestore\nfrom make_post import *\nfrom uploadtoimgur import *\n\nclass TocMachine(GraphMachine):\n # def __init__(self, **machine_configs):\n # self.machine = GraphMachine(\n # model=self,\n # **machine_configs\n # )\n\n # def is_going_to_state1(self, event):\n # if event.get(\"message\"):\n # text = event['message']['text']\n # return text.lower() == 'go to state1'\n # elif event.get(\"postback\"):\n # text = event['postback']['title']\n # return text.lower() == 'go to state1'\n # return False\n\n # def is_going_to_state2(self, event):\n # if event.get(\"message\"):\n # text = event['message']['text']\n # return text.lower() == 'go to state2'\n # return False\n\n # def on_enter_state1(self, event):\n # print(\"I'm entering state1\")\n # sender_id = event['sender']['id']\n # responese = send_text_message(sender_id, \"I'm entering state1\")\n # self.go_back()\n\n # def on_exit_state1(self):\n # print('Leaving state1')\n\n # def on_enter_state2(self, event):\n # print(\"I'm entering state2\")\n # sender_id = event['sender']['id']\n # # response = send_postback(sender_id)\n # # response = send_image_url(sender_id)#,\"https://i.imgur.com/o7lmGSy.png\")\n # response = send_generic(sender_id)\n # # responese = send_text_message(sender_id, \"I'm entering state2\")\n # self.go_back()\n\n # def on_exit_state2(self):\n # print('Leaving state2')\n count = 1\n cred = credentials.Certificate('./serviceAccount.json')\n\n # initialize firebase; note that it must not be initialized more than once\n 
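# A minimal guard against double initialization could look like the sketch below\n # (hedged: firebase_admin._apps is a private attribute, and initialize_app raises\n # ValueError if the default app already exists):\n # if not firebase_admin._apps:\n # firebase_admin.initialize_app(cred)\n 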
firebase_admin.initialize_app(cred)\n\n # initialize the firestore client\n db = firestore.client()\n\n def __init__(self, **machine_configs):\n self.machine = GraphMachine(\n model=self,\n **machine_configs\n )\n\n def is_going_to_user(self):\n return True\n # if event.get(\"message\"):\n # text = event['message']['text']\n # return text.lower() == 'start'\n # return False\n\n def is_going_to_state1(self, event):\n if event.get(\"message\"):\n text = event['message']['text']\n if text.lower()!='about' and text.lower()!='post' and text.lower()!='getinfo' :\n return True\n else:\n return False\n if event.get(\"postback\"):\n text = event['postback']['title']\n if text.lower()!='about' and text.lower()!='post' and text.lower()!='getinfo' :\n return True\n else:\n return False\n # return text.lower() == 'about'\n # return bool(text.strip())\n # return text.lower() == 'go to state1'\n\n def is_going_to_state2(self, event):\n if event.get(\"message\"):\n text = event['message']['text']\n return text.lower() == 'about'\n if event.get(\"postback\"):\n text = event['postback']['title']\n return text.lower() == 'about'\n return False\n # return text.lower() == 'about'\n\n def is_going_to_state3(self, event):\n if event.get(\"message\"):\n text = event['message']['text']\n return text.lower() == 'post'\n if event.get(\"postback\"):\n text = event['postback']['title']\n return text.lower() == 'post'\n return False\n # return text.lower() == 'post'\n\n def is_going_to_state4(self, event):\n if event.get(\"message\"):\n text = event['message']['text']\n return text.lower() == 'getinfo'\n if event.get(\"postback\"):\n text = event['postback']['title']\n return text.lower() == 'getinfo'\n return False\n # return text.lower() == 'getinfo'\n\n def is_going_to_class(self, event):\n if event.get(\"message\"):\n text = event['message']['text']\n return bool(text.strip())\n return False\n # return bool(text.strip())\n\n def is_going_to_date(self, event):\n if event.get(\"message\"):\n text = event['message']['text']\n return bool(text.strip())\n return False\n # return bool(text.strip())\n\n def is_going_to_time(self, event):\n if event.get(\"message\"):\n text = event['message']['text']\n return bool(text.strip())\n return False\n # return bool(text.strip())\n\n def on_enter_user(self):\n # sender_id = event['sender']['id']\n # msg = \"I'm entering user\\nkey :\\n about \\n post \\n getinfo\\n\"\n # responese = send_text_message(sender_id, msg)\n print(\"I'm entering user\")\n # print('CURRENT STATE: ' + machine.state)\n # print(\"key :\\n about \\n post \\n getinfo\\n\")\n # postback button\n # 1. about timing chatbot\n # 2. I want to share my event...\n # 3. 
I want to get events' info...\n\n\n def on_enter_state1(self,event):\n sender_id = event['sender']['id']\n msg = \"Please push the following button or\\nkey the following command :\\n about \\n post \\n getinfo\\n\"\n responese = send_postback(sender_id, msg)\n # sender_id = event['sender']['id']\n # msg = \"please key start\"\n # responese = send_text_message(sender_id, msg)\n # print(\"I'm entering state1\")\n # print('CURRENT STATE: ' + machine.state)\n self.go_back()\n\n def on_exit_state1(self):\n print('Leaving state1')\n\n def on_enter_state2(self,event):\n sender_id = event['sender']['id']\n msg = \"「好的街舞活動,應該要讓更多人知道。」\\n\\n不管是教室成果展、party、battle、熱舞社迎新舞展....,\\n只要是與街舞相關的大小活動,都歡迎告訴我\\n\\n我將固定更新於 Facebook 粉專的置頂貼文,並新增圖片至粉專相簿中,曝光您的活動!\\n\\n 若有人向我詢問相關活動,我也會向他們推薦您的活動!\\n\\n還等什麼,趕快按下'post'向我介紹你的活動吧!\"\n response = send_postback(sender_id,msg)\n # responese = send_text_message(sender_id, msg)\n # print(\"I'm entering state2\")\n # print('CURRENT STATE: ' + machine.state)\n # print(\"my name is bot bot\")\n self.go_back()\n\n def on_exit_state2(self):\n print('Leaving state2')\n\n def on_enter_state3(self,event):\n sender_id = event['sender']['id']\n msg = \"告訴我你的活動名稱!\"\n responese = send_text_message(sender_id, msg)\n # print(\"I'm entering state3\")\n # print('CURRENT STATE: ' + machine.state)\n # print(\"what is the event's name?\")\n # self.go_back()\n\n\n def on_enter_state4(self,event):\n sender_id = event['sender']['id']\n msg = \"Tell me the date/month!\\nI can reply you what event will take place!\\nPlease key the date/month in this format\\nMM/DD EX:11/31\\nM EX:7\\nMM EX:12\"\n responese = send_text_message(sender_id, msg)\n # print(\"I'm entering state4\")\n # print('CURRENT STATE: ' + machine.state)\n # print(\"enter the date\\nI can tell you what event will take place on the day!\\n\")\n # print(\"please key the date in this format\\nMMDD EX:1231\\nMM EX:11\")\n # self.go_back()\n\n def on_enter_getpost(self,event):\n text = event['message']['text']\n sender_id = event['sender']['id']\n flag = text.find('/')\n if flag == -1:\n flag = False\n else:\n flag = True\n if flag :\n tmp = text.split('/')\n docs = self.db.collection('timing_event').where('month','==',int(tmp[0])).where('date','==',int(tmp[1])).get()\n for doc in docs:\n doc = doc.to_dict()\n response1 = send_image_url(sender_id,doc['imgurl'])\n else :\n docs = self.db.collection('timing_event').where('month','==',int(text)).get()\n for doc in docs:\n doc = doc.to_dict()\n response1 = send_image_url(sender_id,doc['imgurl'])\n msg = \"Here are all the events!!\\nHave a nice day!^^\"\n responese = send_postback(sender_id, msg)\n # ensure the input format is correct\n # find all the imageURL in firebase\n # send all the image back to client\n # print(\"I'm entering getpost\")\n # print('CURRENT STATE: ' + machine.state)\n # print(\"this is all the event on the day!!\\nhave a noce day!^^\")\n self.go_back()\n\n def on_enter_class(self,event):\n # update database\n collection_ref = list(self.db.collection('timing_event').get())\n self.count = len(collection_ref) + 2\n doc_name = \"event%d\" % self.count\n doc = {}\n doc['title'] = event['message']['text']\n self.db.collection('timing_event').document(doc_name).set(doc)\n # reply\n sender_id = event['sender']['id']\n msg = \"活動類別(限單選)\\n請輸入以下選項:\\nParty, Workshop, Dance Camp, Lecture, Showcase, Battle, Audition\"\n responese = send_text_message(sender_id, msg)\n # print(\"I'm entering class\")\n # print('CURRENT STATE: ' + machine.state)\n # print(\"what is the event's 
class?\")\n # self.go_back()\n\n def on_enter_date(self,event):\n # update database\n doc = {}\n doc['class'] = event['message']['text']\n doc_name = \"event%d\" % self.count\n self.db.collection('timing_event').document(doc_name).update(doc)\n # reply\n sender_id = event['sender']['id']\n msg = \"活動日期/日期區間(Ex:11/31)\"\n responese = send_text_message(sender_id, msg)\n # print(\"I'm entering date\")\n # print('CURRENT STATE: ' + machine.state)\n # print(\"what is the event's date?\")\n # self.go_back()\n\n def on_enter_time(self,event):\n # update database\n text = event['message']['text']\n tmp = text.split('/')\n doc = {}\n doc['month'] = int(tmp[0])\n doc['date'] = int(tmp[1])\n doc_name = \"event%d\" % self.count\n self.db.collection('timing_event').document(doc_name).update(doc)\n # reply\n sender_id = event['sender']['id']\n msg = \"活動時段\"\n responese = send_text_message(sender_id, msg)\n # print(\"I'm entering time\")\n # print('CURRENT STATE: ' + machine.state)\n # print(\"what is the event's time?\")\n # self.go_back()\n\n def on_enter_location(self,event):\n # update database\n doc = {}\n doc['time'] = event['message']['text']\n doc_name = \"event%d\" % self.count\n self.db.collection('timing_event').document(doc_name).update(doc)\n # reply\n sender_id = event['sender']['id']\n msg = \"活動地點\"\n responese = send_text_message(sender_id, msg)\n # print(\"I'm entering location\")\n # print('CURRENT STATE: ' + machine.state)\n # print(\"what is the event's the location?\")\n # self.go_back()\n\n def on_enter_website(self,event):\n # update database\n doc = {}\n doc['location'] = event['message']['text']\n doc_name = \"event%d\" % self.count\n self.db.collection('timing_event').document(doc_name).update(doc)\n # reply\n sender_id = event['sender']['id']\n msg = \"活動網址\"\n responese = send_text_message(sender_id, msg)\n # print(\"I'm entering website\")\n # print('CURRENT STATE: ' + machine.state)\n # print(\"what is the event's website URL??\")\n # self.go_back()\n\n def on_enter_makepost(self,event):\n # update database\n doc = {}\n doc['url'] = event['message']['text']\n doc_name = \"event%d\" % self.count\n self.db.collection('timing_event').document(doc_name).update(doc)\n # reply\n # making post based on database \n doc = self.db.collection('timing_event').document(doc_name).get().to_dict()\n makepost(doc,self.count)\n # upload image\n imgurl = upload2imgur(self.count)\n # update database\n doc = {}\n doc['imgurl'] = imgurl\n doc_name = \"event%d\" % self.count\n self.db.collection('timing_event').document(doc_name).update(doc)\n # send image to client\n sender_id = event['sender']['id']\n msg = \"This is the post will be show!!\\nif have any problem, feel free to contact us!\"\n response1 = send_image_url(sender_id,imgurl)\n response2 = send_postback(sender_id, msg)\n # print(\"I'm entering makepost\")\n # print('CURRENT STATE: ' + machine.state)\n # call make_post()\n # upload the image to imgur\n # send the image back to client\n # print(\"this is the post will be show!!\\nif have any problem, feel free to contact us!\")\n self.go_back()\n","sub_path":"fsm.py","file_name":"fsm.py","file_ext":"py","file_size_in_byte":13038,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"58190465","text":"import os\nimport sys\nimport json\nimport warnings\nimport logging\nimport logging.config\nfrom logging import LogRecord\nimport datetime\nimport multiprocessing\nimport time\nimport inspect\nimport functools\n\nimport tqdm\n\nfrom 
MCQ.base import DecoratorContextManager\n\nfrom .io import RotateItems\n\n\nclass WaitingBar(DecoratorContextManager):\n def __init__(self, msg: str, ncols: int = 10):\n assert ncols > 8, f\"ncols must greater than 8, got {ncols}\"\n self._msg = msg\n self._ticker = None\n self._stillRunning = None\n self._ncols = ncols\n self.animation = list()\n # \" = \"\n template = (\" \" * (ncols + 1) + \"=\" * (ncols - 8) + \" \" * (ncols + 1))\n for i in range(2 * (ncols - 2)):\n start = 2 * (ncols - 2) - i\n end = 3 * (ncols - 2) - i\n self.animation.append(\"[\" + template[start:end] + \"]\" + r\" %s\")\n\n def __enter__(self):\n self._stillRunning = multiprocessing.Value(\"b\", True)\n self._ticker = multiprocessing.Process(name=\"waitingBarTicker\", target=self._print, args=[self._stillRunning])\n self._ticker.start()\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n self._stillRunning.value = False\n self._ticker.join()\n print(\" \" * (len(self._msg) + self._ncols + 1), end=\"\\r\", file=sys.stderr)\n\n def _print(self, stillRunning: multiprocessing.Value):\n i = 0\n while bool(stillRunning.value):\n print(self.animation[i % len(self.animation)] % self._msg, end='\\r', file=sys.stderr)\n time.sleep(.06)\n i += 1\n\n\nclass LoggingDisabler:\n def __init__(self, logger: logging.Logger, disable: bool):\n self._logger = logger\n self._disable = disable\n self._previous_status = False\n\n def __enter__(self):\n if self._disable:\n self._previous_status = self._logger.disabled\n self._logger.disabled = True\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n if self._disable:\n self._logger.disabled = self._previous_status\n\n\nclass DeprecationFilter:\n def filter(self, record: LogRecord):\n if \"depreca\" in record.msg:\n return 0\n return 1\n\nclass TqdmLoggingHandler(logging.Handler):\n def emit(self, record):\n try:\n msg = self.format(record)\n tqdm.tqdm.write(msg)\n self.flush()\n except (KeyboardInterrupt, SystemExit):\n raise\n except:\n self.handleError(record)\n\ndef ConfigLogging(logDir: str, rootName: str, level: str, useTqdm: bool = False, logName: str = None, rotateLogs: int = 10, ignoreWarnings: list = None) -> logging.Logger:\n os.makedirs(logDir, exist_ok=True)\n if rotateLogs > 0:\n RotateItems(logDir, rotateLogs)\n if logName is None:\n fPrefix = os.path.join(logDir, \"{0}\".format(datetime.datetime.now().strftime(r\"%y%m%d-%H%M%S\")))\n else:\n fPrefix = os.path.join(logDir, logName)\n logging_config = {\n \"version\": 1,\n \"formatters\": {\n \"full\": {\n \"format\": \"%(asctime)s - %(name)s - %(levelname)s - %(message)s\"\n },\n \"simple\": {\n \"format\": \"%(asctime)s - %(message)s\",\n \"datefmt\": \"%m/%d %H:%M:%S\"\n }\n },\n \"filters\": {\n \"deprecation\": {\n \"()\": DeprecationFilter\n }\n },\n \"handlers\": {\n \"console\": {\n \"class\": TqdmLoggingHandler if useTqdm else \"logging.StreamHandler\",\n \"level\": level,\n \"formatter\": \"simple\",\n \"stream\": \"ext://sys.stdout\"\n },\n \"info_file\": {\n \"class\": \"logging.FileHandler\",\n \"level\": \"INFO\",\n \"formatter\": \"full\",\n \"filename\": f\"{fPrefix}.log\",\n \"mode\": \"w\"\n },\n \"err_file\": {\n \"class\": \"logging.FileHandler\",\n \"level\": \"ERROR\",\n \"formatter\": \"full\",\n \"filename\": f\"{fPrefix}.err\",\n \"mode\": \"w\"\n }\n },\n \"loggers\": {\n rootName: {\n \"propagate\": False,\n \"level\": level,\n \"handlers\": [\n \"console\",\n \"info_file\",\n \"err_file\"\n ],\n \"filters\": [\n \"deprecation\"\n ]\n }\n }\n }\n logging.config.dictConfig(logging_config)\n\n def 
handleException(exc_type, exc_value, exc_traceback):\n if issubclass(exc_type, KeyboardInterrupt):\n sys.__excepthook__(exc_type, exc_value, exc_traceback)\n return\n logger = logging.getLogger(rootName)\n logger.exception(\"Uncaught exception\", exc_info=(exc_type, exc_value, exc_traceback))\n sys.excepthook = handleException\n\n def handleWarning(message, category, filename, lineno, file=None, line=None):\n logger = logging.getLogger(rootName)\n if ignoreWarnings is not None and category in ignoreWarnings:\n return\n logger.warning(warnings.formatwarning(message, category, filename, lineno, line))\n warnings.showwarning = handleWarning\n return logging.getLogger(rootName)\n\n\ndef PPrint(d: dict) -> str:\n return str(json.dumps(d, default=lambda x: x.__dict__, indent=4))\n","sub_path":"src/MCQ/utils/logging.py","file_name":"logging.py","file_ext":"py","file_size_in_byte":5445,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"85784573","text":"from typing import List\n\nfrom utils.dictionary import Dictionary\nfrom trader.share import share\n\n\nclass Positions:\n configuration: Dictionary\n book: List[share] = []\n initial_budget: float = 0.\n budget = 0.\n\n def __init__(self, configuration, initial_budget):\n self.params = configuration\n self.book = []\n self.initial_budget = initial_budget\n self.budget = self.initial_budget\n\n def reset(self):\n del self.book[:]\n self.book = []\n\n def buy(self, num, price, mode='bull'):\n \"\"\"\n Buy a number of positions, at a given price.\n Returns the cost of the buy operation.\n \"\"\"\n self.book.append(share(price, num, mode))\n self.budget -= self.book[-1].cost_\n return self.book[-1].cost_\n\n def sell(self, num_shares_to_sell, sell_price):\n \"\"\"\n Sell `num` positions from our portfolio. The shares must be already\n updated with the proper current price, before executing the sell\n operation.\n\n - If num is greater than the number of shares, then sell all.\n - If no positions, return 0, 0.\n - If trying to sell more shares than owned, return 0, 0.\n\n :param num_shares_to_sell: ditto\n :param sell_price: the selling price\n\n :return: the amount at which the positions were sold,\n and the benefit.\n \"\"\"\n num_shares_i_have = self.num_shares()\n if num_shares_i_have == 0. or num_shares_to_sell > num_shares_i_have:\n return 0., 0.\n # if num_shares_i_have == num_shares_to_sell:\n # return self.sell_all(sell_price)\n\n # Sort positions by profit to sell first those with max prof.\n sell_book: list[share] = sorted(\n self.book, key=lambda x: x.performance_, reverse=True)\n num_shares_sold = 0.\n total_income = 0.\n total_profit = 0.\n idx = 0\n while num_shares_sold != num_shares_to_sell:\n num = num_shares_to_sell - num_shares_sold\n sold, income, profit = self.sell_position(sell_book[idx], num,\n sell_price)\n total_income += income\n total_profit += profit\n num_shares_sold += sold\n idx += 1\n self.budget += (total_income + total_profit)\n return total_income, total_profit\n\n # def sell_all(self, price):\n # \"\"\" Sell all the options we have. 
Clear the book \"\"\"\n # self.update(price)\n # sell_value = self.cost()\n # sell_profit = self.profit()\n # del self.book[:]\n # return sell_value, sell_profit\n\n def sell_position(self, position: share, num_shares: float,\n sell_price: float):\n \"\"\"\n Sell the number of shares specified from position.\n - If the nr of shares to sell is lower than the nr of shares\n available, then those are removed from the position.\n - If as a result the position has zero shares, the position is removed\n - If the nr. of shares to sell is greater than the nr of shares\n available the position is also removed\n\n :param position: the position to be sold\n :param num_shares: the nr of shares to sell\n :param sell_price: the selling price\n :return: the number of shares sold, income and profit\n \"\"\"\n # Get the reference in the actual book, not the selling book.\n position = self.book[self.book.index(position)]\n\n # Adjust how many shares can I sell.\n if num_shares > position.num_:\n num_shares = position.num_\n\n # Sell the adjusted amount from the position\n value, profit = position.sell(num_shares) # , sell_price)\n\n # Check if position is empty, to remove it from the book.\n if position.num_ == 0.:\n self.book.remove(position)\n\n return num_shares, value, profit\n\n def num_positions(self):\n \"\"\" Returns the total number of positions currently in portfolio \"\"\"\n if len(self.book) == 0:\n return 0.\n return len(self.book)\n\n def num_shares(self):\n \"\"\" Returns the total number of shares purchased \"\"\"\n num_shares = 0.\n for s in self.book:\n num_shares += s.num_\n return num_shares\n\n def update(self, current_price):\n \"\"\" Updates each position's value according to the new price. \"\"\"\n for s in self.book:\n s.update(current_price)\n\n def value(self):\n \"\"\" Computes the value of the positions with the price passed \"\"\"\n total_value = 0.\n for s in self.book:\n total_value += s.value_\n return total_value\n\n def cost(self):\n \"\"\" Computes the cost of the positions with the price passed \"\"\"\n total_cost = 0.\n for s in self.book:\n total_cost += s.cost_\n return total_cost\n\n def profit(self) -> float:\n \"\"\" Returns the total profit accumulated by each share \"\"\"\n total_profit = 0.\n for s in self.book:\n total_profit += s.profit_\n return total_profit\n\n def debug(self):\n if not self.book:\n return\n debug = self.params.log.debug\n ht = ' | num.( b.price) | cost | value | perf | proft | m |'\n hl = ' +----------------+-----------+-----------+-------+-------+---+'\n debug(hl)\n debug(ht)\n debug(hl)\n # ' | 12.4(123456.8) | 123456.89 | 123456.89 | 12.45 | 12.45 | c |')\n fm = ' | {:<4.1f}({:>8.1f}) | {:>9.2f} | {:>9.2f} | {:>5.2f} '\n fm += '| {:>5.2f} | {} |'\n for s in self.book:\n debug(fm.format(s.num_, s.buy_price_, s.cost_, s.value_,\n s.performance_, s.profit_,\n 'B' if s.mode_ == 'bull' else 'b'))\n debug(hl)\n fm = ' | {:<14.1f} | {:>9.2f} | {:>9.2f} | ----- | {:>5.2f} | - |'\n debug(fm.format(\n self.num_shares(), self.cost(), self.value(), self.profit()))\n debug(hl)\n","sub_path":"src/trader/positions.py","file_name":"positions.py","file_ext":"py","file_size_in_byte":6132,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"576792038","text":"from algebras.operations import Steenrod\nfrom algebras.constructions import FreeModuleMod2 as FM\nfrom algebras import linalg\nfrom GUI.specseq import SpecSeq\nfrom typing import List, Tuple\n\n\nclass Ext:\n def __init__(self, s_max, t_max):\n 
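\"\"\"Hold a minimal resolution up to bidegree (s_max, t_max): h[s] collects (internal_deg, diff) pairs for homological degree s.\"\"\"\n 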
self.s_max = s_max\n self.t_max = t_max\n self.h = [[] for _ in range(s_max + 1)] # type: List[List[Tuple[int, FM]]]\n\n def compute_minimal(self, ring):\n \"\"\" return Ext_R(k, k) \"\"\"\n for r in ring.basis(0):\n FM.set_ring(type(r))\n break\n\n s_max, t_max = self.s_max, self.t_max\n self.h[0].append((0, FM.gen(1))) # (internal_deg, diff)\n\n # initialize the augmented ideal of ring\n ideal = [None] # type: List[Tuple[ring]]\n for t in range(1, t_max + 1):\n ideal.append(tuple(ring.basis(t)))\n\n # compute the resolution by internal_deg\n for t in range(1, t_max + 1):\n kernel_t = linalg.VectorSpaceMod2(r * FM.gen(\"a_{0,0}\") for r in ideal[t])\n for s in range(1, s_max + 1):\n print(\"(s, t)=({}, {})\".format(s, t))\n my_map = linalg.LinearMapKMod2()\n for index_gen in range(len(self.h[s])):\n t_gen, gen_kernel = self.h[s][index_gen]\n my_map.add_maps((sq * FM.gen(\"a_{{{}, {}}}\".format(s, index_gen)), sq * gen_kernel)\n for sq in ideal[t-t_gen])\n for gen in (kernel_t / my_map.image).basis(FM):\n self.h[s].append((t, gen))\n kernel_t = my_map.kernel\n\n def __str__(self):\n result = \"\"\n for s in range(len(self.h)):\n for j in range(len(self.h[s])):\n result += \"$a_{{{}, {}}} ({}, {}) \\\\to {}$\\\\\\\\\\n\".\\\n format(s, j, self.h[s][j][0] - s, s, self.h[s][j][1])\n result += \"\\\\\\\\\\n\"\n return result\n\n def get_spec(self, x_max, y_max) -> SpecSeq:\n spec = SpecSeq(x_max, y_max, \"Adams Discrete\")\n for s in range(len(self.h)):\n for j in range(len(self.h[s])):\n t = self.h[s][j][0]\n if s <= y_max and t - s <= x_max:\n spec.add_single_gen(\"a_{{{}, {}}}\".format(s, j), (s, t))\n return spec\n\n\n# tests -----------------------------\ndef test_ext():\n ext = Ext(15, 47)\n ext.compute_minimal(Steenrod)\n spec = ext.get_spec(30, 15)\n print(ext)\n spec.draw()\n\n\ndef test():\n FM.set_ring(Steenrod)\n print(Steenrod.gen(2) * FM.gen(\"x\"))\n\n\nif __name__ == \"__main__\":\n # test_amod()\n test_ext()\n # test()\n\n# 333, 130, 82\n","sub_path":"algebras/resolution.py","file_name":"resolution.py","file_ext":"py","file_size_in_byte":2621,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"552243754","text":"import random\r\n\r\n\r\ndef rand(**args):\r\n a_sum = sum(args.values())\r\n if a_sum != 100:\r\n raise Exception(\"Wrong 'values' probabilities.\\nValues: {0}\\nSum: {1}\".format(args.values(), a_sum))\r\n\r\n prob = 100\r\n for key, value in args.items():\r\n a = random.randint(0, prob)\r\n if a <= value:\r\n return key\r\n else:\r\n prob -= value\r\n\r\n\r\ndef enum(*sequential, **named):\r\n enums = dict(zip(sequential, range(len(sequential))), **named)\r\n return type('Enum', (), enums)\r\n","sub_path":"EXTRA_FILES/extra_funcs.py","file_name":"extra_funcs.py","file_ext":"py","file_size_in_byte":525,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"356760958","text":"def criptografar(txt):\n '''\n Encryption function:\n param 'txt' - text to be encrypted\n '''\n mensagem = str(txt)\n\n chave = 3\n cifrada = \"\"\n n = 128\n\n for letra in mensagem:\n indice = ord(letra)\n nova_letra = chr((indice + chave)%n)\n cifrada = cifrada + nova_letra\n\n return cifrada\n\n\ndef descriptografar(txt):\n '''\n Decryption function:\n param 'txt' - text to be decrypted\n '''\n mensagem = str(txt)\n \n chave = -3\n descifrada = \"\"\n n = 128\n\n for letra in mensagem:\n indice = ord(letra)\n nova_letra = chr((indice + chave)%n)\n 
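# chave is -3 here, so each character is shifted back; % n wraps within the 128-slot ASCII table\n 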
descifrada = descifrada + nova_letra \n\n return descifrada\n\n\ndef armazenar(txt1, txt2):\n '''\n Function to store data in text files:\n param 'txt1' - first text to be written; placed next to the second\n param 'txt2' - second text to be written; placed next to the first\n '''\n if txt1 != '' and txt2 != '':\n with open('resultado.txt', 'at') as arquivo:\n arquivo.write(f'{txt1} == ' + f'{txt2}\\n')\n else:\n pass\n\n\ndef confirmar():\n '''\n Function used to ask whether the user wants to save a piece of information.\n Note: the algorithm uses the first letter (S or N)\n '''\n while True:\n print('\\nDeseja salvar o resultado?')\n\n try:\n confir = input('>>> ').upper()[0]\n except (ValueError, TypeError, IndexError):\n print(f'\\033[31mErro! Tente novamente\\033[m')\n else:\n if confir != 'S' and confir != 'N':\n print('SOMENTE |S| OU |N|')\n elif confir == 'S':\n print('\\n\\033[32mMensagem salva!\\033[m')\n break\n else:\n break\n\n return confir\n","sub_path":"cripto/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1833,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"319363074","text":"from tensorflow.keras.layers import Conv3D, Conv3DTranspose, BatchNormalization, Dropout\nfrom tensorflow.keras.layers import Input, concatenate, MaxPooling3D, Activation, Reshape, Flatten\nfrom tensorflow.keras import Model, regularizers\nfrom tensorflow.keras import backend as K\nfrom tensorflow.keras.optimizers import Adam, RMSprop\n\n\ndef conv_block_3d(tensor, nfilters, size=3, padding='same', initializer=\"he_normal\"):\n x = Conv3D(filters=nfilters,\n kernel_size=(size, size, size),\n padding=padding,\n activation='elu',\n kernel_regularizer=regularizers.l2(7e-5),\n bias_regularizer=regularizers.l2(7e-5),\n activity_regularizer=regularizers.l2(1e-5)\n )(tensor)\n x = BatchNormalization()(x)\n x = Conv3D(filters=nfilters,\n kernel_size=(size, size, size),\n padding=padding,\n activation='elu',\n kernel_regularizer=regularizers.l2(7e-5),\n bias_regularizer=regularizers.l2(7e-5),\n activity_regularizer=regularizers.l2(1e-5)\n )(x)\n x = BatchNormalization()(x)\n return x\n\n\ndef deconv_block_3d(tensor, residual, nfilters, size=3, padding='same', strides=(2, 2, 2)):\n y = Conv3DTranspose(nfilters,\n kernel_size=(size, size, size),\n strides=strides,\n padding=padding,\n kernel_regularizer=regularizers.l2(7e-5),\n bias_regularizer=regularizers.l2(7e-5),\n activity_regularizer=regularizers.l2(1e-5)\n )(tensor)\n y = concatenate([y, residual], axis=4)\n y = conv_block_3d(y, nfilters)\n return y\n\n\ndef weighted_categorical_crossentropy(weights):\n # weights = [0.9,0.05,0.04,0.01]\n def wcce(y_true, y_pred):\n Kweights = K.constant(weights)\n # if not K.is_tensor(y_pred): y_pred = K.constant(y_pred)\n y_true = K.cast(y_true, y_pred.dtype)\n return K.categorical_crossentropy(y_true, y_pred) * K.sum(y_true * Kweights, axis=-1)\n\n return wcce\n\n\ndef u_net_3d(img_height: int, img_width: int, bands: int,\n time_steps: int, nclasses: int, learning_rate=1e-5,\n class_weights=None, filters=16) -> Model:\n input_layer = Input(shape=(time_steps, img_height, img_width, bands), name='image_input')\n # First Conv Block\n input_norm = BatchNormalization()(input_layer)\n conv1 = conv_block_3d(input_norm, nfilters=filters)\n conv1 = Dropout(0.2)(conv1)\n conv1 = BatchNormalization()(conv1)\n conv2 = conv_block_3d(conv1, nfilters=filters * 2)\n conv2 = Dropout(0.2)(conv2)\n conv2 = 
BatchNormalization()(conv2)\n conv2_out = MaxPooling3D(pool_size=(2, 2, 2), padding='same')(conv2)\n # Second Conv Block\n conv3 = conv_block_3d(conv2_out, nfilters=filters * 4)\n conv3 = Dropout(0.2)(conv3)\n conv3 = BatchNormalization()(conv3)\n conv4 = conv_block_3d(conv3, nfilters=filters * 8)\n conv4 = Dropout(0.2)(conv4)\n conv4 = BatchNormalization()(conv4)\n conv4_out = MaxPooling3D(pool_size=(2, 2, 2), padding='same')(conv4)\n # Third Conv Block\n conv4_out = Dropout(0.5)(conv4_out)\n conv4_out = BatchNormalization()(conv4_out)\n conv5 = conv_block_3d(conv4_out, nfilters=filters * 16)\n conv5 = Dropout(0.5)(conv5)\n # Deconv Block\n deconv6 = deconv_block_3d(conv5, residual=conv4, nfilters=filters * 8)\n deconv6 = Dropout(0.5)(deconv6)\n deconv7 = deconv_block_3d(deconv6, residual=conv2, nfilters=filters * 4)\n deconv7 = Dropout(0.5)(deconv7)\n # Post Conv/Deconv Processing Block\n conv6 = Conv3D(filters=nclasses,\n kernel_size=(time_steps, 1, 1),\n padding='valid',\n activation='elu',\n kernel_regularizer=regularizers.l2(1e-5),\n bias_regularizer=regularizers.l2(1e-5),\n activity_regularizer=regularizers.l2(1e-5)\n )(deconv7)\n conv6 = BatchNormalization()(conv6)\n shapes = conv6.get_shape()\n output = Reshape((shapes[2] * shapes[3], shapes[4]))(conv6)\n # output = Flatten()(conv6)\n model = Model(inputs=input_layer, outputs=output, name='Unet')\n if class_weights is not None:\n loss = weighted_categorical_crossentropy(class_weights)\n model.compile(optimizer=Adam(lr=learning_rate, clipvalue=5),\n loss=loss, metrics=['accuracy'])\n else:\n # no class weights given: fall back to the standard categorical cross-entropy\n loss = 'categorical_crossentropy'\n model.compile(optimizer=Adam(lr=learning_rate, clipvalue=5),\n loss=loss, metrics=['accuracy'])\n print(model.summary())\n return model\n","sub_path":"dnn/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":4728,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"569554852","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.7 (62211)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: /Users/philewels/GitHub/MultiQC/multiqc/modules/qualimap/qualimap.py\n# Compiled at: 2019-11-13 05:22:42\n\"\"\" MultiQC module to parse output from QualiMap \"\"\"\nfrom __future__ import print_function\nfrom collections import defaultdict, OrderedDict\nimport logging, os\nfrom multiqc.modules.base_module import BaseMultiqcModule\nlog = logging.getLogger(__name__)\n\nclass MultiqcModule(BaseMultiqcModule):\n \"\"\" Qualimap is really a collection of separate programs:\n BamQC, RNASeq and Counts.. This module is split into separate\n files to reflect this and help with code organisation. \"\"\"\n\n def __init__(self):\n super(MultiqcModule, self).__init__(name='QualiMap', anchor='qualimap', href='http://qualimap.bioinfo.cipf.es/', info='is a platform-independent application to facilitate the quality control of alignment sequencing data and its derivatives like feature counts.')\n from . import QM_BamQC\n from . 
import QM_RNASeq\n self.general_stats_headers = OrderedDict()\n self.general_stats_data = defaultdict(lambda : dict())\n n = dict()\n n['BamQC'] = QM_BamQC.parse_reports(self)\n if n['BamQC'] > 0:\n log.info(('Found {} BamQC reports').format(n['BamQC']))\n n['RNASeq'] = QM_RNASeq.parse_reports(self)\n if n['RNASeq'] > 0:\n log.info(('Found {} RNASeq reports').format(n['RNASeq']))\n if sum(n.values()) == 0:\n raise UserWarning\n self.general_stats_addcols(self.general_stats_data, self.general_stats_headers)\n\n def get_s_name(self, f):\n s_name = os.path.basename(os.path.dirname(f['root']))\n s_name = self.clean_s_name(s_name, f['root'])\n if s_name.endswith('.qc'):\n s_name = s_name[:-3]\n return s_name","sub_path":"pycfiles/multiqc-1.8.tar/qualimap.py","file_name":"qualimap.py","file_ext":"py","file_size_in_byte":1919,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"351239411","text":"import sys\r\nn = int(sys.stdin.readline())\r\nfib_cache = {}\r\ndef fib(n):\r\n if n in fib_cache:\r\n return fib_cache[n]\r\n else:\r\n if n == 1:\r\n fib_cache[n] = 1\r\n return n\r\n elif n == 2:\r\n fib_cache[n] = 2\r\n return n\r\n else:\r\n fib_cache[n] = fib(n - 1) + fib(n - 2)\r\n return fib_cache[n]\r\n","sub_path":"even_fibonacci_numbers.py","file_name":"even_fibonacci_numbers.py","file_ext":"py","file_size_in_byte":415,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"113598110","text":"#!/usr/bin/python3\n# -*- coding: utf-8 -*-\nfrom __future__ import absolute_import, division, print_function\n\nfrom sys import version_info\nif version_info.major == 3:\n pass\nelif version_info.major == 2:\n input = raw_input\nelse:\n print (\"Unknown python version - input function not safe\")\n\nfrom os import environ\nfrom math import ceil, floor\n#from sys import setrecursionlimit\n#setrecursionlimit (11000)\n\ndef encryption (s):\n # remove blanks from string\n s = s.replace (\" \", \"\")\n l = len (s)\n # find grid\n lw = l ** .5\n rows = floor (lw); cols = rows\n if rows * cols < l:\n cols = ceil (lw)\n if rows * cols < l:\n rows = cols\n print (rows, cols, l)\n encT = \"\"\n for c in range (cols):\n for r in range (rows):\n p = c + cols * r\n if p < l:\n encT += s [p]\n if c != cols - 1:\n encT += ' '\n return encT\n\ndef main ():\n fptr = open (environ ['OUTPUT_PATH'], 'w')\n s = input ()\n result = encryption (s)\n fptr.write (result + '\\n')\n fptr.close ()\n\nif __name__ == '__main__':\n main ()\n","sub_path":"2017/hackerrank/encryption.py","file_name":"encryption.py","file_ext":"py","file_size_in_byte":1109,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"166644945","text":"import locale\n\nfrom django import template\n\nfrom clubs.models import CourtSetup, Vacancy\nfrom reservations.models import Reservation\n\nregister = template.Library ( )\n \n\n\n@register.inclusion_tag ('clubs/vacancy_per_hour.html')\ndef vacancy_per_hour (court, for_date, hour_list):\n \"\"\"\n Renders a table row with link buttons, containing the\n prices for the selected court and date (for_date), per\n hour in 'hour_list'. If a term is not free (i.e. 
it has\n already been booked), the '-' string is rendered instead.-\n \"\"\"\n dow = for_date.isoweekday ( )\n hour_list = [{'value': k} for (k,v) in hour_list]\n \n for h in hour_list:\n h['vacancy'] = Vacancy.objects.get_all ([court], [dow], [h['value']])\n h['vacancy'] = h['vacancy'][0]\n try:\n cs = court.court_setup\n except AttributeError:\n cs = CourtSetup.objects.get (pk=court['court_setup_id'])\n h['reservation'] = Reservation.objects.by_date (cs, for_date) \\\n .filter (vacancy=h['vacancy'])\n if h['reservation']:\n h['reservation'] = h['reservation'][0]\n else:\n h['reservation'] = None\n \n return {'hour_list': hour_list,\n 'ordinal_date': for_date.toordinal ( ),}\n\n\n\n@register.inclusion_tag('clubs/vacancy_prices_per_day.html')\ndef vacancy_prices_per_day (court, hour):\n \"\"\"\n Renders a table row with input texts, containing the\n prices for the selected court, per hour and per day. If\n the selected term is not defined (i.e. it has no price),\n the '- - -' string is rendered instead.-\n \"\"\"\n prices = []\n try:\n court_id = court['id']\n except TypeError:\n court_id = court.id\n \n vs = Vacancy.objects.filter (court__id=court_id) \\\n .filter (available_from=hour) \\\n .order_by ('day_of_week') \\\n .values ('id', 'price')\n for v in vs:\n price_name = 'price_%s' % str(v['id'])\n price_value = '- - -' if not v['price'] else locale.format ('%.2f', \n v['price'], \n monetary=True)\n prices.append ((price_name, price_value))\n return {'prices' : prices,}\n","sub_path":"clubs/templatetags/vacancy_tags.py","file_name":"vacancy_tags.py","file_ext":"py","file_size_in_byte":2359,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"279213786","text":"import re\n\nfrom ..amount import parse_usd\n\nclass ChaseClient:\n def __init__(self, browser, creds, login_timeout=0):\n self._browser = browser\n self._creds = creds\n self._login_timeout = login_timeout\n\n self._home_url = None\n\n def _home(self):\n if self._browser.url != self._home_url:\n self._browser.load('https://www.chase.com/')\n\n self._browser.input_text('#usr_name_home', self._creds.username)\n self._browser.input_text_submit('#usr_password_home', self._creds.password)\n\n if self._login_timeout:\n self._browser.wait_for('.session_summary', self._login_timeout)\n\n self._home_url = self._browser.url\n\n def get_balance(self):\n self._home()\n\n rows = self._browser.execute(\"\"\"\n var rows = document.querySelectorAll('table[summary=\"account information\"] > tbody > tr');\n return Array.prototype.map.call(rows, function (r) { return r.innerText.trim() });\n \"\"\")\n\n bal = _first_match(rows, '^Current balance.*([$]\\S+)').group(1)\n\n return -1 * parse_usd(bal)\n\ndef _first_match(items, pattern):\n matcher = re.compile(pattern)\n items = (matcher.match(i) for i in items)\n return next((i for i in items if i), None)\n\nnew_client = ChaseClient\n","sub_path":"clients/chase.py","file_name":"chase.py","file_ext":"py","file_size_in_byte":1303,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"430861128","text":"from datetime import datetime\nfrom pytz import timezone\n\nfrom namex.utils.logging import setup_logging\nfrom namex.utils.api_resource import log_error\nfrom namex.utils.common import convert_to_ascii\n\nfrom namex.constants import NameState\n\nfrom namex.models import Request, Name, State, Comment, Applicant\n\nfrom .abstract_name_request import AbstractNameRequestMixin\nfrom .name_request_state 
import apply_nr_state_change, get_nr_state_actions\n\nfrom .exceptions import \\\n CreateNameRequestError, SaveNameRequestError, MapRequestDataError, MapRequestHeaderAttributesError, MapRequestAttributesError, \\\n MapRequestApplicantError, MapRequestNamesError, MapPersonCommentError, MapLanguageCommentError, UpdateSubmitCountError, ExtendExpiryDateError\n\nsetup_logging() # Important to do this first\n\nNAME_REQUEST_SOURCE = 'NAMEREQUEST'\n\n\ndef build_language_comment(english_bol, user_id, nr_id):\n lang_comment = Comment()\n lang_comment.examinerId = user_id\n lang_comment.nrId = nr_id\n if english_bol is True:\n # Add a comment for the examiner that says this is not an english name\n lang_comment.comment = 'The applicant has indicated the submitted name or names are in English.'\n else:\n lang_comment.comment = 'The applicant has indicated the submitted name or names are not English.'\n return lang_comment\n\n\ndef build_name_comment(user_id, nr_id):\n name_comment = Comment()\n name_comment.examinerId = user_id\n name_comment.nrId = nr_id\n name_comment.comment = 'The submitted name or names is a person name, coined phrase or trademark'\n return name_comment\n\n\nclass NameRequestService(AbstractNameRequestMixin):\n \"\"\"\n Basic usage:\n\n # 1. Create or retrieve an NR\n nr_model = Request()\n\n # Sample method to generate a new NR number\n def generate_nr():\n db.session.query(NRNumber).first()\n if r is None:\n # Set starting nr number\n last_nr = 'NR L000000'\n else:\n last_nr = r.nrNum\n return nr_num\n\n nr_num = NRNumber.get_next_nr_num(last_nr)\n r.nrNum = nr_num\n r.save_to_db()\n\n # 2. Set the initial state of the NR\n nr_model.stateCd = State.DRAFT\n\n # 3. Create an instance of this service\n nr_svc = NameRequestService()\n\n # 3a. Important! Set the nr_num property on the service\n nr_svc.nr_num = generate_nr()\n\n # 3b. Important! Set the nr_id\n nr_svc.nr_id = nr_model.id\n\n # 3c. Important! Set the request_data\n nr_svc.request_data = request.get_json()\n\n # 4. Do your update logic in here\n def on_update(nr, svc):\n # Do stuff here\n nr = svc.map_request_data(nr, False)\n # Save the request\n nr = svc.save_request(nr)\n # Return the updated name request\n # The result of on_update is returned as the result of apply_state_change\n return nr\n\n # 5. Run apply_state_change to execute the update\n nr_model = nr_svc.apply_state_change(nr_model, new_state, on_update)\n \"\"\"\n _virtual_wc_service = None\n\n @property\n def virtual_wc_service(self):\n return self._virtual_wc_service\n\n @virtual_wc_service.setter\n def virtual_wc_service(self, service):\n self._virtual_wc_service = service\n\n def create_name_request(self):\n \"\"\"\n # !Important! 
All new name requests should be initially set to the DRAFT state.\n # Use apply_state_change on the name_request to transition to any other state.\n :return:\n \"\"\"\n try:\n name_request = Request()\n self.generate_nr_keys()\n\n name_request.stateCd = State.DRAFT\n except Exception as err:\n raise CreateNameRequestError(err)\n\n return name_request\n\n def create_name(self):\n try:\n name = Name()\n name_id = self.get_name_sequence()\n name.id = name_id\n name.state = NameState.NOT_EXAMINED.value\n except Exception as err:\n raise MapRequestNamesError(err, 'Error setting submitted_name and / or sequence.')\n\n return name\n\n def create_applicant(self):\n try:\n applicant = Applicant()\n applicant.partyId = self.get_applicant_sequence()\n except Exception as err:\n raise MapRequestApplicantError(err, 'Error setting applicant and / or sequence.')\n\n return applicant\n\n @classmethod\n def get_item_from_list(cls, items, item_id, item_prop='id'):\n \"\"\"\n TODO: We could make a util for this...\n :param items:\n :param item_id:\n :param item_prop:\n :return:\n \"\"\"\n matches = [i for i in items if i.__getattribute__(item_prop) == item_id]\n if len(matches) == 0:\n return None\n if len(matches) == 1:\n return matches[0]\n if len(matches) > 1:\n raise Exception('More than one match found for a given ID!')\n\n @classmethod\n def update_request_submit_count(cls, name_request):\n try:\n name_request.submitCount = name_request.submitCount + 1 if isinstance(name_request.submitCount, int) else 1\n except Exception as err:\n raise UpdateSubmitCountError(err)\n\n return name_request\n\n def extend_expiry_date(self, name_request, start_date=None):\n start_datetime = start_date if start_date else datetime.utcnow()\n \"\"\"\n Extends the expiry date by 56 days from today's date\n :param name_request:\n :return:\n \"\"\"\n try:\n name_request.expirationDate = self.create_expiry_date(\n start=start_datetime,\n expires_in_days=56,\n tz=timezone('UTC')\n )\n except Exception as err:\n raise ExtendExpiryDateError(err)\n\n return name_request\n\n # Methods used to map request data\n def map_request_data(self, name_request, map_draft_attrs=False):\n \"\"\"\n This method maps data from the HTTP request data over to the name request.\n We use this to set draft attributes, header attributes, and comments...\n :param name_request:\n :param map_draft_attrs:\n :return:\n \"\"\"\n new_state_code = self.next_state_code if self.next_state_code else self.request_state_code\n\n # Set the request attributes\n name_request = self.map_request_attrs(name_request)\n\n # If this is a draft, set name request header attributes\n if map_draft_attrs:\n name_request = self.map_draft_attrs(name_request)\n\n name_request = self.map_request_header_attrs(name_request)\n name_request = self.map_request_comments(name_request)\n\n try:\n if new_state_code == State.COND_RESERVE:\n name_request.consentFlag = 'Y'\n\n if new_state_code in [State.RESERVED, State.COND_RESERVE]:\n name_request.expirationDate = self.create_expiry_date(\n start=name_request.submittedDate,\n expires_in_days=56,\n tz=timezone('UTC')\n )\n except Exception as err:\n raise MapRequestDataError(err)\n\n return name_request\n\n def map_draft_attrs(self, name_request):\n \"\"\"\n Used internally by map_request_data.\n :param name_request:\n :return:\n \"\"\"\n try:\n user_id = self.user_id\n # Set this to name_request_service_account\n name_request.userId = user_id\n name_request.submittedDate = datetime.utcnow()\n except Exception as err:\n raise 
MapRequestDataError(err)\n\n return name_request\n\n def map_request_attrs(self, name_request):\n \"\"\"\n Used internally by map_request_data.\n :param name_request:\n :return:\n \"\"\"\n try:\n # Use class property values for the ID, NR Number and Source!\n # Do not map those values from the request if supplied, as they\n # should not be changed outside of the context of this application!\n nr_id = self.nr_id\n nr_num = self.nr_num\n name_request.id = nr_id\n name_request.nrNum = nr_num\n name_request._source = NAME_REQUEST_SOURCE\n\n # Default to whatever entity, action, or type already exists when mapping\n request_entity = self.request_entity if self.request_entity else name_request.entity_type_cd\n request_action = self.request_action if self.request_action else name_request.request_action_cd\n request_type = self.request_type if self.request_type or self.request_type is None else name_request.requestTypeCd\n\n # Set action and entity\n if request_entity:\n name_request.entity_type_cd = request_entity\n\n if request_action:\n name_request.request_action_cd = request_action\n\n # TODO: Throw exceptions for invalid combos?\n if not request_type and request_entity and request_action:\n # If request_type is None (eg. no 'requestTypeCd' was provided in the payload)\n # but a request_entity (entity_type_cd) and a request_action (request_action_cd)\n # are supplied, use get_mapped_request_type to map the requestTypeCd in the model\n # using the action and entity type\n request_type = self.get_mapped_request_type(request_entity, request_action)\n name_request.requestTypeCd = request_type[0]\n elif request_type is not None:\n # If request_type is NOT None, (eg. 'requestTypeCd' was provided in the payload)\n # then use the provided value\n name_request.requestTypeCd = request_type\n except Exception as err:\n raise MapRequestAttributesError(err)\n\n return name_request\n\n def map_request_header_attrs(self, name_request):\n \"\"\"\n Used internally by map_request_data.\n :param name_request:\n :return:\n \"\"\"\n user_id = self.user_id\n request_data = self.request_data\n\n try:\n # TODO: Review additional info stuff from NRO/namex (prev NR for re-applies,no NWPTA?\n if isinstance(request_data.get('natureBusinessInfo'), str):\n name_request.natureBusinessInfo = convert_to_ascii(request_data.get('natureBusinessInfo'))\n if isinstance(request_data.get('additionalInfo'), str):\n name_request.additionalInfo = convert_to_ascii(request_data.get('additionalInfo'))\n if isinstance(request_data.get('tradeMark'), str):\n name_request.tradeMark = request_data.get('tradeMark')\n if isinstance(request_data.get('previousRequestId'), int):\n name_request.previousRequestId = request_data.get('previousRequestId')\n if isinstance(request_data.get('priorityCd'), str):\n name_request.priorityCd = request_data.get('priorityCd')\n if request_data.get('priorityCd') == 'Y':\n name_request.priorityDate = datetime.utcnow().date()\n\n name_request.submitter_userid = user_id\n\n # XPRO\n if isinstance(request_data.get('xproJurisdiction'), str):\n name_request.xproJurisdiction = request_data.get('xproJurisdiction')\n # For MRAS participants\n if isinstance(request_data.get('homeJurisNum'), str):\n name_request.homeJurisNum = convert_to_ascii(request_data.get('homeJurisNum'))\n # For existing businesses\n if isinstance(request_data.get('corpNum'), str):\n name_request.corpNum = convert_to_ascii(request_data.get('corpNum'))\n except Exception as err:\n raise MapRequestHeaderAttributesError(err)\n\n return name_request\n\n 
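# Note: the comment mappers below are idempotent -- each one checks for an identical\n # existing comment on the NR before appending, so repeated calls do not duplicate comments.\n 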
def map_request_comments(self, name_request):\n \"\"\"\n Used internally by map_request_data. Execute any logic required to map comments here.\n :param name_request:\n :return:\n \"\"\"\n name_request = self.map_request_language_comments(name_request)\n name_request = self.map_request_person_name_comments(name_request)\n\n return name_request\n\n def map_request_language_comments(self, name_request):\n \"\"\"\n Used internally by map_request_comments.\n :param name_request:\n :return:\n \"\"\"\n try:\n request_data = self.request_data\n user_id = self.user_id\n nr_id = self.nr_id\n\n # If the language comment exists, we don't need to add it again; after the initial POST it gets set to None, so it must be dealt with\n if request_data.get('english') is not None:\n lang_comment = build_language_comment(request_data.get('english'), user_id, nr_id)\n matching_comments = list(filter(lambda x: x.comment == lang_comment.comment, list(name_request.comments)))\n if len(matching_comments) == 0:\n name_request.comments.append(lang_comment)\n except Exception as err:\n raise MapLanguageCommentError(err)\n\n return name_request\n\n def map_request_person_name_comments(self, name_request):\n \"\"\"\n Used internally by map_request_comments.\n :param name_request:\n :return:\n \"\"\"\n try:\n request_data = self.request_data\n user_id = self.user_id\n nr_id = self.nr_id\n\n if request_data.get('nameFlag') is not None:\n if request_data.get('nameFlag') is True:\n # If the person name comment exists, we don't need to add it again\n name_comment = build_name_comment(user_id, nr_id)\n matching_comments = list(filter(lambda x: x.comment == name_comment.comment, list(name_request.comments)))\n if len(matching_comments) == 0:\n name_request.comments.append(name_comment)\n except Exception as err:\n raise MapPersonCommentError(err)\n\n return name_request\n\n def map_request_applicants(self, name_request):\n \"\"\"\n This method maps applicants from the HTTP request data over to the name request.\n :param name_request:\n :return:\n \"\"\"\n request_data = self.request_data\n nr_id = self.nr_id\n\n request_applicants = request_data.get('applicants')\n applicants = []\n\n try:\n if isinstance(request_applicants, list):\n for request_applicant in request_applicants:\n request_applicant_id = request_applicant.get('partyId')\n\n if request_applicant_id:\n existing_applicants = name_request.applicants.all()\n match = self.get_item_from_list(existing_applicants, request_applicant_id, 'partyId')\n if match:\n applicant = self.map_request_applicant(match, request_applicant)\n applicants.append(applicant)\n else:\n applicant = self.create_applicant()\n applicant.nrId = nr_id\n\n applicant = self.map_request_applicant(applicant, request_applicant)\n applicants.append(applicant)\n\n name_request.applicants = applicants\n\n elif isinstance(request_applicants, dict):\n request_applicant_id = request_applicants.get('partyId')\n\n if request_applicant_id:\n existing_applicants = name_request.applicants.all()\n match = self.get_item_from_list(existing_applicants, request_applicant_id, 'partyId')\n if match:\n applicant = self.map_request_applicant(match, request_applicants)\n applicants.append(applicant)\n else:\n applicant = self.create_applicant()\n applicant.nrId = nr_id\n\n applicant = self.map_request_applicant(applicant, request_applicants)\n applicants.append(applicant)\n\n name_request.applicants = applicants\n\n except Exception as err:\n raise MapRequestApplicantError(err)\n\n return name_request\n\n def map_request_applicant(self, 
applicant, request_applicant):\n applicant.lastName = convert_to_ascii(request_applicant['lastName'])\n applicant.firstName = convert_to_ascii(request_applicant['firstName'])\n if request_applicant['middleName']:\n applicant.middleName = convert_to_ascii(request_applicant['middleName'])\n applicant.contact = convert_to_ascii(request_applicant['contact'])\n if request_applicant['clientFirstName']:\n applicant.clientFirstName = convert_to_ascii(request_applicant['clientFirstName'])\n if request_applicant['clientLastName']:\n applicant.clientLastName = convert_to_ascii(request_applicant['clientLastName'])\n if request_applicant['phoneNumber']:\n applicant.phoneNumber = convert_to_ascii(request_applicant['phoneNumber'])\n if request_applicant['faxNumber']:\n applicant.faxNumber = convert_to_ascii(request_applicant['faxNumber'])\n applicant.emailAddress = convert_to_ascii(request_applicant['emailAddress'])\n applicant.addrLine1 = convert_to_ascii(request_applicant['addrLine1'])\n if request_applicant['addrLine2']:\n applicant.addrLine2 = convert_to_ascii(request_applicant['addrLine2'])\n if request_applicant['addrLine3']:\n applicant.addrLine3 = convert_to_ascii(request_applicant['addrLine3'])\n applicant.city = convert_to_ascii(request_applicant['city'])\n applicant.stateProvinceCd = request_applicant['stateProvinceCd']\n applicant.postalCd = convert_to_ascii(request_applicant['postalCd'])\n applicant.countryTypeCd = request_applicant['countryTypeCd']\n\n return applicant\n\n def map_request_names(self, name_request):\n \"\"\"\n This method maps names from the HTTP request data over to the name request.\n :param name_request:\n :return:\n \"\"\"\n if not isinstance(self.request_names, list):\n raise MapRequestNamesError()\n\n try:\n for request_name in self.request_names:\n request_name_id = request_name.get('id')\n if request_name_id:\n existing_names = name_request.names.all()\n match = self.get_item_from_list(existing_names, request_name_id)\n if match:\n # Update the name\n updated_name = self.map_submitted_name(match, request_name)\n name_request.names.append(updated_name)\n else:\n submitted_name = self.create_name()\n submitted_name = self.map_submitted_name(submitted_name, request_name)\n name_request.names.append(submitted_name)\n\n except Exception as err:\n raise MapRequestNamesError(err)\n\n return name_request\n\n def map_submitted_name(self, submitted_name, name):\n \"\"\"\n Used internally by map_request_names.\n :param submitted_name:\n :param name:\n :return:\n \"\"\"\n new_state_code = self.next_state_code if self.next_state_code else self.request_state_code\n\n # Common name attributes\n submitted_name = self.map_submitted_name_attrs(submitted_name, name)\n test_conflict = name.get('conflict1')\n if len(test_conflict) > 0:\n conflict_flag = 'Y'\n else:\n conflict_flag = 'N'\n\n if new_state_code in [State.COND_RESERVE] and conflict_flag == 'Y':\n submitted_name = self.map_submitted_name_conflicts(submitted_name, name)\n\n consent_words_list = name.get('consent_words', None)\n if consent_words_list and len(consent_words_list) > 0:\n submitted_name = self.map_submitted_name_consent_words(submitted_name, consent_words_list)\n\n return submitted_name\n\n def map_submitted_name_attrs(self, submitted_name, name):\n \"\"\"\n Used internally by map_submitted_name.\n :param submitted_name:\n :param name:\n :return:\n \"\"\"\n new_state_code = self.next_state_code if self.next_state_code else self.request_state_code\n\n try:\n submitted_name.nrId = self.nr_id\n submitted_name.choice = 
name.get('choice', 1)\n submitted_name.name_type_cd = name.get('name_type_cd', 'CO')\n submitted_name.name = convert_to_ascii(name.get('name', ''))\n submitted_name.designation = name.get('designation', '')\n # For existing businesses\n if isinstance(name.get('corpNum'), str):\n # To clear the corpNum use an empty string in the data payload\n submitted_name.corpNum = convert_to_ascii(name.get('corpNum'))\n\n if new_state_code == State.DRAFT:\n submitted_name.state = NameState.NOT_EXAMINED.value\n\n elif new_state_code == State.COND_RESERVE:\n submitted_name.state = NameState.COND_RESERVE.value\n\n elif new_state_code == State.RESERVED:\n submitted_name.state = NameState.RESERVED.value\n\n elif new_state_code == State.CONDITIONAL:\n submitted_name.state = NameState.CONDITION.value\n\n elif new_state_code == State.APPROVED:\n submitted_name.state = NameState.APPROVED.value\n\n except Exception as err:\n raise MapRequestNamesError(err, 'Error setting common name attributes.')\n\n return submitted_name\n\n def map_submitted_name_conflicts(self, submitted_name, name):\n \"\"\"\n Used internally by map_submitted_name.\n :param submitted_name:\n :param name:\n :return:\n \"\"\"\n try:\n # Only capturing one conflict\n if name.get('conflict1_num'):\n submitted_name.conflict1_num = name.get('conflict1_num')\n if name.get('conflict1'):\n submitted_name.conflict1 = name.get('conflict1')\n # Conflict text same as Namex\n submitted_name.decision_text = 'Consent is required from ' + name.get('conflict1') + '\\n' + '\\n'\n except Exception as err:\n raise MapRequestNamesError(err, 'Error on reserved conflict info.')\n\n return submitted_name\n\n def clear_submitted_name_conflicts(self, submitted_name):\n \"\"\"\n Used internally by map_submitted_name.\n :param submitted_name:\n :param name:\n :return:\n \"\"\"\n try:\n submitted_name.conflict1_num = None\n submitted_name.conflict1 = None\n except Exception as err:\n raise MapRequestNamesError(err, 'Error on draft empty conflict info.')\n\n return submitted_name\n\n def map_submitted_name_consent_words(self, submitted_name, consent_list):\n \"\"\"\n Used internally by map_submitted_name.\n :param submitted_name:\n :param consent_list:\n :return:\n \"\"\"\n decision_text = submitted_name.decision_text\n for consent in consent_list:\n try:\n cnd_instructions = None\n if consent != '' or len(consent) > 0:\n cnd_instructions = self.virtual_wc_service.get_word_condition_instructions(consent)\n except Exception as err:\n log_error('Error on get consent word. 
Consent Word[0]'.format(consent), err)\n raise MapRequestNamesError('Error mapping consent words.')\n\n try:\n if decision_text is None:\n decision_text = cnd_instructions + '\\n' + '\\n'\n else:\n decision_text += consent + '- ' + cnd_instructions + '\\n' + '\\n'\n\n submitted_name.decision_text = decision_text\n except Exception as err:\n raise MapRequestNamesError(err, 'Error adding consent words to decision.')\n\n return submitted_name\n\n def apply_state_change(self, name_request, next_state, on_success=None):\n \"\"\"\n This is where we handle entity state changes.\n This just wraps .state.apply_nr_state_change located in this module.\n :param name_request:\n :param next_state:\n :param on_success:\n :return:\n \"\"\"\n def on_success_cb(nr, resource):\n new_state = next_state\n\n # TODO: Try / except here?\n if on_success:\n nr = on_success(nr, resource)\n\n # Set the actions corresponding to the new Name Request state\n self.current_state_actions = get_nr_state_actions(new_state, nr)\n return nr\n\n return apply_nr_state_change(self, name_request, next_state, on_success_cb)\n\n # CRUD methods\n def save_request(self, name_request, on_success=None):\n try:\n name_request.save_to_db()\n if on_success:\n on_success()\n\n return Request.find_by_nr(name_request.nrNum)\n\n except Exception as err:\n raise SaveNameRequestError(err)\n","sub_path":"api/namex/services/name_request/name_request.py","file_name":"name_request.py","file_ext":"py","file_size_in_byte":25382,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"327871261","text":"from django import forms\nfrom .models import Question\nfrom .models import Choice\nimport random\n\nclass QuestionForm(forms.Form):\n\tquestion=forms.CharField(max_length=1000)\n\tchoice1=forms.CharField(max_length=200)\n\tchoice2=forms.CharField(max_length=200)\n\tchoice3=forms.CharField(max_length=200)\n\tchoice4=forms.CharField(max_length=200)\n\tcorrect_choice=forms.CharField(max_length=200)\n\n\tdef clean(self):\n\t\tcleaned_data = super(QuestionForm, self).clean()\n\t\tcorrect_choice=cleaned_data.get(\"correct_choice\")\n\t\tchoice1=cleaned_data.get(\"choice1\")\n\t\tchoice2=cleaned_data.get(\"choice2\")\n\t\tchoice3=cleaned_data.get(\"choice3\")\n\t\tchoice4=cleaned_data.get(\"choice4\")\n\t\tif(correct_choice !=choice1 and correct_choice!=choice2 and correct_choice!=choice3 and correct_choice!=choice4):\n\t\t\traise forms.ValidationError(\n\t\t\t\t\"correct choice must be equal to one of the above choices\")\nclass PracticeForm(forms.Form)\t\t\t:\n\tchoices_given=forms.ChoiceField(widget=forms.RadioSelect(),required=False)\n\tdef __init__(self,*args,**kwargs):\n\t\tid=kwargs.pop('id')\n\t\tsuper(PracticeForm,self).__init__(*args,**kwargs)\n\t\tques_obj=Question.objects.get(pk=id)\n\t\tchoice_list=[]\n\t\tchoices=ques_obj.choices_created.all()\n\t\tchoices=list(choices)\n\t\twhile(len(choices)>0):\n\t\t\tx=random.choice(choices)\n\t\t\ty=(x.pk,x.choice_text)\n\t\t\tchoice_list.append(y)\n\t\t\tchoices.remove(x)\n\t\tself.fields['choices_given'].label=ques_obj.question\t\n\t\tself.fields['choices_given'].choices=choice_list\nclass 
QuizForm(forms.Form):\n\t# The ten question_id_N / choices_given_N field pairs are identical, so they are\n\t# built in a loop inside __init__ instead of being declared one by one; the\n\t# resulting form fields are unchanged.\n\tdef __init__(self,*args,**kwargs):\n\t\tques_list=kwargs.pop('ques_list')\n\t\tsuper(QuizForm,self).__init__(*args,**kwargs)\n\t\tfor i in range(1,11):\n\t\t\tself.fields['question_id_%s'%i]=forms.IntegerField(widget=forms.HiddenInput())\n\t\t\tself.fields['choices_given_%s'%i]=forms.ChoiceField(widget=forms.RadioSelect(),required=False)\n\t\tcount=1\n\t\twhile(count<=10):\n\t\t\tques_obj=ques_list[count-1]\n\t\t\tchoice_list=[]\n\t\t\tchoices=ques_obj.choices_created.all()\n\t\t\tchoices=list(choices)\n\t\t\twhile(len(choices)>0):\n\t\t\t\tx=random.choice(choices)\n\t\t\t\ty=(x.pk,x.choice_text)\n\t\t\t\tchoice_list.append(y)\n\t\t\t\tchoices.remove(x)\n\t\t\tr=\"Q%s\" % count\n\t\t\tr=r+\". 
\"+ques_obj.question\n\t\t\tself.fields['question_id_%s'% count].initial=ques_obj.pk\n\t\t\tself.fields['choices_given_%s'% count].label=r\n\t\t\tself.fields['choices_given_%s'% count].choices=choice_list\n\t\t\tcount=count+1\t\n\n\n\n\n\n\n\n\n\n\n\n\n\n \t\n\n\n\n\n\n\n\n\n\n\t\t\n","sub_path":"questions/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":3544,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"121233973","text":"# -*- coding: utf-8 -*-\n\nimport codecs\nimport os\nimport re\nimport pickle\n\ndef docs(\n path_input,\n path_output\n ):\n\n from gensim.models.doc2vec import LabeledSentence\n \n # Make a list of files in the specified directory\n flist = os.listdir(path_input)\n\n # docs list\n docs = [] \n\n # Loop in flist\n total_count = len(flist)\n for idx, file in enumerate(flist):\n print('Docs {0}/{1}: {2}'.format(idx + 1, total_count, file))\n \n with codecs.open('{}/{}'.format(path_input, file), 'r', 'utf-8') as fp:\n\n # Split documents into words\n words = re.split(u' ', fp.read())\n\n # Make a labeled sentence\n try:\n doc = LabeledSentence(\n words = words,\n tags = [file]\n )\n except:\n print('Can not convert into labeled sentence')\n continue\n\n docs.append(doc)\n\n with codecs.open('{}/{}'.format(path_output, 'docs.bin'), 'wb') as fp:\n pickle.dump(docs, fp)\n\n","sub_path":"prep/docs.py","file_name":"docs.py","file_ext":"py","file_size_in_byte":1076,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"335585585","text":"from matplotlib import pyplot as plt\nimport json\nfrom polygon import Polygon\nfrom point import Point\n\nclass _Utility:\n _instance = None\n \n def __init__(self):\n self._fig_counter = 0\n\n @property\n def fig_number(self):\n return self._fig_counter\n\n def draw(self, x, y):\n self._fig_counter += 1\n fig = plt.figure(self._fig_counter, figsize=(5,5), dpi=90)\n ax = fig.add_subplot(111)\n ax.plot(x, y, alpha=0.7, linewidth=3)\n ax.scatter(x,y)\n ax.set_title('Polygon')\n\n def draw_spikes(self, polygon):\n fig = plt.figure(self._fig_counter)\n ax = fig.get_axes()[0]\n topx, topy = polygon.topSpike.get_xy\n bottx, botty = polygon.bottomSpike.get_xy\n ax.plot(topx, topy, \"or\", color='r')\n ax.plot(bottx, botty, \"or\", color='g')\n\n def redraw(self, x, y, _color):\n fig = plt.figure(self._fig_counter)\n ax = fig.get_axes()\n ax[0].plot(x,y,\"or\", color=_color)\n\n def from_txt(self, filename='data.txt'):\n self._fig_counter += 1\n correct_data = []\n with open('data/' + filename, 'r') as f:\n data = [list(map(int,coord.strip().split(' '))) for coord in f.readlines()]\n for coord in data:\n correct_data.append(Point(coord[0], coord[1]))\n return correct_data\n\n def show(self):\n plt.show()\n\ndef Utility():\n if _Utility._instance is None:\n _Utility._instance = _Utility()\n return _Utility._instance\n\n\n","sub_path":"04_polygon_triangulation/utility.py","file_name":"utility.py","file_ext":"py","file_size_in_byte":1501,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"56875209","text":"from cardata.can_id import CanMessage\nfrom simulation.file_reader import FileReader\nimport eel\n\n\nclass Car:\n messages: [CanMessage] = []\n callback = None\n\n def start_worker(self, is_test=False):\n eel.sleep(4)\n if is_test:\n reader = FileReader(\"dumps/drive1.txt\")\n reader.read_line(self.transform)\n\n def transform(self, can_id, data):\n msg: CanMessage = 
CanMessage()\n        for msg in self.messages:\n            if hex(msg.can_id) == can_id:\n                msg.data = data\n                self.callback(msg)\n\n    def set_callback(self, update_ui):\n        self.callback = update_ui\n","sub_path":"cardata/car.py","file_name":"car.py","file_ext":"py","file_size_in_byte":638,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"248562914","text":"import sys\nfrom crud_cuenta.dao.conexion import Conexion\n\nclass DaoPlanCuenta(Conexion):\n    def __init__(self):\n        super().__init__()\n    # Query all accounts associated with a given group\n\n    def consultar(self, buscar):\n        result = False\n        try:\n            # The search term is passed as a bound parameter; concatenating it into\n            # the SQL string, as before, allowed SQL injection\n            sql = \"SELECT p.id,p.codigo,g.id grupo,p.descripcion,p.naturaleza,p.estado FROM plancuenta p\\\n                INNER JOIN grupo AS g ON p.grupo=g.id \\\n                WHERE p.estado=1 AND p.descripcion LIKE %s ORDER BY p.id\"\n            self.conectar()\n            self.conector.execute(sql, ('%' + str(buscar) + '%',))\n            result = self.conector.fetchall()\n            self.conn.commit()\n        except Exception as e:\n            print('Error al procesar la consulta de Grupos Cuenta', e)\n            self.conn.rollback()\n        finally:\n            self.cerrar()\n\n        return result\n\n    # Single-row check that the id to modify or delete exists\n    def __consulta_indi(self, id):\n        try:\n            sql = \"SELECT IF( EXISTS(SELECT * FROM plancuenta p WHERE p.id=%s),1,0) AS resul\"\n            self.conectar()\n            self.conector.execute(sql,id)\n            result = self.conector.fetchall()\n            self.conn.commit()\n        except Exception as e:\n            print('Error al procesar la consulta de Grupos Cuenta', e)\n            self.conn.rollback()\n        finally:\n            self.cerrar()\n\n        return True if result[0][0]==1 else False\n\n    # Insert method\n\n    def __ingresar(self, pcta):\n        correcto = True\n        try:\n            sql = \"INSERT INTO plancuenta(codigo,grupo,descripcion,naturaleza,estado)\\\n                VALUES(%s,%s,%s,%s,%s)\"\n            self.conectar()\n            self.conector.execute(\n                sql, (pcta.codigo, pcta.grupo, pcta.descripcion, pcta.naturaleza, 1))\n            self.conn.commit()\n        except Exception as e:\n            print('Error al ingresar', e)\n            correcto = False\n            self.conn.rollback()\n        finally:\n            self.cerrar()\n        return correcto\n    # Update method\n\n    def __modificar(self, pcta):\n        correcto = True\n        try:\n            sql = 'UPDATE plancuenta SET codigo = %s, grupo = %s, descripcion=%s,\\\n                naturaleza=%s WHERE id = %s'\n            self.conectar()\n            self.conector.execute(sql, (pcta.codigo, pcta.grupo, pcta.descripcion,\n                pcta.naturaleza, pcta.idcuenta))\n            self.conn.commit()\n        except Exception as e:\n            print('Error al modificar', e)\n            correcto = False\n            self.conn.rollback()\n        finally:\n            self.cerrar()\n\n        return correcto\n\n    # Delete method (a soft delete: the row's estado flag is set to 0)\n\n    def __eliminar(self, pcta):\n        correcto = True\n        if self.__consulta_indi(pcta.idcuenta):\n            try:\n                sql = \"UPDATE plancuenta p SET p.estado=%s WHERE p.id=%s\"\n                self.conectar()\n                self.conector.execute(sql,(0,pcta.idcuenta))\n                self.conn.commit()\n            except Exception as e:\n                print('Error en eliminar', e)\n                correcto = False\n                self.conn.rollback()\n            finally:\n                self.cerrar()\n\n        return correcto\n# Returns the result of the encapsulated methods ingresar, modificar and eliminar,\n# which only this class itself can call\n    def crud_opciones(self,pcta,opcion):\n        if opcion=='I':\n            return self.__ingresar(pcta)\n        elif opcion=='M':\n            return self.__modificar(pcta)\n        elif opcion=='E':\n            return self.__eliminar(pcta)\n        elif opcion=='CI':\n            return 
self.__consulta_indi(pcta)","sub_path":"dao/dao_plancuenta.py","file_name":"dao_plancuenta.py","file_ext":"py","file_size_in_byte":3823,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"254687639","text":"import pandas as pd\nimport numpy as np\nfrom sklearn.feature_extraction.text import CountVectorizer\nimport pickle\n\ntrain = pd.read_csv('/Users/umeshkethepalli/Desktop/Hate Speech/HateSpeech-5/data_files/Training_data.csv')\ntrain.dropna(subset=['clean_txt'], inplace=True)\n\ntest = pd.read_csv('/Users/umeshkethepalli/Desktop/Hate Speech/HateSpeech-5/data_files/Test_data.csv')\ntest.dropna(subset=['clean_txt'], inplace=True)\n\n#Vectorizer class initiation\nword_vec = CountVectorizer(ngram_range=(1,2), analyzer='word')\n\n#Fitting and transforming the train data\n\nmat_tr = word_vec.fit_transform(train['clean_txt'])\npickle.dump(word_vec,open('/Users/umeshkethepalli/Desktop/Hate Speech/HateSpeech-5/models/CountVectoriser.sav','wb'))\n#processing the vectorizer output\n#finding the frequency of words in the train data\nfreq = sum(mat_tr).toarray()[0]\ndf= pd.DataFrame(freq, index=word_vec.get_feature_names(), columns=['frequency'])\ndf1= pd.DataFrame(mat_tr.todense(), columns=[word_vec.get_feature_names()])\nind = df[(df['frequency']>5) & (df['frequency']<50)].index #limiting the frequency\ndf2 = df1[np.array(ind)] #taking only the required terms based on frequency filtering\ndf2.reset_index(drop=True, inplace=True)\ntrain_data_final = pd.concat([df2,train[['class','Hate','Offensive','Neither']]],axis=1)\ntrain_data_final.to_csv('/Users/umeshkethepalli/Desktop/Hate Speech/HateSpeech-5/data_files/CountVectorised_train.csv')\n\n#Test data processing steps same as input text processing\n\nmat_ts = word_vec.transform(test['clean_txt'])\ndf1_ts = pd.DataFrame(mat_ts.todense(), columns=[word_vec.get_feature_names()])\ndf2_ts = df1_ts[np.array(ind)]\ndf2_ts.reset_index(drop=True)\ntest_data_final = pd.concat([df2_ts,test[['class','Hate','Offensive','Neither']]], axis=1)\ntest_data_final.to_csv('/Users/umeshkethepalli/Desktop/Hate Speech/HateSpeech-5/data_files/CountVectorised_test.csv')\n#TF-IDF\n\nfrom sklearn.feature_extraction.text import TfidfVectorizer\n\ntfidf = TfidfVectorizer(ngram_range=(1,2))\nX = tfidf.fit_transform(train['clean_txt'])\ndf_tfidf = pd.DataFrame(X.todense(), columns=[tfidf.get_feature_names()])\ndf1_tfidf = df_tfidf[np.array(ind)]\npd.concat([df1_tfidf,train[['class','Hate','Offensive','Neither']]],axis=1).to_csv('/Users/umeshkethepalli/Desktop/Hate Speech/HateSpeech-5/data_files/tfidf_vectored_train.csv', index=False)\npickle.dump(tfidf,open('/Users/umeshkethepalli/Desktop/Hate Speech/HateSpeech-5/models/TFIDF_vectorizer.sav','wb'))\n\n\nY = tfidf.transform(test['clean_txt'])\ndft_tfidf = pd.DataFrame(Y.todense(),columns=[tfidf.get_feature_names()])\ndft1_tfidf = dft_tfidf[np.array(ind)]\npd.concat([dft1_tfidf,test[['class','Hate','Offensive','Neither']]],axis=1).to_csv('/Users/umeshkethepalli/Desktop/Hate Speech/HateSpeech-5/data_files/tfidf_vectored_test.csv', index=False)\n","sub_path":"data_vectorization.py","file_name":"data_vectorization.py","file_ext":"py","file_size_in_byte":2796,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"352247111","text":"#=========================================\n#\n# Lorenz model \n#\n#\n#=========================================\nimport random\nimport numpy as np\nrho = 28.0\n#rho = 142.0\n#rho = 148.0\nsigma = 10.0\nbeta 
= 8.0 / 3.0\n\n#rho = 45.92\n#sigma = 4\n#beta = 10\n\n\n#delta_t = 0.01\ndelta_t = 1e-2\ninitial_t = 0\nfinal_t = 2e1\ninitial_val = [1.0, 1.0, 1.0]\nmodel_name = \"Lorenz\"\ninformation = \"Lorenz\" + \"(rho, sigma, beta) = (\" + str(rho) + \", \" + str(sigma) + \", \" + str(beta) + \")\"\n\n\ndef f(state, t):\n x, y, z = state \n return sigma * (y - x), x * (rho - z) - y, x * y - beta * z\n\ndef Jf(state):\n #print(state)\n x, y, z = state\n #return Delta_t * np.matrix([[-sigma, sigma, 0], [(rho - z), -1, -x], [y, x, -beta]]) + np.eye(3)\n return np.matrix([[1 - delta_t * sigma, delta_t * sigma, 0], \n [delta_t * (rho - z), 1 - delta_t , -delta_t * x], \n [delta_t * y, delta_t * x, 1 - delta_t * beta]])\n\n","sub_path":"model/Lorenz.py","file_name":"Lorenz.py","file_ext":"py","file_size_in_byte":998,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"90960556","text":"## Try another name\r\n# You are still working on your Twitter sentiment analysis. You analyze now some things that caught your attention. You noticed that there are email addresses inserted in some tweets. Now, you are curious to find out which is the most common name.\r\n\r\n# You want to extract the first part of the email. E.g. if you have the email marysmith90@gmail.com, you are only interested in marysmith90.\r\n# You need to match the entire expression. So you make sure to extract only names present in emails. Also, you are only interested in names containing upper (e.g. A,B, Z) or lowercase letters (e.g. a, d, z) and numbers.\r\n\r\n# # The list sentiment_analysis containing the text of three tweets as well as the re module were loaded in your session. You can use print() to view it in the IPython Shell.\r\n\r\n# Instructions\r\n# 100 XP\r\n# Complete the regex to match the email capturing only the name part. The name part appears before the @.\r\n# Find all matches of the regex in each element of sentiment_analysis analysis. Assign it to the variable email_matched.\r\n# Complete the .format() method to print the results captured in each element of sentiment_analysis analysis.\r\n\r\n# Write a regex that matches email\r\nregex_email = r\"([a-zA-Z0-9]+)@\\S+\"\r\n\r\nfor tweet in sentiment_analysis:\r\n # Find all matches of regex in each tweet\r\n email_matched = re.findall(regex_email, tweet)\r\n\r\n # Complete the format method to print the results\r\n print(\"Lists of users found in this tweet: {}\".format(email_matched))\r\n\t\r\n\r\n## Flying home\r\n# Your boss assigned you to a small project. They are performing an analysis of the travels people made to attend business meetings. You are given a dataset with only the email subjects for each of the people traveling.\r\n\r\n# You learn that the text followed a pattern. Here is an example:\r\n\r\n# Here you have your boarding pass LA4214 AER-CDB 06NOV.\r\n\r\n# You need to extract the last part:\r\n\r\n# The two letters indicate the airline (e.g LA),\r\n# The 4 numbers are the flight number (e.g. 4214).\r\n# The three letters correspond to the departure (e.g AER),\r\n# The destination (CDB),\r\n# The date (06NOV) of the flight.\r\n# All letters are always uppercase.\r\n\r\n# The variable flight containing one email subject was loaded in your session. You can use print() to view it in the IPython Shell.\r\n\r\n# Instructions 4/4\r\n# 25 XP\r\n# Complete the format method with the elements contained in flight_matches. In the first line print the airline,and the flight number. 
In the second line, the departure and destination. In the third line, the date.\r\n\r\n# Import re\r\nimport re\r\n\r\n# Write regex to capture information of the flight\r\nregex = r\"([A-Z]{2})(\\d{4})\\s([A-Z]{3})-([A-Z]{3})\\s(\\d{2}[A-Z]{3})\"\r\n\r\n# Find all matches of the flight information\r\nflight_matches = re.findall(regex, flight)\r\n \r\n#Print the matches\r\nprint(\"Airline: {} Flight number: {}\".format(flight_matches[0][0], flight_matches[0][1]))\r\nprint(\"Departure: {} Destination: {}\".format(flight_matches[0][2], flight_matches[0][3]))\r\nprint(\"Date: {}\".format(flight_matches[0][4]))\r\n\r\n\r\n## Love it!\r\n# You are still working on the Twitter sentiment analysis project. First, you want to identify positive tweets about movies and concerts.\r\n\r\n# You plan to find all the sentences that contain the words love, like, or enjoy and capture that word. You will limit the tweets by focusing on those that contain the words movie or concert by keeping the word in another group. You will also save the movie or concert name.\r\n\r\n# For example, if you have the sentence: I love the movie Avengers. You match and capture love. You need to match and capture movie. Afterwards, you match and capture anything until the dot.\r\n\r\n# The list sentiment_analysis containing the text of three tweets and the re module are loaded in your session. You can use print() to view the data in the IPython Shell.\r\n\r\n# Instructions\r\n# 100 XP\r\n# Complete the regular expression to capture the words love or like or enjoy. Match and capture the words movie or concert. Match and capture anything appearing until the ..\r\n# Find all matches of the regex in each element of sentiment_analysis. Assign them to positive_matches.\r\n# Complete the .format() method to print out the results contained in positive_matches for each element in sentiment_analysis.\r\n\r\n# Write a regex that matches sentences with the optional words\r\nregex_positive = r\"(love|like|enjoy).+?(movie|concert)\\s(.+?)\\.\"\r\n\r\nfor tweet in sentiment_analysis:\r\n\t# Find all matches of regex in tweet\r\n positive_matches = re.findall(regex_positive, tweet)\r\n \r\n # Complete format to print out the results\r\n print(\"Positive comments found {}\".format(positive_matches))\r\n\t\r\n\t\r\n\t\r\n\t\r\n## Ugh! Not for me!\r\n# After finding positive tweets, you want to do it for negative tweets. Your plan now is to find sentences that contain the words hate, dislike or disapprove. You will again save the movie or concert name. You will get the tweet containing the words movie or concert but this time, you don't plan to save the word.\r\n\r\n# For example, if you have the sentence: I dislike the movie Avengers a lot.. You match and capture dislike. You will match but not capture the word movie. Afterwards, you match and capture anything until the dot.\r\n\r\n# The list sentiment_analysis containing the text of three tweets as well as the re module are loaded in your session. You can use print() to view the data in the IPython Shell.\r\n\r\n# Instructions\r\n# 100 XP\r\n# Complete the regular expression to capture the words hate or dislike or disapprove. Match but don't capture the words movie or concert. Match and capture anything appearing until the ..\r\n# Find all matches of the regex in each element of sentiment_analysis. 
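These exercises hinge on which groups capture: a self-contained sketch, run on an invented tweet (not the course data), shows how (?:...) drops the middle group from the results.

import re

tweet = "I dislike the movie Avengers a lot."  # invented example

# The capturing version returns three groups...
print(re.findall(r"(dislike).+?(movie)\s(.+?)\.", tweet))    # [('dislike', 'movie', 'Avengers a lot')]
# ...while the non-capturing (?:...) version keeps only the verb and the title.
print(re.findall(r"(dislike).+?(?:movie)\s(.+?)\.", tweet))  # [('dislike', 'Avengers a lot')]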
Assign them to negative_matches.\r\n# Complete the .format() method to print out the results contained in negative_matches for each element in sentiment_analysis.\t\r\n\r\n# Write a regex that matches sentences with the optional words\r\nregex_negative = r\"(hate|dislike|disapprove).+?(?:movie|concert)\\s(.+?)\\.\"\r\n\r\nfor tweet in sentiment_analysis:\r\n\t# Find all matches of regex in tweet\r\n negative_matches = re.findall(regex_negative, tweet)\r\n \r\n # Complete format to print out the results\r\n print(\"Negative comments found {}\".format(negative_matches))\r\n\t\r\n## Parsing PDF files\r\n# You now need to work on another small project you have been delaying. Your company gave you some PDF files of signed contracts. The goal of the project is to create a database with the information you parse from them. Three of these columns should correspond to the day, month, and year when the contract was signed.\r\n# The dates appear as Signed on 05/24/2016 (05 indicating the month, 24 the day). You decide to use capturing groups to extract this information. Also, you would like to retrieve that information so you can store it separately in different variables.\r\n\r\n# You decide to do a proof of concept.\r\n\r\n# The variable contract containing the text of one contract and the re module are already loaded in your session. You can use print() to view the data in the IPython Shell.\r\n\r\n# Instructions 3/3\r\n# 30 XP\r\n# Complete the f-string method to print out the captured groups. Use the values corresponding to each key in the dictionary.\r\n\r\n# Write regex and scan contract to capture the dates described\r\nregex_dates = r\"Signed\\son\\s(\\d{2})/(\\d{2})/(\\d{4})\"\r\ndates = re.search(regex_dates, contract)\r\n\r\n# Assign to each key the corresponding match\r\nsignature = {\r\n\t\"day\": dates.group(2),\r\n\t\"month\": dates.group(1),\r\n\t\"year\": dates.group(3)\r\n}\r\n# Complete the format method to print-out\r\nprint(\"Our first contract is dated back to {data[year]}. Particularly, the day {data[day]} of the month {data[month]}.\".format(data=signature))\r\n\r\n\r\n## Close the tag, please!\r\n# In the meantime, you are working on one of your other projects. The company is going to develop a new product. It will help developers automatically check the code they are writing. You need to write a short script for checking that every HTML tag that is open has its proper closure.\r\n\r\n# You have an example of a string containing HTML tags:\r\n\r\n# <title>The Data Science Company\r\n\r\n# You learn that an opening HTML tag is always at the beginning of the string. It appears inside <>. A closing tag also appears inside <>, but it is preceded by /.\r\n\r\n# You also remember that capturing groups can be referenced using numbers, e.g \\4.\r\n\r\n# The list html_tags, containing three strings with HTML tags, and there module are loaded in your session. You can use print() to view the data in the IPython Shell.\r\n\r\n# Instructions\r\n# 0 XP\r\n# Complete the regex in order to match closed HTML tags. Find if there is a match in each string of the list html_tags. Assign the result to match_tag.\r\n# If a match is found, print the first group captured and saved in match_tag.\r\n# If no match is found, complete the regex to match only the text inside the HTML tag. 
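A runnable aside on the numbered backreference this tag checker relies on; both sample strings are invented for illustration.

import re

for s in ["<title>The Data Science Company</title>", "<body>No closing tag here"]:
    # \1 must repeat exactly what group 1 captured, i.e. the same tag name
    m = re.match(r"<(\w+)>.*?</\1>", s)
    print(s, "->", "closed" if m else "not closed")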
Assign it to notmatch_tag.\r\n# Print the first group captured by the regex and save it in notmatch_tag.\r\n\r\nfor string in html_tags:\r\n # Complete the regex and find if it matches a closed HTML tags\r\n match_tag = re.match(r\"<(\\w+)>.*?\", string)\r\n \r\n if match_tag:\r\n # If it matches print the first group capture\r\n print(\"Your tag {} is closed\".format(match_tag.group(1))) \r\n else:\r\n # If it doesn't match capture only the tag \r\n notmatch_tag = re.match(r\"<(\\w+)>\", string)\r\n # Print the first group capture\r\n print(\"Close your {} tag!\".format(notmatch_tag.group(1)))\r\n\r\n## Reeepeated characters\r\n# Back to your sentiment analysis! Your next task is to replace elongated words that appear in the tweets. We define an elongated word as a word that contains a repeating character twice or more times. e.g. \"Awesoooome\".\r\n\r\n# Replacing those words is very important since a classifier will treat them as a different term from the source words lowering their frequency.\r\n\r\n# To find them, you will use capturing groups and reference them back using numbers. E.g \\4.\r\n\r\n# If you want to find a match for Awesoooome. You first need to capture Awes. Then, match o and reference the same character back, and then, me.\r\n\r\n# The list sentiment_analysis, containing the text of three tweets, and the re module are loaded in your session. You can use print() to view the data in the IPython Shell.\r\n\r\n# Instructions\r\n# 100 XP\r\n# Complete the regular expression to match an elongated word as described.\r\n# Search the elements in sentiment_analysis list to find out if they contain elongated words. Assign the result to match_elongated.\r\n# Assign the captured group number zero to the variable elongated_word.\r\n# Print the result contained in the variable elongated_word.\r\n\r\n# Complete the regex to match an elongated word\r\nregex_elongated = r\"\\w+(\\w)\\1\\w*\"\r\n\r\nfor tweet in sentiment_analysis:\r\n\t# Find if there is a match in each tweet \r\n\tmatch_elongated = re.search(regex_elongated, tweet)\r\n \r\n\tif match_elongated:\r\n\t\t# Assign the captured group zero \r\n\t\telongated_word = match_elongated.group(0)\r\n \r\n\t\t# Complete the format method to print the word\r\n\t\tprint(\"Elongated word found: {word}\".format(word=elongated_word))\r\n\telse:\r\n\t\tprint(\"No elongated word found\") \r\n\r\n\r\n## Surrounding words\r\n# Now, you want to perform some visualizations with your sentiment_analysis dataset. You are interested in the words surrounding python. You want to count how many times a specific words appears right before and after it.\r\n\r\n# Positive lookahead (?=) makes sure that first part of the expression is followed by the lookahead expression. Positive lookbehind (?<=) returns all matches that are preceded by the specified pattern.\r\n\r\n# The variable sentiment_analysis, containing the text of one tweet, and the re module are loaded in your session. You can use print() to view the data in the IPython Shell.\r\n\r\n# Instructions 1/2\r\n# 50 XP\r\n# Get all the words that are followed by the word python in sentiment_analysis. Print out the word found.\r\n\r\n# Positive lookahead\r\nlook_ahead = re.findall(r\"\\w+(?=\\spython)\", sentiment_analysis)\r\n\r\n# Print out\r\nprint(look_ahead)\r\n\t\r\n# Get all the words that are preceded by the word python or Python in sentiment_analysis. 
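Lookarounds match a position rather than consuming text, which a tiny standalone run makes concrete; the sentence is invented, not the course's dataset.

import re

text = "I love python and learning python daily"  # invented example

print(re.findall(r"\w+(?=\spython)", text))   # words followed by ' python'  -> ['love', 'learning']
print(re.findall(r"(?<=python\s)\w+", text))  # words preceded by 'python '  -> ['and', 'daily']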
Print out the words found.\r\n\r\n# Positive lookbehind\r\nlook_behind = re.findall(r\"(?<=[Pp]ython\\s)\\w+\", sentiment_analysis)\r\n\r\n# Print out\r\nprint(look_behind)\r\n\r\n## Filtering phone numbers\r\n# Now, you need to write a script for a cell-phone searcher. It should scan a list of phone numbers and return those that meet certain characteristics.\r\n\r\n# The phone numbers in the list have the structure:\r\n\r\n# Optional area code: 3 numbers\r\n# Prefix: 4 numbers\r\n# Line number: 6 numbers\r\n# Optional extension: 2 numbers\r\n# E.g. 654-8764-439434-01.\r\n\r\n# You decide to use .findall() and the non-capturing group's negative lookahead (?!) and negative lookbehind (? 0:\n cata_list = []\n for catalogue_src in catalogue_list:\n c_name = catalogue_src.find_element_by_xpath(\"div[@class='catalogue-desc']/h2[@class='catalogue-name']\").text\n try:\n c_date = catalogue_src.find_element_by_xpath(\"div[@class='catalogue-desc']/span[@class='catalogue-endDate']\").text\n except:\n c_date = \"No Date\"\n if c_date == \"\":\n c_date = \"No Date\"\n c_pdf_url = catalogue_src.find_element_by_xpath(\"div[@class='catalogue-desc']/div[@class='catalogue-actionLinks']/a[@class='icon-links link-download']\").get_attribute(\"href\")\n new_catalogue = catalogue(c_name, c_date, c_pdf_url, self.web_name, self.save_folder, self.history_path)\n cata_list.append(new_catalogue)\n\n for cata in cata_list:\n cata.set_driver(self.web_driver)\n cata.download_pdf()\n else:\n try:\n c_name = self.web_driver.find_element_by_xpath(\"//div[@class='catalogue-desc']/h2[@class='catalogue-name']\").text\n try:\n c_date = self.web_driver.find_element_by_xpath(\"//div[@class='catalogue-desc']/span[@class='catalogue-endDate']\").text\n except:\n c_date = \"No Date\"\n if c_date == \"\":\n c_date = \"No Date\"\n c_pdf_url = self.web_driver.find_element_by_xpath(\"//a[@class='icon-toolbar toolbar-pdf toolTip pdfDownloadLink onclick_tracking']\").get_attribute(\"href\")\n new_catalogue = catalogue(c_name, c_date, c_pdf_url, self.web_name, self.save_folder, self.history_path)\n new_catalogue.set_driver(self.web_driver)\n new_catalogue.download_pdf()\n except:\n pass\n \n def take_screenshot(self):\n self.web_driver.get_screenshot_as_file(join(self.scrsh_folder, self.web_name + \"_\" + str(datetime.datetime.now().date()) + \".png\"))","sub_path":"download_scripts/officeworks_dl.py","file_name":"officeworks_dl.py","file_ext":"py","file_size_in_byte":4411,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"301804513","text":"if __name__ == '__main__':\n with open('input', 'r') as f:\n input = list(map(int, f.read().split(',')))\n\n game_state = {num: (idx + 1) for idx, num in enumerate(input[:-1])}\n prev_num = act_num = input[-1]\n for prev_idx in range(len(input), 2020):\n act_num = prev_idx - game_state[prev_num] if prev_num in game_state else 0\n game_state[prev_num] = prev_idx\n prev_num = act_num\n print(act_num)\n","sub_path":"day15/day15_1.py","file_name":"day15_1.py","file_ext":"py","file_size_in_byte":435,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"96562754","text":"import time\nimport pickle\nimport tensorflow as tf\nimport argparse\nimport os, re\nimport numpy as np\nimport h5py\nimport skimage as ski\nimport skimage.data\nimport skimage.transform\nimport cv2\n\nimport tensorflow.contrib.layers as layers\nfrom tensorflow.contrib.framework import arg_scope\n\nFLAGS = tf.app.flags.FLAGS\n\n# 
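Earlier in this batch, the day-15 script compresses the "memory game" rule into one conditional: the next number is the gap since the previous number was last spoken, or 0 if it is new. A standalone trace with an invented starting list (the real puzzle input differs) makes the recurrence visible.

start = [0, 3, 6]  # invented starting numbers for illustration

last_seen = {n: i + 1 for i, n in enumerate(start[:-1])}  # number -> turn it was last spoken
prev = start[-1]
for turn in range(len(start), 10):
    nxt = turn - last_seen[prev] if prev in last_seen else 0  # gap, or 0 if unseen
    last_seen[prev] = turn
    prev = nxt
print(prev)  # the 10th number spoken; 0 for this starting list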
RGB\ndata_mean = [123.68, 116.779, 103.939]\ndata_std = [70.59564226, 68.52497082, 71.41913876]\n#MEAN_BGR = [103.939, 116.779, 123.68]\n#DATA_STD = [71.41913876, 68.52497082, 70.59564226]\n\nmodel_depth = 121\nblock_sizes = [6,12,24,16]\n# top1error = 25.41 - top5error = 7.87 (236.8 examples/sec; 0.422 sec/batch)\n\n#model_depth = 169\n#block_sizes = [6,12,32,32]\n# top1error = 24.13 - top5error = 7.03 (198.5 examples/sec; 0.504 sec/batch)\n\n\ninit_dir = '/home/kivan/datasets/pretrained/dense_net/'\n\nweight_decay = 1e-4\n#init_func = layers.variance_scaling_initializer(mode='FAN_OUT')\ninit_func = layers.variance_scaling_initializer()\n\nk = 32\ncompression = 0.5\n\n#data_format = 'NCHW'\n#maps_dim = 1\n#height_dim = 2\n\ndata_format = 'NHWC'\nmaps_dim = 3\nheight_dim = 1\n\nbn_params = {\n # Decay for the moving averages.\n #'decay': 0.999,\n 'decay': 0.9,\n 'center': True,\n 'scale': True,\n # epsilon to prevent 0s in variance.\n #'epsilon': 0.001,\n 'epsilon': 1e-5,\n # None to force the updates\n 'updates_collections': None,\n 'fused': True,\n 'data_format': data_format,\n 'is_training': True\n}\n\n\ndef normalize_input(img):\n if data_format == 'NCHW':\n img = tf.transpose(img, perm=[0,3,1,2])\n mean = tf.constant(data_mean, dtype=tf.float32, shape=[1,3,1,1])\n std = tf.constant(data_std, dtype=tf.float32, shape=[1,3,1,1])\n else:\n mean = data_mean\n std = data_mean\n return (img - mean) / std\n\n\ndef BNReluConv(net, num_filters, name, k=3):\n with arg_scope([layers.conv2d],\n data_format=data_format, stride=1, padding='SAME', activation_fn=None,\n normalizer_fn=None, normalizer_params=None,\n weights_initializer=init_func, biases_initializer=None,\n weights_regularizer=layers.l2_regularizer(weight_decay)):\n with tf.variable_scope(name):\n net = tf.contrib.layers.batch_norm(net, **bn_params)\n net = tf.nn.relu(net)\n net = layers.conv2d(net, num_filters, kernel_size=k)\n return net\n\ndef layer(net, num_filters, name, is_training, k=3):\n with tf.variable_scope(name):\n net = BNReluConv(net, 4*num_filters, 'bottleneck', k=1)\n net = BNReluConv(net, num_filters, 'conv', k=3)\n #with tf.variable_scope('bottleneck'):\n # net = tf.contrib.layers.batch_norm(net, **bn_params)\n # net = tf.nn.relu(net)\n # net = layers.conv2d(net, 4*num_filters, kernel_size=1)\n #with tf.variable_scope('conv'):\n # net = tf.contrib.layers.batch_norm(net, **bn_params)\n # net = tf.nn.relu(net)\n # net = layers.conv2d(net, num_filters, kernel_size=k)\n #if is_training: \n #net = tf.nn.dropout(net, keep_prob=0.8)\n return net\n\ndef dense_block(net, size, k, name, is_training):\n with tf.variable_scope(name):\n for i in range(size):\n x = net\n net = layer(net, k, 'layer'+str(i), is_training)\n net = tf.concat([x, net], maps_dim)\n return net\n\ndef transition(net, compression, name):\n with tf.variable_scope(name):\n net = tf.contrib.layers.batch_norm(net, **bn_params)\n net = tf.nn.relu(net)\n num_filters = net.get_shape().as_list()[maps_dim]\n num_filters = int(round(num_filters*compression))\n net = layers.conv2d(net, num_filters, kernel_size=1, data_format=data_format)\n net = layers.avg_pool2d(net, 2, stride=2, padding='SAME', data_format=data_format)\n return net\n\ndef build(image, is_training=False):\n bn_params['is_training'] = is_training\n image = normalize_input(image)\n print(image)\n \n with arg_scope([layers.conv2d],\n data_format=data_format, stride=1, padding='SAME', activation_fn=None,\n normalizer_fn=None, normalizer_params=None,\n weights_initializer=init_func, 
biases_initializer=None,\n weights_regularizer=layers.l2_regularizer(weight_decay)):\n with tf.variable_scope('conv0'):\n net = layers.conv2d(image, 2*k, 7, stride=2, padding='VALID')\n net = tf.contrib.layers.batch_norm(net, **bn_params)\n net = tf.nn.relu(net)\n\n net = layers.max_pool2d(net, 3, stride=2, padding='SAME',\n data_format=data_format, scope='pool0')\n net = dense_block(net, block_sizes[0], k, 'block0', is_training)\n print(net)\n net = transition(net, compression, 'block0/transition')\n print(net)\n net = dense_block(net, block_sizes[1], k, 'block1', is_training)\n print(net)\n net = transition(net, compression, 'block1/transition')\n print(net)\n net = dense_block(net, block_sizes[2], k, 'block2', is_training)\n print(net)\n net = transition(net, compression, 'block2/transition')\n print(net)\n net = dense_block(net, block_sizes[3], k, 'block3', is_training)\n feats=net\n print(net)\n\n with tf.variable_scope('head'):\n net = tf.contrib.layers.batch_norm(net, **bn_params)\n net = tf.nn.relu(net)\n net = tf.reduce_mean(net, axis=[height_dim,height_dim+1])\n print(net)\n logits = layers.fully_connected(net, 1000, activation_fn=None,\n scope='fc1000')\n prob = tf.nn.softmax(logits)\n return logits, prob, feats\n\ndef shuffle_data(data_x, data_y):\n indices = np.arange(data_x.shape[0])\n np.random.shuffle(indices)\n shuffled_data_x = np.ascontiguousarray(data_x[indices])\n shuffled_data_y = np.ascontiguousarray(data_y[indices])\n return shuffled_data_x, shuffled_data_y\n\nif __name__ == '__main__':\n img_size = 224\n image = tf.placeholder(tf.float32, [None, img_size, img_size, 3], 'input')\n labels = tf.placeholder(tf.int32, [None], 'label')\n logits, prob, _ = build(image, is_training=False)\n #init_op, init_feed = create_init_op(resnet_param)\n init_path = init_dir + 'dense_net_' + str(model_depth) + '.pickle'\n #init_map = np.load(init_path)\n with open(init_path, 'rb') as f:\n init_map = pickle.load(f)\n init_op, init_feed = tf.contrib.framework.assign_from_values(init_map)\n\n all_vars = tf.contrib.framework.get_variables()\n for v in all_vars:\n #print(v.name)\n if v.name in init_map:\n pass\n #del init_map[v.name]\n else:\n print(v.name)\n raise 'Not in'\n #print('Dont exist: ', list(init_map.keys()))\n\n\n sess = tf.Session()\n #sess.run(tf.initialize_all_variables())\n #sess.run(tf.initialize_local_variables())\n sess.run(init_op, feed_dict=init_feed)\n\n batch_size = 100\n\n data_path = '/home/kivan/datasets/imagenet/ILSVRC2015/numpy/val_data.hdf5'\n h5f = h5py.File(data_path, 'r')\n data_x = h5f['data_x'][()]\n print(data_x.shape)\n data_y = h5f['data_y'][()]\n h5f.close()\n y_pred = np.zeros((data_y.shape[0]), dtype=np.int32)\n #data_std = data_x.std((0,1,2), dtype=np.float64)\n #print(data_std)\n b_tmp = data_x[...,0].copy()\n data_x[...,0] = data_x[...,2]\n data_x[...,2] = b_tmp\n\n N = data_x.shape[0]\n assert N % batch_size == 0\n num_batches = N // batch_size\n\n top5_error = tf.nn.in_top_k(logits, labels, 5)\n top5_wrong = 0\n cnt_wrong = 0\n #num_batches=100\n for i in range(num_batches):\n offset = i * batch_size\n batch_x = data_x[offset:offset+batch_size, ...]\n batch_y = data_y[offset:offset+batch_size, ...]\n #print(batch_y)\n start_time = time.time()\n #batch_x = batch_x.astype(np.float32)\n #batch_x -= MEAN_BGR\n #batch_x /= DATA_STD\n #batch_x -= MEAN_RGB\n #batch_x /= DATA_STD_RGB\n #print(batch_x.mean((0,1,2)))\n #print(batch_x.std((0,1,2)))\n logits_val, prob_val, top5 = sess.run([logits, prob, top5_error],\n feed_dict={image:batch_x, 
labels:batch_y})\n #print(np.max(prob_val, 1))\n duration = time.time() - start_time\n num_examples_per_step = batch_size\n examples_per_sec = num_examples_per_step / duration\n sec_per_batch = float(duration)\n\n top5_wrong += (top5==0).sum()\n yp = logits_val.argmax(1).astype(np.int32)\n y_pred[offset:offset+batch_size] = yp\n cnt_wrong += (yp != batch_y).sum()\n if i % 10 == 0:\n print('[%d / %d] top1error = %.2f - top5error = %.2f (%.1f examples/sec; %.3f sec/batch)' % (i, num_batches,\n cnt_wrong / ((i+1)*batch_size) * 100, top5_wrong / ((i+1)*batch_size) * 100,\n examples_per_sec, sec_per_batch))\n print(cnt_wrong / N)\n print(top5_wrong / N)\n\n#data_y = data_y[:num_batches*batch_size]\n#for i in range(100):\n# mask = data_y == i\n# print(y_pred[mask])\n","sub_path":"OLD/models/imagenet/dense_net/dense_net.py","file_name":"dense_net.py","file_ext":"py","file_size_in_byte":8444,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"424559886","text":"#!/usr/bin/env python3\n\nfrom random import choice, randint\n\nEMPTY = '~'\nSHIP = '#'\nHIT = 'X'\nMISS = '.'\nVERTICAL = True\nHORIZONTAL = False\nSIZE = 10\nMAX_SHIP_LENGTH = 4\nSHIPS = sorted([l for v in range(1, 5) for l in range(v, 0, -1)], reverse=True)\n\n\nclass Field:\n def __init__(self, size=SIZE):\n self.field = self._new_field()\n self.ships_allocated = self._new_field()\n\n def _new_field(self, size=SIZE, cr=EMPTY):\n field = []\n for _ in range(size):\n line = []\n for _ in range(size):\n line.append(cr)\n field.append(line)\n return field\n\n def _get_neighbors(self, field, x, y):\n n = [\n (x-1, y-1), (x, y-1), (x+1, y-1),\n (x-1, y), (x, y), (x+1, y),\n (x-1, y+1), (x, y+1), (x+1, y+1)\n ]\n return [a for a in n if a[0] in range(SIZE) and a[1] in range(SIZE)]\n\n def can_add(self, x, y, size, direction):\n # Размер корабля не превышает размеры поля\n if size > SIZE:\n return False\n # Координаты \"головы\" не выходят за границы поля\n elif not (0 <= x < SIZE) or not (0 <= y < SIZE):\n return False\n # \"Хвост\" не выходит за границы поля\n elif (direction == HORIZONTAL and x+size > SIZE) or \\\n (direction == VERTICAL and y+size > SIZE):\n return False\n # Корабль не пересекается с другим по горизонтали\n elif (direction == HORIZONTAL) and \\\n (SHIP in self.ships_allocated[y][x:x+size]):\n return False\n # Корабль не пересекается с другим по вертикали\n elif (direction == VERTICAL) and \\\n (SHIP in list(zip(*self.ships_allocated))[x][y:y+size]):\n return False\n else:\n return True\n\n def add_ship(self, x, y, size, direction=HORIZONTAL):\n if direction == HORIZONTAL:\n self.field[y][x:x+size] = SHIP * size\n else:\n for yy in range(y, y+size):\n self.field[yy][x] = SHIP\n\n if direction == HORIZONTAL:\n for xx in range(x, x+size):\n n = self._get_neighbors(self.field, xx, y)\n for nn in n:\n self.ships_allocated[nn[1]][nn[0]] = SHIP\n else:\n for yy in range(y, y+size):\n n = self._get_neighbors(self.field, x, yy)\n for nn in n:\n self.ships_allocated[nn[1]][nn[0]] = SHIP\n\n def can_shoot(self, x, y):\n if not (0 <= x < SIZE) or not (0 <= y < SIZE):\n return False\n elif self.field[y][x] in (HIT, MISS):\n return False\n else:\n return True\n\n def shoot(self, x, y):\n if not self.can_shoot(x, y):\n return False\n\n if self.field[y][x] == SHIP:\n self.field[y][x] = HIT\n return True\n elif self.field[y][x] == EMPTY:\n self.field[y][x] = MISS\n return False\n\n def arrange_ships(self):\n for ship in SHIPS:\n x = randint(0, SIZE-1)\n y = randint(0, SIZE-1)\n direction 
= choice((VERTICAL, HORIZONTAL))\n while not self.can_add(x, y, ship, direction):\n x = randint(0, SIZE-1)\n y = randint(0, SIZE-1)\n direction = choice((VERTICAL, HORIZONTAL))\n self.add_ship(x, y, ship, direction)\n\n def if_still_alive(self):\n if SHIP in set([s for x in self.field for s in x]):\n return True\n return False\n\n def print_(self, field):\n for line in field:\n print(''.join(line))\n\n def hide_ships(self):\n field = []\n for line in self.field:\n s = ''.join(line)\n s = s.replace(SHIP, EMPTY)\n field.append(list(s))\n return field\n","sub_path":"field.py","file_name":"field.py","file_ext":"py","file_size_in_byte":4007,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"493951793","text":"from django.shortcuts import render, get_object_or_404, redirect\nfrom webapp.models import Product\nfrom webapp.forms import ProductForm, SearchProduct\n\n# Create your views here.\n\n\ndef list_product(request, *args, **kwargs):\n products = Product.objects.order_by('category', 'name').filter(count__gt=0)\n search = SearchProduct()\n return render(request, 'index.html', context={\n 'products': products,\n 'search' : search\n })\n\n\ndef product_view(request, pk):\n products = get_object_or_404(Product, pk=pk)\n return render(request, 'product.html', context={\n 'products': products\n })\n\n\ndef product_create(request, *args, **kwargs):\n if request.method == 'GET':\n form = ProductForm()\n return render(request, 'create.html', context={\n 'form': form\n })\n elif request.method == 'POST':\n form = ProductForm(data=request.POST)\n if form.is_valid():\n products = Product.objects.create(\n name=form.cleaned_data['name'],\n description=form.cleaned_data['description'],\n category=form.cleaned_data['category'],\n count=form.cleaned_data['count'],\n price=form.cleaned_data['price']\n )\n return redirect('index')\n else:\n return render(request, 'create.html', context={\n 'form': form\n })\n\n\ndef product_update(request, pk):\n products = get_object_or_404(Product, pk=pk)\n if request.method == 'GET':\n form = ProductForm(data={\n 'name': products.name,\n 'description': products.description,\n 'category': products.category,\n 'count': products.count,\n 'price': products.price\n })\n return render(request, 'update.html', context={\n 'form': form,\n 'products': products\n })\n elif request.method == 'POST':\n form = ProductForm(data=request.POST)\n if form.is_valid():\n products.name= form.cleaned_data['name']\n products.description= form.cleaned_data['description']\n products.category= form.cleaned_data['category']\n products.count= form.cleaned_data['count']\n products.price= form.cleaned_data['price']\n products.save()\n return redirect('index')\n else:\n return render(request, 'update.html', context={\n 'form': form,\n 'products': products\n })\n\n\ndef product_delete(request, pk):\n products = get_object_or_404(Product, pk=pk)\n if request.method == 'GET':\n return render(request, 'delete.html', {\n 'products': products\n })\n elif request.method == 'POST':\n products.delete()\n return redirect('index')\n\n\ndef product_search(request):\n query = request.GET.get('search')\n products = Product.objects.filter(name__contains=query)\n return render(request, 'index.html', context={\n 'products': products\n })\n\n\n","sub_path":"source/webapp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3037,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"514830279","text":"# 
CircuitPython 3.0 CRICKIT dMake It Bubble\nimport time\nfrom adafruit_crickit import crickit\n\nmotor_2 = crickit.dc_motor_2\nmotor_2.throttle = 1 # full speed forward\n\nwhile True:\n print(\"servo up\")\n crickit.servo_1.angle = 30\n time.sleep(2)\n print(\"servo down\")\n crickit.servo_1.angle = 145\n time.sleep(0.4)\n","sub_path":"Make_It_Bubble/code.py","file_name":"code.py","file_ext":"py","file_size_in_byte":326,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"89037662","text":"from django.conf.urls import url, include\nfrom django.contrib import admin\nfrom django.conf import settings\nfrom django.conf.urls.static import static\n\nurlpatterns = [\n url(r'^admin/', admin.site.urls),\n url(r'^coming_soon', include('coming_soon.urls')),\n url(r'^user/', include('user_resume.urls', namespace='user_resume')),\n url(r'^users/', include('user_resume_site.urls', namespace='user_resume_site')),\n url(r'^catalog/', include('public_area_catalog.urls', namespace='public_area_catalog')),\n url(r'^', include('user_auth.urls', namespace='user_auth')),\n url(r'', include('public_area.urls', namespace='public_area')),\n]\n\nif settings.DEBUG:\n urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n","sub_path":"django_resume/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":749,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"274010441","text":"from flask import flash, current_app\nfrom flask_security import login_user, current_user\nfrom flask_dance.contrib.facebook import make_facebook_blueprint, facebook\nfrom flask_dance.contrib.google import make_google_blueprint, google\nfrom flask_dance.consumer import oauth_authorized\nfrom flask_dance.consumer.storage.sqla import SQLAlchemyStorage\nfrom application.models import db, OAuth, User\n\n\ndef init_flask_dance(app):\n facebook_bp = create_facebook_bluprint(app)\n google_bp = create_google_blueprint()\n\n # register auth blueprints to the app\n app.register_blueprint(facebook_bp)\n app.register_blueprint(google_bp)\n\n\ndef create_facebook_bluprint(app):\n \"\"\"\n Create facebook auth blueprint with default config variables client id and\n secret\n\n https://flask-dance.readthedocs.io/en/latest/providers.html#module-flask_dance.contrib.facebook\n\n Default authorized redirect uri: /facebook/authorized\n (use to register in facebook login setting )\n\n \"\"\"\n\n fb_blueprint = make_facebook_blueprint(scope='email')\n\n @oauth_authorized.connect_via(fb_blueprint)\n def facebook_logged_in(bp, token):\n if not token:\n flash('Fail to login with facebook')\n return False\n\n resp = facebook.get('/me?fields=email,name,picture')\n if not resp.ok:\n flash('Fail to get user profile form facebook', category=\"error\")\n return False\n\n # example response\n '''\n {\n 'email': 'user@email.com',\n\n 'picture': {'data': {'height': 50,\n 'is_silhouette': False,\n 'url': 'https://image_url',\n 'width': 50}},\n 'id': '128371982371892'\n }\n '''\n\n user_info = resp.json()\n picture = None\n try:\n picture = user_info.get('picture').get('data').get('url')\n except:\n pass\n\n try:\n ok = _save_and_login_user(user_id=user_info.get('id'),\n user_name=user_info.get('name'),\n email=user_info.get('email'),\n picture=picture,\n token=token,\n provider_name=fb_blueprint.name)\n if ok:\n flash(\"Successfully signed in with Facebook.\")\n\n except Exception as e:\n current_app.logger.error(\n \"[Flask Dance: facebook] 
cannot save and login user %s\" %\n str(e))\n db.session.rollback()\n\n # prevent flask-dance trigger twice\n return False\n\n return fb_blueprint\n\n\ndef create_google_blueprint():\n \"\"\"\n Google use default config variables\n GOOGLE_OAUTH_CLIENT_ID and GOOGLE_OAUTH_CLIENT_SECRET\n https://developers.google.com/identity/protocols/oauth2/scopes#google-sign-in\n https://flask-dance.readthedocs.io/en/latest/providers.html#module-flask_dance.contrib.facebook\n Default redirect uri: /google/authorized\n \"\"\"\n\n g_blueprint = make_google_blueprint(scope=[\n 'https://www.googleapis.com/auth/userinfo.email',\n 'https://www.googleapis.com/auth/userinfo.profile', 'openid'\n ])\n\n @oauth_authorized.connect_via(g_blueprint)\n def google_logged_in(bp, token):\n if not token:\n flash('Fail to login with facebook')\n return False\n\n # resp = facebook.get('/me?fields=email,name,picture')\n resp = google.get(\"/oauth2/v2/userinfo\")\n if resp.status_code != 200:\n flash('Fail to get user profile form facebook', category='error')\n return False\n \"\"\" example response\n {\n \"id\": \"2942903842893\",\n \"email\": \"youmail@gmail.com\"\n \"name\": \"Firstname Lastname\",\n \"given_name\": \"firstname\",\n \"family_name\": \"lastname\",\n \"link\": \"https://plus.google.com/3247298347928734\",\n \"picture\": \"https://lh6.googleusercontent.com/asdkfjskfjsdkjf\",\n \"gender\": \"male\",\n \"locale\": \"en\"\n }\n\n \"\"\"\n user_info = resp.json()\n\n try:\n ok = _save_and_login_user(user_id=user_info.get('id'),\n user_name=user_info.get('name'),\n email=user_info.get('email'),\n picture=user_info.get('picture'),\n token=token,\n provider_name=g_blueprint.name)\n if ok:\n flash(\"Successfully signed in with Google.\")\n except Exception as e:\n current_app.logger.error(\n \"[Flask Dance: Google] cannot save and login user %s\" % str(e))\n db.session.rollback()\n\n return False\n\n return g_blueprint\n\n\ndef _save_and_login_user(user_id=None,\n user_name=None,\n email=None,\n picture=None,\n token=None,\n provider_name=None):\n if not email:\n flash('Fail to search user with no email', category='error')\n return False\n\n # check oauth record\n auth = OAuth.query.filter_by(provider=provider_name,\n provider_user_id=user_id).first()\n\n current_app.logger.debug(auth)\n # create auth if not existing\n if not auth:\n auth = OAuth(provider=provider_name,\n provider_user_id=user_id,\n token=token)\n\n db.session.add(auth)\n\n # check associated user\n # if not auth.user:\n user = User.query.filter_by(email=email).first()\n if not user:\n user = User(email=email, name=user_name, active=True)\n\n auth.user = user\n if picture:\n auth.user.avatar = picture\n\n db.session.commit()\n db.session.refresh(user)\n login_user(user)\n return True\n","sub_path":"flask_turbo_boost/project/application/utils/flask_dance.py","file_name":"flask_dance.py","file_ext":"py","file_size_in_byte":6013,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"177529170","text":"import numpy as numpy\nimport cv2\n\ncaptured_frame = cv2.VideoCapture(0)\n#print(cv2.__version__)\n\n#append all 80 types of objects detectable by coco in this list\nclass_names = list()\nwith open('coco.names', 'rt') as coco_names:\n class_names = coco_names.read().rstrip(\"\\n\").split(\"\\n\")\n#print(class_names)\n\nwhile(True):\n success, image = captured_frame.read()\n\n #cv2.imshow(\"Image\", image)\n 
cv2.waitKey(1)","sub_path":"detect.py","file_name":"detect.py","file_ext":"py","file_size_in_byte":415,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"218762364","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n# Licensed to the Apache Software Foundation (ASF) under one or more\n# contributor license agreements. See the NOTICE file distributed with\n# this work for additional information regarding copyright ownership.\n# The ASF licenses this file to You under the Apache License, Version 2.0\n# (the \"License\"); you may not use this file except in compliance with\n# the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport plugins.basetypes\nimport plugins.session\nimport aiohttp\n\n\"\"\" GitHub org invite endpoint for Boxer\"\"\"\n\n\nasync def process(\n server: plugins.basetypes.Server, session: plugins.session.SessionObject, indata: dict\n) -> dict:\n # Administrative locking of account\n if indata.get('lock') and session.credentials and session.credentials.admin:\n user_to_lock = indata.get('lock')\n for person in server.data.people:\n if person.asf_id == user_to_lock:\n print(f\"Unlinking GitHub login from user {user_to_lock} (locked by {session.credentials.uid})\")\n # Remove from sqlite\n person.remove(server.database.client)\n # Remove from server cache\n server.data.people.remove(person)\n return {\n \"okay\": True,\n \"message\": \"unlinked from GitHub\",\n }\n return {\n \"okay\": False,\n \"message\": \"Could not unlink - account not found in database!\",\n }\n # Unlinking personal account\n if indata.get('unlink') and session.credentials:\n for person in server.data.people:\n if person.asf_id == session.credentials.uid:\n print(f\"Unlinking GitHub login from user {person.asf_id}\")\n person.github_login = \"\"\n person.save(server.database.client)\n return {\n \"okay\": True,\n \"reauth\": True,\n \"message\": \"unlinked from GitHub\",\n }\n return {\n \"okay\": False,\n \"message\": \"Could not unlink - account not found in database!\",\n }\n if not session.credentials.github_id:\n if session.credentials.github_login:\n for person in server.data.people:\n if person.github_login == session.credentials.github_login:\n print(f\"Removing stale GitHub login from user {person.asf_id}\")\n person.github_login = \"\"\n person.save(server.database.client)\n break\n return {\n \"okay\": False,\n \"reauth\": True,\n \"message\": \"Could not invite to Org - missing numerical GitHub ID.\",\n }\n if session.credentials and session.credentials.github_login:\n invite_url = f\"https://api.github.com/orgs/{server.config.github.org}/invitations\"\n async with aiohttp.ClientSession() as httpsession:\n headers = {\n 'Accept': 'application/vnd.github.v3+json',\n 'Authorization': f\"token {server.config.github.token}\",\n }\n async with httpsession.post(invite_url, headers=headers, json={\n \"invitee_id\": session.credentials.github_id,\n \"role\": \"direct_member\",\n }) as rv:\n response = await rv.json()\n if rv.status == 201:\n return {\n \"okay\": True,\n \"message\": \"Invitation sent!\",\n }\n else:\n return {\n \"okay\": False,\n \"message\": \"Could 
not invite to Org - already invited?\",\n                    }\n    else:\n        return {\n            \"okay\": False,\n            \"message\": \"You need to be authed via GitHub before we can send an invite link to you.\",\n        }\n\n\ndef register(server: plugins.basetypes.Server):\n    return plugins.basetypes.Endpoint(process)\n","sub_path":"server/endpoints/invite.py","file_name":"invite.py","file_ext":"py","file_size_in_byte":4372,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"355254009","text":"from __future__ import print_function\n\nimport tensorflow as tf\nimport keras\nfrom keras.datasets import mnist\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Dropout\nfrom keras.optimizers import RMSprop, SGD\nimport numpy as np\nimport matplotlib.pyplot as plt\n#print('tensorflow:', tf.__version__)\n#print('keras:', keras.__version__)\n\n\n#load (first download if necessary) the MNIST dataset\n# (the dataset is stored in your home directory in ~/.keras/datasets/mnist.npz\n# and will take ~11MB)\n# data is already split in train and test datasets\n(x_train, y_train), (x_test, y_test) = mnist.load_data()\n\n# x_train : 60000 images of size 28x28, i.e., x_train.shape = (60000, 28, 28)\n# y_train : 60000 labels (from 0 to 9)\n# x_test : 10000 images of size 28x28, i.e., x_test.shape = (10000, 28, 28)\n# y_test : 10000 labels\n# all datasets are of type uint8\n\n\n#To feed the images into a Dense layer, we need to flatten the datasets, i.e.,\n# pass from (60000, 28, 28) to (60000, 784)\n#flatten images\nnum_pixels = x_train.shape[1] * x_train.shape[2]\nx_train = x_train.reshape(x_train.shape[0], num_pixels)\nx_test = x_test.reshape(x_test.shape[0], num_pixels)\n\n#Convert to float\nx_train = x_train.astype('float32')\nx_test = x_test.astype('float32')\n\n#Normalize inputs from [0; 255] to [0; 1]\nx_train = x_train / 255\nx_test = x_test / 255\n\n\n#We want to have a binary classification: digit 0 is classified 1 and \n#all the other digits are classified 0\n\ny_new = np.zeros(y_train.shape)\ny_new[np.where(y_train==0.0)[0]] = 1\ny_train = y_new\n\ny_new = np.zeros(y_test.shape)\ny_new[np.where(y_test==0.0)[0]] = 1\ny_test = y_new\n\n\nnum_classes = 1\n\n\n#Let's start our work: creating a neural network\n#First, we just use a single neuron. 
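With a sigmoid activation a single unit\n#computes sigma(w.x + b) over the 784 flattened pixels, i.e. plain logistic\n#regression, so this first model doubles as a sanity-check baseline.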
\n\n#####TO COMPLETE\n\nmodel = Sequential()\n# sigmoid (not tanh) keeps the output in [0, 1], which binary_crossentropy expects\nmodel.add(Dense(1, activation='sigmoid', input_dim=784))\n\nsgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)\nmodel.compile(loss='binary_crossentropy',\n              optimizer=sgd,\n              metrics=['accuracy'])\n\nhistory=model.fit(x_train, y_train,validation_split=0.25,\n                  epochs=100,\n                  batch_size=128)\n\ntestScore = model.evaluate(x_test, y_test, batch_size=128)\nprint(testScore)\n\n# Plot training & validation accuracy values\nplt.plot(history.history['acc'])\nplt.plot(history.history['val_acc'])\nplt.title('Model accuracy')\nplt.ylabel('Accuracy')\nplt.xlabel('Epoch')\nplt.legend(['Train', 'Validation'], loc='upper left')\nplt.show()\n\n# Plot training & validation loss values\nplt.plot(history.history['loss'])\nplt.plot(history.history['val_loss'])\nplt.title('Model loss')\nplt.ylabel('Loss')\nplt.xlabel('Epoch')\nplt.legend(['Train', 'Validation'], loc='upper left')\nplt.show()","sub_path":"Implementing a Neural Network with Keras/lab3_1.py","file_name":"lab3_1.py","file_ext":"py","file_size_in_byte":2683,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"414017951","text":"# coding=utf-8\n# unit tests for fish_common.py\n# 2018.5.15 create by David Yi\n\nimport pytest\nimport sys\nsys.path.append('../fishbase')\nfrom fishbase.fish_common import *\n\n\n# 2018.5.14 v1.0.11 #19027 create by David Yi, start of unit testing\nclass TestFishCommon(object):\n\n    # test cases where conf_as_dict() passes\n    def test_config_dict_01(self):\n        # define the config file name\n        conf_filename = './test/test_conf.ini'\n\n        # read the config file\n        ds = conf_as_dict(conf_filename)\n        d = ds[1]\n\n        # returned result\n        assert ds[0] is True\n        # returned length\n        assert ds[2] == 7\n        # value of a key under a given section\n        assert d['show_opt']['short_opt'] == 'b:d:v:p:f:'\n\n    # test cases where conf_as_dict() fails\n    def test_config_dict_02(self):\n        # define the config file name\n        conf_filename = './test/test_conf1.ini'\n\n        # read the config file\n        ds = conf_as_dict(conf_filename)\n\n        # returned result\n        assert ds[0] is False\n\n        # the returned dict content should not be readable\n        with pytest.raises(IndexError):\n            d = ds[1]\n\n    # test cases where GetMD5() passes\n    def test_md5_01(self):\n\n        assert GetMD5.string('hello world!') == 'fc3ff98e8c6a0d3087d515c0473f8677'\n        assert GetMD5.file('./test/test_conf.ini') == 'fb7528c9778b2377e30b0f7e4c26fef0'\n        assert GetMD5.big_file('./test/test_conf.ini') == 'fb7528c9778b2377e30b0f7e4c26fef0'\n\n    # test cases where GetMD5() fails\n    def test_md5_02(self):\n\n        assert GetMD5.string('hello world') != 'fc3ff98e8c6a0d3087d515c0473f8677'\n\n        if sys.version_info >= (3,):\n            with pytest.raises(FileNotFoundError):\n                GetMD5.file('./test/test_conf1.ini')\n        else:\n            with pytest.raises(IOError):\n                GetMD5.file('./test/test_conf1.ini')\n\n        assert GetMD5.file('./test/test_conf.ini') != 'bb7528c9778b2377e30b0f7e4c26fef0'\n\n    # passing and failing test cases for if_json_contain()\n    def test_json_contain_01(self):\n\n        json01 = {\"id\": \"0001\"}\n        json02 = {\"id\": \"0001\", \"value\": \"Desk\"}\n        json10 = {\"id\": \"0001\", \"value\": \"File\"}\n        json11 = {\"id\": \"0002\", \"value\": \"File\"}\n        json12 = {\"id1\": \"0001\", \"value\": \"File\"}\n\n        assert if_json_contain(json01, json10) is True\n        assert if_json_contain(json02, json10) is False\n        assert if_json_contain(json01, json11) is False\n        assert if_json_contain(json01, json12) is False\n\n    # passing and failing test cases for splice_url_params()\n    def test_splice_url_params_01(self):\n\n        dic01 = {'key1': 'value1', 'key2': 'value2'}\n        dic02 = {'key1': '1111', 'key2': 'value2'}\n\n        assert splice_url_params(dic01) == '?key1=value1&key2=value2'\n        assert splice_url_params(dic02) != '?key1=value1&key2=value2'\n\n    # 
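SingleTon is assumed (from the asserts below) to be fishbase's shared-state\n    # singleton: both constructor calls return the same underlying instance, so\n    # writes through t1 are visible through t2 and vice versa.\n    # 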
test singleton() test case\n    def test_singleton_01(self):\n\n        t1 = SingleTon()\n        t1.x = 2\n        t2 = SingleTon()\n        t1.x += 1\n\n        assert t2.x == t1.x\n\n        t2.x = 5\n        assert t1.x == 5\n","sub_path":"test/test_common.py","file_name":"test_common.py","file_ext":"py","file_size_in_byte":3047,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"527859761","text":"# If we list all the natural numbers below 10 that are multiples of 3 or 5, we get 3, 5, 6 and 9. \n# The sum of these multiples is 23.\n# Finish the solution so that it returns the sum of all the multiples of 3 or 5 below the number passed in.\n# Note: If the number is a multiple of both 3 and 5, only count it once.\n\ndef solution (num):\n    my_list = list (range(1,num))\n    list_of_multiples = []\n    for i in my_list:\n        if i%3 == 0 or i%5==0:\n            list_of_multiples.append (i)\n    return sum(list_of_multiples)\n    \n# solution (10)","sub_path":"Multiples_of_3or5.py","file_name":"Multiples_of_3or5.py","file_ext":"py","file_size_in_byte":554,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"80596728","text":"from __future__ import print_function\n\nfrom imposm.parser import OSMParser\nimport optparse\nfrom pprint import pprint\nfrom elasticsearch import Elasticsearch, TransportError\nfrom datetime import datetime, date\nimport glob\nimport time\n\nclass Node(object):\n\n    def __init__(self, osmid, tags, refs):\n        self.osmid = osmid\n        # Don't allow dots in field names\n        clean_tags = [{k.replace(\".\", \":\"): v} for k, v in tags.iteritems()]\n        self.tags = clean_tags\n        self.location = [refs[0], refs[1]]\n        self.lat = refs[1]\n        self.lng = refs[0]\n\n\nclass OSMScraper(object):\n    \"\"\"\n    OSMScraper - Reads in a PBF file, indexes in ES\n    \"\"\"\n    processed = 0\n    files_processed = []\n    mappings = {\n        'node': {\n            \"dynamic_templates\": [\n                {\n                    \"tags_exceptions\": {\n                        \"path_match\": \"tags.*\",\n                        \"match\": \"(name.*)\",\n                        \"match_pattern\": \"regex\",\n                        \"mapping\": {\n                            \"store\": \"no\",\n                            \"type\": \"multi_field\",\n                            \"fields\": {\n                                \"{name}\": {\"type\": \"string\", \"index\": \"not_analyzed\"},\n                                \"analyzed\": {\"type\": \"string\", \"index\": \"analyzed\"}\n                            }\n                        }\n                    }\n                },\n                {\n                    \"tags_default\": {\n                        \"path_match\": \"tags.*\",\n                        \"mapping\": {\"index\": \"not_analyzed\", \"store\": \"no\"}\n                    }\n                }\n            ],\n            'properties': {\n                \"location\": { \"type\": \"geo_point\" },\n                \"osmid\": {\"type\": \"string\", \"index\": \"not_analyzed\"}\n            }\n        },\n    }\n\n    settings = {\n        \"settings\": {},\n        \"mappings\": mappings\n    }\n\n    # Choose from: http://wiki.openstreetmap.org/wiki/Map_Features\n    categories = [\n        #'aeroway',\n        'amenity',\n        #'barrier',\n        'building',\n        'emergency',\n        #'public_transport', # Better bus stops\n        #'highway', # Includes bus stops, highway phones, rest areas\n        #'landuse',\n        'military',\n        #'office', # Maybe....\n        'place', #City, Country, State, Region, etc.... 
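only nodes whose tags\n        # intersect this whitelist are indexed (see the isdisjoint() check in\n        # nodes()); 'place' is kept\n        # 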
definitely\n 'power',\n ]\n\n def __init__(self, opts):\n self.es_client = Elasticsearch(opts.esendpoint)\n if opts.deleteindex:\n print('Deleting index \"{}\" before processing'.format(opts.index))\n self.es_client.indices.delete(index=opts.index, ignore=404)\n\n try:\n print('Recreating index \"{}\"'.format(opts.index))\n self.es_client.indices.create(index=opts.index, body=self.settings, ignore=400)\n except TransportError as te:\n print(te.info)\n\n # Set list of files we already processed so we can pick up where we left off (more or less...)\n if opts.resume:\n try:\n osmstatus = open('osm.status', 'r')\n files = osmstatus.read()\n self.files_processed = str.splitlines(files)\n print(\"Resuming from previous run, already processed these files: {}\".format(self.files_processed))\n osmstatus.close()\n except IOError as e:\n print(\"No status file exists, starting from scratch\")\n\n self.opts = opts\n\n def ways(self, ways):\n # Callback for ways\n # TODO: Update the mapping to be a geoshape? Or maybe just store the other IDs, and then we\n # can do an ES multiget to get that actual shape.... except we won't be able to search by\n # location in this case\n\n # refs - A list of longs (ids) of the nodes it comprises\n for osmid, tags, refs in ways:\n print(\"Refs: {}\".format(refs))\n print(\"Tags: {}\".format(tags))\n print(\"OSM ID: {}\".format(osmid))\n\n def nodes(self, nodes):\n for osmid, tags, refs in nodes:\n self.processed = self.processed + 1\n print(\"Processed {} records...\".format(self.processed), end='\\r')\n # If there is overlap between the tags and our categories\n if(not set(tags.keys()).isdisjoint(self.categories)):\n node = Node(osmid, tags, refs)\n #print \"Node: {}\".format(node.__dict__)\n try:\n self.es_client.index(index=self.opts.index, doc_type='node', body=node.__dict__, id=osmid)\n except TransportError as te:\n print(\"Exception indexing record: {}, info: {}\".format(node, te.info))\n\n def save_progress(self):\n with open('osm.status', 'w') as osmstatus:\n osmstatus.write(\"\\n\".join(self.files_processed))\n\n def add_file_processed(self, filename):\n self.files_processed.append(filename)\n\ndef parse(opts):\n scraper = OSMScraper(opts)\n if opts.filename is not None:\n print('Processing file {}'.format(opts.filename))\n parse_file(opts.filename, scraper)\n else:\n file_list = glob.glob(opts.directory + '*.pbf')\n print('Processing directory {}, with {} files'.format(opts.directory, len(file_list)))\n\n #Sorting is necessary to keep track of our progress in this directory\n list.sort(file_list)\n if opts.resume:\n file_list = [item for item in file_list if item not in scraper.files_processed]\n print(\"Picking up where we left off, so actually processing {} files\".format(len(file_list)))\n\n for filename in file_list:\n parse_file(filename, scraper)\n scraper.processed = 0\n\n\ndef parse_file(filename, scraper):\n \"\"\"\n parse_file\n \"\"\"\n print('Started parsing file: {}'.format(filename))\n p = OSMParser(concurrency=4, nodes_callback=scraper.nodes)\n start_time = time.clock()\n p.parse(filename)\n proc_time = time.clock()-start_time\n\n # Add file to processed list, and update the progress file\n scraper.add_file_processed(filename)\n scraper.save_progress()\n print(u\"\\n \\u2713 Processed {} records in {} seconds\".format(scraper.processed, proc_time))\n\n\nif __name__ == \"__main__\":\n parser = optparse.OptionParser()\n\n parser.add_option('-f', '--file',\n action=\"store\", dest=\"filename\",\n help=\"File to parse (PBF Format)\")\n\n 
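# -f and -d are mutually exclusive (enforced after parse_args at the bottom);\n    # with -d, parse() globs the given directory for '*.pbf' extracts.\n    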
parser.add_option('-d', '--dir',\n action=\"store\", dest=\"directory\",\n help=\"Directory containing PBF/XML/ files\")\n\n parser.add_option('-i', '--index',\n action=\"store\", dest=\"index\",\n help=\"Elasticsearch Index\", default=\"osm\")\n\n parser.add_option('-e', '--endpoint',\n action=\"store\", dest=\"esendpoint\",\n help=\"Elasticsearch endpoint\", default=\"localhost:9200\")\n\n categories = [\n 'aeroway',\n 'amenity',\n 'barrier',\n 'building',\n 'emergency',\n 'public_transport', # Better bus stops\n #'highway', # Includes bus stops, highway phones, rest areas\n 'landuse',\n 'military',\n 'office', # Maybe....\n 'place', #City, Country, State, Region, etc.... definitely\n 'power',\n ]\n\n parser.add_option('-c', '--categories',\n action=\"store\", dest=\"categories\",\n help=\"Categories to pull from OSM\", default=categories)\n\n parser.add_option('-x', action=\"store_true\", dest=\"deleteindex\",\n help=\"If present will delete existing index\", default=False)\n\n parser.add_option('-r', action=\"store_true\", dest=\"resume\",\n help=\"Resume where the last run left off. This checks osm.status to see if it's processed each file before.\", default=False)\n\n options, args = parser.parse_args()\n\n if options.filename is not None and options.directory is not None:\n parser.error(\"Specify either filename or directory, not both\")\n\n parse(options)","sub_path":"OSM/parseosm.py","file_name":"parseosm.py","file_ext":"py","file_size_in_byte":7893,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"9115522","text":"from django.utils import timezone\nfrom django.utils.translation import activate\nfrom django.urls import reverse_lazy\nfrom django.test import tag\nfrom django.views.generic import DetailView\n\nfrom shared_models.views import CommonDetailView\nfrom fisheriescape.test import FactoryFloor\nfrom fisheriescape.test.common_tests import CommonFisheriescapeTest as CommonTest\nfrom .. 
import views\n\n# Example how to run with keyword tags\n# python manage.py test fisheriescape.test --tag species_new\n\n\nclass TestSpeciesDetailView(CommonTest):\n def setUp(self):\n super().setUp()\n self.instance = FactoryFloor.SpeciesFactory()\n self.test_url = reverse_lazy('fisheriescape:species_detail', args=[self.instance.pk, ])\n self.expected_template = 'fisheriescape/species_detail.html'\n self.user = self.get_and_login_user(in_group=\"fisheriescape_admin\")\n\n @tag(\"Species\", \"species_detail\", \"view\")\n def test_view_class(self):\n self.assert_inheritance(views.SpeciesDetailView, CommonDetailView)\n self.assert_inheritance(views.SpeciesDetailView, views.FisheriescapeAdminAccessRequired)\n\n @tag(\"Species\", \"species_detail\", \"access\")\n def test_view(self):\n self.assert_good_response(self.test_url)\n self.assert_non_public_view(test_url=self.test_url, expected_template=self.expected_template, user=self.user)\n\n @tag(\"Species\", \"species_detail\", \"context\")\n def test_context(self):\n context_vars = [\n \"field_list\",\n ]\n self.assert_presence_of_context_vars(self.test_url, context_vars, user=self.user)\n\n @tag(\"Species\", \"species_detail\", \"correct_url\")\n def test_correct_url(self):\n # use the 'en' locale prefix to url\n self.assert_correct_url(\"fisheriescape:species_detail\", f\"/en/fisheriescape/species/{self.instance.pk}/view/\", [self.instance.pk])\n\n\nclass TestFisheryAreaDetailView(CommonTest):\n def setUp(self):\n super().setUp()\n self.instance = FactoryFloor.FisheryAreaFactory()\n self.test_url = reverse_lazy('fisheriescape:fishery_area_detail', args=[self.instance.pk, ])\n self.expected_template = 'fisheriescape/fisheryarea_detail.html'\n self.user = self.get_and_login_user(in_group=\"fisheriescape_admin\")\n\n @tag(\"FisheryArea\", \"fishery_area_detail\", \"view\")\n def test_view_class(self):\n self.assert_inheritance(views.FisheryAreaDetailView, CommonDetailView)\n self.assert_inheritance(views.FisheryAreaDetailView, views.FisheriescapeAdminAccessRequired)\n\n @tag(\"FisheryArea\", \"fishery_area_detail\", \"access\")\n def test_view(self):\n self.assert_good_response(self.test_url)\n self.assert_non_public_view(test_url=self.test_url, expected_template=self.expected_template, user=self.user)\n\n @tag(\"FisheryArea\", \"fishery_area_detail\", \"context\")\n def test_context(self):\n context_vars = [\n \"field_list\",\n \"random_fishery\",\n \"fishery_field_list\",\n ]\n self.assert_presence_of_context_vars(self.test_url, context_vars, user=self.user)\n\n @tag(\"FisheryArea\", \"fishery_area_detail\", \"correct_url\")\n def test_correct_url(self):\n # use the 'en' locale prefix to url\n self.assert_correct_url(\"fisheriescape:fishery_area_detail\", f\"/en/fisheriescape/fisheryarea/{self.instance.pk}/view/\", [self.instance.pk])\n\n\nclass TestFisheryDetailView(CommonTest):\n def setUp(self):\n super().setUp()\n self.instance = FactoryFloor.FisheryFactory()\n self.test_url = reverse_lazy('fisheriescape:fishery_detail', args=[self.instance.pk, ])\n self.expected_template = 'fisheriescape/fishery_detail.html'\n self.user = self.get_and_login_user(in_group=\"fisheriescape_admin\")\n\n @tag(\"Fishery\", \"fishery_detail\", \"view\")\n def test_view_class(self):\n self.assert_inheritance(views.FisheryDetailView, CommonDetailView)\n self.assert_inheritance(views.FisheryDetailView, views.FisheriescapeAdminAccessRequired)\n\n @tag(\"Fishery\", \"fishery_detail\", \"access\")\n def test_view(self):\n self.assert_good_response(self.test_url)\n 
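# assert_non_public_view is expected (CommonTest helper; assumption from its\n        # name and usage) to check both that anonymous access is denied and that\n        # self.user is served the expected template.\n        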
self.assert_non_public_view(test_url=self.test_url, expected_template=self.expected_template, user=self.user)\n\n @tag(\"Fishery\", \"fishery_detail\", \"context\")\n def test_context(self):\n context_vars = [\n \"field_list\",\n \"random_mammals\",\n \"mammals_field_list\",\n \"fishery_polygons\",\n ]\n self.assert_presence_of_context_vars(self.test_url, context_vars, user=self.user)\n\n @tag(\"Fishery\", \"fishery_detail\", \"correct_url\")\n def test_correct_url(self):\n # use the 'en' locale prefix to url\n self.assert_correct_url(\"fisheriescape:fishery_detail\", f\"/en/fisheriescape/fishery/{self.instance.pk}/view/\", [self.instance.pk])","sub_path":"fisheriescape/test/test_detail_views.py","file_name":"test_detail_views.py","file_ext":"py","file_size_in_byte":4832,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"324357469","text":"import os, sys\nimport subprocess, sys\nqec_playground_root_dir = subprocess.run(\"git rev-parse --show-toplevel\", cwd=os.path.dirname(os.path.abspath(__file__)), shell=True, check=True, capture_output=True).stdout.decode(sys.stdout.encoding).strip(\" \\r\\n\")\nrust_dir = os.path.join(qec_playground_root_dir, \"backend\", \"rust\")\nfault_toleran_MWPM_dir = os.path.join(qec_playground_root_dir, \"benchmark\", \"fault_tolerant_MWPM\")\nsys.path.insert(0, fault_toleran_MWPM_dir)\nfrom automated_threshold_evaluation import qec_playground_fault_tolerant_MWPM_simulator_runner_vec_command\nfrom automated_threshold_evaluation import run_qec_playground_command_get_stdout, compile_code_if_necessary\nsys.path.insert(0, os.path.join(qec_playground_root_dir, \"benchmark\", \"slurm_utilities\"))\nimport slurm_distribute\nfrom slurm_distribute import slurm_threads_or as STO\nslurm_distribute.SLURM_DISTRIBUTE_CPUS_PER_TASK = 12 # it doesn't rely on too much CPUs\nslurm_distribute.SLURM_DISTRIBUTE_TIME = \"05:20:00\"\nslurm_distribute.SLURM_DISTRIBUTE_MEM_PER_TASK = '4G'\nimport math, random, scipy.stats\nimport numpy as np\n\ndi_vec = [3, 5, 7, 9, 11, 13]\np_vec = [0.5 * (10 ** (- i / 5)) for i in range(5 * 4 + 1)]\n# print(p_vec)\nmin_error_cases = 0 # +inf\nmax_N = 0 # +inf\n\ntime_budget = 5 * 3600 # 5 hours\n# time_budget = 10 # debug\nUF_parameters = f\"-p{STO(0)} --time_budget {time_budget} --use_xzzx_code --noise_model OnlyGateErrorCircuitLevel --use_fast_benchmark --fbench_use_fake_decoder --fbench_disable_additional_error --fbench_target_dev 1e-2\".split(\" \")\n\ncompile_code_if_necessary()\n@slurm_distribute.slurm_distribute_run(os.path.dirname(__file__))\ndef experiment(slurm_commands_vec = None, run_command_get_stdout=run_qec_playground_command_get_stdout):\n results = []\n for di in di_vec:\n local_results = []\n filename = os.path.join(os.path.dirname(__file__), f\"d_{di}_{di}.txt\")\n for p in p_vec:\n p_pauli = p * 0.05\n p_erasure = p * 0.95\n command = qec_playground_fault_tolerant_MWPM_simulator_runner_vec_command([p_pauli], [di], [di], [di], UF_parameters + [\"--pes\", f\"[{p_erasure}]\"], max_N=max_N, min_error_cases=min_error_cases)\n if slurm_commands_vec is not None:\n slurm_commands_vec.sanity_checked_append(command)\n continue\n print(\" \".join(command))\n \n\n # run experiment\n stdout, returncode = run_command_get_stdout(command)\n print(\"\\n\" + stdout)\n assert returncode == 0, \"command fails...\"\n\n # full result\n full_result = stdout.strip(\" \\r\\n\").split(\"\\n\")[-1]\n lst = full_result.split(\" \")\n if lst[0] == \"format:\":\n print(\"[warning] missing data\")\n 
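# no usable columns in this output line, so skip the point: the effective\n                # code-distance fit below needs both an error rate and its confidence interval\n                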
continue\n error_rate = float(lst[7])\n confidence_interval = float(lst[8])\n \n # compute effective code distance\n if 'last_data' in locals() and last_data is not None:\n p_last, error_rate_last, confidence_interval_last = last_data\n X = [math.log(p_last), math.log(p)]\n baseline_slope, _, _, _, _ = scipy.stats.linregress(X, [math.log(error_rate_last), math.log(error_rate)])\n slope_vec = []\n for random_round in range(20):\n Y = [math.log(error_rate_last) + random.gauss(0, confidence_interval_last / 1.96), math.log(error_rate) + random.gauss(0, confidence_interval / 1.96)]\n slope, intercept, _, _, _ = scipy.stats.linregress(X, Y)\n slope_vec.append(slope)\n slope_confidence_interval = 1.96 * np.std(slope_vec)\n full_result += f\" {baseline_slope} {slope_confidence_interval} {math.sqrt(p * p_last)}\"\n if p == p_vec[-1]:\n last_data = None\n else:\n last_data = (p, error_rate, confidence_interval)\n\n # record result\n print_result = f\"{p} \" + full_result\n local_results.append(print_result)\n results.append(print_result)\n print(print_result)\n\n if slurm_commands_vec is not None:\n continue\n\n print(\"\\n\\n\")\n print(\"\\n\".join(local_results))\n print(\"\\n\\n\")\n\n with open(filename, \"w\", encoding=\"utf-8\") as f:\n f.write(\"\\n\".join(local_results) + \"\\n\")\n\n results.append(\"\")\n\n if slurm_commands_vec is not None:\n return\n\n print(\"\\n\\n\")\n print(\"\\n\".join(results))\n","sub_path":"benchmark/union_find_decoder/atomic_qubit_model/fbench_fake_revised_95_5_mixed_gate_only_circuit_level/run_experiment.py","file_name":"run_experiment.py","file_ext":"py","file_size_in_byte":4536,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"640020358","text":"#!/usr/bin/env python\n# coding: utf-8\n\n\"\"\"\nA sentence splitter wrapper for CoreNLP. 
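The heavy JVM start-up cost is\npaid once: a single CoreNLP process is kept alive through pexpect and fed one\nline of text per request.\n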
This wrapper returns the untokenized sentence splitting result from CoreNLP toolkit.\nThis sentence splitter has gone through a few changes.\n(1) Danqi Chen wrote the original python wrapper for the tokenization function in CoreNLP, https://github.com/facebookresearch/DrQA/blob/master/drqa/tokenizers/corenlp_tokenizer.py\n(2) Chao Jiang modified the code to make the sentence splitter produce splitted setnences with untokenized text.\n(3) Wuwei Lan modified the code to make it works with Arabic language.\n\n\"\"\"\n\nimport copy\nimport json\nimport pexpect\n\nCORENLP_PATH = './stanford-corenlp-4.2.0/*'\n\n\"\"\"Base tokenizer/tokens classes and utilities.\"\"\"\nclass Tokens(object):\n \"\"\"A class to represent a list of tokenized text.\"\"\"\n TEXT = 0\n TEXT_WS = 1\n SPAN = 2\n POS = 3\n LEMMA = 4\n NER = 5\n\n def __init__(self, data, annotators, opts=None, output = None):\n self.data = data\n self.annotators = annotators\n self.opts = opts or {}\n if output != None:\n self.output = output\n\n def __len__(self):\n \"\"\"The number of tokens.\"\"\"\n return len(self.data)\n\n def slice(self, i=None, j=None):\n \"\"\"Return a view of the list of tokens from [i, j).\"\"\"\n new_tokens = copy.copy(self)\n new_tokens.data = self.data[i: j]\n return new_tokens\n\n def ssplit(self):\n s_list = []\n original_sentence = self.untokenize()\n dict_a = self.output\n for i in dict_a['sentences']:\n start_offset = i['tokens'][0]['characterOffsetBegin']\n end_offset = i['tokens'][-1]['characterOffsetEnd']\n# s_list.append(original_sentence[start_offset:end_offset+1].strip())\n s_list.append(original_sentence[start_offset:end_offset].strip())\n\n return s_list\n\n def untokenize(self):\n \"\"\"Returns the original text (with whitespace reinserted).\"\"\"\n return ''.join([t[self.TEXT_WS] for t in self.data]).strip()\n\n def words(self, uncased=False):\n \"\"\"Returns a list of the text of each token\n\n Args:\n uncased: lower cases text\n \"\"\"\n if uncased:\n return [t[self.TEXT].lower() for t in self.data]\n else:\n return [t[self.TEXT] for t in self.data]\n\n def offsets(self):\n \"\"\"Returns a list of [start, end) character offsets of each token.\"\"\"\n return [t[self.SPAN] for t in self.data]\n\n def pos(self):\n \"\"\"Returns a list of part-of-speech tags of each token.\n Returns None if this annotation was not included.\n \"\"\"\n if 'pos' not in self.annotators:\n return None\n return [t[self.POS] for t in self.data]\n\n def lemmas(self):\n \"\"\"Returns a list of the lemmatized text of each token.\n Returns None if this annotation was not included.\n \"\"\"\n if 'lemma' not in self.annotators:\n return None\n return [t[self.LEMMA] for t in self.data]\n\n def entities(self):\n \"\"\"Returns a list of named-entity-recognition tags of each token.\n Returns None if this annotation was not included.\n \"\"\"\n if 'ner' not in self.annotators:\n return None\n return [t[self.NER] for t in self.data]\n\n def ngrams(self, n=1, uncased=False, filter_fn=None, as_strings=True):\n \"\"\"Returns a list of all ngrams from length 1 to n.\n\n Args:\n n: upper limit of ngram length\n uncased: lower cases text\n filter_fn: user function that takes in an ngram list and returns\n True or False to keep or not keep the ngram\n as_string: return the ngram as a string vs list\n \"\"\"\n def _skip(gram):\n if not filter_fn:\n return False\n return filter_fn(gram)\n\n words = self.words(uncased)\n ngrams = [(s, e + 1)\n for s in range(len(words))\n for e in range(s, min(s + n, len(words)))\n if not _skip(words[s:e + 
1])]\n\n # Concatenate into strings\n if as_strings:\n ngrams = ['{}'.format(' '.join(words[s:e])) for (s, e) in ngrams]\n\n return ngrams\n\n def entity_groups(self):\n \"\"\"Group consecutive entity tokens with the same NER tag.\"\"\"\n entities = self.entities()\n if not entities:\n return None\n non_ent = self.opts.get('non_ent', 'O')\n groups = []\n idx = 0\n while idx < len(entities):\n ner_tag = entities[idx]\n # Check for entity tag\n if ner_tag != non_ent:\n # Chomp the sequence\n start = idx\n while (idx < len(entities) and entities[idx] == ner_tag):\n idx += 1\n groups.append((self.slice(start, idx).untokenize(), ner_tag))\n else:\n idx += 1\n return groups\n\n\nclass Tokenizer(object):\n \"\"\"Base tokenizer class.\n Tokenizers implement tokenize, which should return a Tokens class.\n \"\"\"\n def tokenize(self, text):\n raise NotImplementedError\n\n def shutdown(self):\n pass\n\n def __del__(self):\n self.shutdown()\n\n\"\"\"Simple wrapper around the Stanford CoreNLP pipeline.\n\nServes commands to a java subprocess running the jar. Requires java 8.\n\"\"\"\nclass CoreNLPTokenizer(Tokenizer):\n\n def __init__(self, **kwargs):\n \"\"\"\n Args:\n annotators: set that can include pos, lemma, and ner.\n classpath: Path to the corenlp directory of jars\n mem: Java heap memory\n \"\"\"\n self.classpath = (CORENLP_PATH)\n self.annotators = copy.deepcopy(kwargs.get('annotators', set()))\n self.mem = kwargs.get('mem', '2g')\n self._launch()\n\n def _launch(self):\n \"\"\"Start the CoreNLP jar with pexpect.\"\"\"\n annotators = ['tokenize', 'ssplit']\n if 'ner' in self.annotators:\n annotators.extend(['pos', 'lemma', 'ner'])\n elif 'lemma' in self.annotators:\n annotators.extend(['pos', 'lemma'])\n elif 'pos' in self.annotators:\n annotators.extend(['pos'])\n annotators = ','.join(annotators)\n options = ','.join(['untokenizable=noneDelete',\n 'invertible=true'])\n # if you work on English, use this this command\n cmd = ['java', '-mx' + self.mem, '-cp', '\"%s\"' % self.classpath,\n 'edu.stanford.nlp.pipeline.StanfordCoreNLP', '-annotators',\n annotators, '-tokenize.options', options,\n '-outputFormat', 'json', '-prettyPrint', 'false']\n \n # if you work on arabic, use this this command\n \n # cmd = ['java', '-mx' + self.mem, '-cp', '\"%s\"' % self.classpath,\n # # 'edu.stanford.nlp.pipeline.StanfordCoreNLP','-annotators',\n # 'edu.stanford.nlp.pipeline.StanfordCoreNLP', '-props', 'StanfordCoreNLP-arabic.properties','-annotators',\n # annotators, '-tokenize.options', options, #'-tokenize.whitespace', 'true',\n # '-outputFormat', 'json', '-prettyPrint', 'false']\n print(' '.join(cmd))\n\n # We use pexpect to keep the subprocess alive and feed it commands.\n # Because we don't want to get hit by the max terminal buffer size,\n # we turn off canonical input processing to have unlimited bytes.\n self.corenlp = pexpect.spawn('/bin/bash', maxread=100000, timeout=60)\n self.corenlp.setecho(False)\n self.corenlp.sendline('stty -icanon')\n self.corenlp.sendline(' '.join(cmd))\n self.corenlp.delaybeforesend = 0\n self.corenlp.delayafterread = 0\n self.corenlp.expect_exact('NLP>', searchwindowsize=100)\n\n @staticmethod\n def _convert(token):\n if token == '-LRB-':\n return '('\n if token == '-RRB-':\n return ')'\n if token == '-LSB-':\n return '['\n if token == '-RSB-':\n return ']'\n if token == '-LCB-':\n return '{'\n if token == '-RCB-':\n return '}'\n return token\n\n def tokenize(self, text):\n # Since we're feeding text to the commandline, we're waiting on seeing\n # the NLP> prompt. 
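If the input text itself contained\n        # that token, expect_exact would fire early and desynchronise the stream,\n        # hence the guard at the top of tokenize(). 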
Hacky!\n if 'NLP>' in text:\n raise RuntimeError('Bad token (NLP>) in text!')\n\n # Sending q will cause the process to quit -- manually override\n if text.lower().strip() == 'q':\n token = text.strip()\n index = text.index(token)\n data = [(token, text[index:], (index, index + 1), 'NN', 'q', 'O')]\n return Tokens(data, self.annotators)\n\n # Minor cleanup before tokenizing.\n clean_text = text.replace('\\n', ' ')\n\n self.corenlp.sendline(clean_text.encode('utf-8'))\n self.corenlp.expect_exact('NLP>', searchwindowsize=100)\n\n # Skip to start of output (may have been stderr logging messages)\n output = self.corenlp.before\n start = output.find(b'{\\r\\n \"sentences\":')\n output = json.loads(output[start:].decode('utf-8'))\n \n data = []\n tokens = [t for s in output['sentences'] for t in s['tokens']]\n# print(output)\n for i in range(len(tokens)):\n # Get whitespace\n start_ws = tokens[i]['characterOffsetBegin']\n if i + 1 < len(tokens):\n end_ws = tokens[i + 1]['characterOffsetBegin']\n else:\n end_ws = tokens[i]['characterOffsetEnd']\n\n data.append((\n self._convert(tokens[i]['word']),\n text[start_ws: end_ws],\n (tokens[i]['characterOffsetBegin'],\n tokens[i]['characterOffsetEnd']),\n tokens[i].get('pos', None),\n tokens[i].get('lemma', None),\n tokens[i].get('ner', None)\n ))\n return Tokens(data, self.annotators, output = output)\n\ndef corenlp_ssplitter(text):\n \"\"\"\n A wrapper for CoreNLP sentence splitting function. \n Input is a string, function will return a list of string.\n CoreNLP will quit if receives \"q\" or \"Q\". Therefore, returning [text] if text in ['q', 'Q']\n \"\"\"\n \n \n if text not in ['q', 'Q']:\n return tok.tokenize(text).ssplit()\n else:\n return [text]\n\ntok = CoreNLPTokenizer()\ntext = 'Tom graduated from No. 1 Middle School. 
He loves reading.'\nprint(text)\nprint(corenlp_ssplitter(text))\nassert \" \".join(corenlp_ssplitter(text)) == text\n","sub_path":"sentence_splitter_wrapper_for_CoreNLP_En.py","file_name":"sentence_splitter_wrapper_for_CoreNLP_En.py","file_ext":"py","file_size_in_byte":10570,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"526748383","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Mar 12 11:12:25 2020\n\n@author: sebastian\n\"\"\"\n\n\nimport mmap\n\n\nheader= [170, 68, 18]\ncount = 0\nflag = 0\nlengthHead = []\nidMessage = []\nwith open(\"./data/GPS_HELIPUERTO_Dronearth_Colombia.rgi\", mode = 'rb') as data:\n    with mmap.mmap(data.fileno(), 0, access=mmap.ACCESS_READ) as m:\n        for i in range(0, len(m)):\n            # the id bytes m[i+4] and m[i+5] are read below, so stop early enough\n            if i > len(m) - 6:\n                break\n            if m[i] == 170 and m[i+1] == 68 and m[i+2] == 18:\n                # the message id is a little-endian uint16; int.from_bytes avoids\n                # the zero-padding bug of concatenating stripped hex() strings\n                idMessage.append(int.from_bytes(m[i+4:i+6], byteorder='little'))\n                for j in range(i+3, len(m) - 2):\n                    if m[j] == 170 and m[j+1] == 68 and m[j+2] == 18:\n                        break\n","sub_path":"binFiles/readRGI/readRgi.py","file_name":"readRgi.py","file_ext":"py","file_size_in_byte":967,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"432079475","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.animation as animation\nimport random\n\nt=0\nr=3.0\nn=0\nA=[]\n\nfor x in range(10):\n    for y in range(10):\n        A.append([random.uniform(0,1),random.uniform(0,1)])\nA = np.array(A).transpose()\nprint(A)\n\nfig = plt.figure()\nline, = plt.plot(A[0],A[1], \"x\", color=\"blue\")\n\ndef update():\n    for i in range(100):\n        A[0], A[1] = r*A[0]*(1-A[0]), r*A[1]*(1-A[1])\n        yield A\n\ndef draw(data):\n    line.set_xdata(data[0])\n    line.set_ydata(data[1])\n    return line,\n\nani = animation.FuncAnimation(fig, draw, update, interval=1000, blit=True)\n\nplt.show()","sub_path":"test/templates/animation_examples/movingscatter2.py","file_name":"movingscatter2.py","file_ext":"py","file_size_in_byte":622,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"391437034","text":"import os\nimport pandas\nimport logging\nimport shutil\n\nfrom tools import tools\nfrom bids import BidsSession\n\nfrom definitions import Series, checkSeries, plugin_root\n\n\"\"\"\nrename_plugin defines all necessary functions to prepare source\ndataset. 
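The module implements the bidscoin plugin hooks: InitEP runs once,\nSubjectEP/SessionEP run per subject and session, and SessionEndEP runs after\neach session has been copied.\n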
In particular it identifies the correct session id and\nretrieves demographic, task and assessment data\n\"\"\"\n\n# defining logger this way will prefix plugin messages\n# with plugin name\nlogger = logging.getLogger(__name__)\n\n\n#############################\n# global bidscoin variables #\n#############################\n\n# Folder with source dataset\nrawfolder = None\n# folder with prepared dataset\npreparefolder = None\n# switch indicating a dry-run (test run)\ndry_run = False\n\n\n###########################\n# global plugin variables #\n###########################\n\n# map of individual sessions\n# key: source folder session (s01234)\n# value: bidsified session (ses-HCL)\nscans_map = {}\n\n# scale to convert ms in log-files to seconds\ntime_scale = 1e-3\n\n# subject black-list\n# subject folders in this list will be skipped\n# by plugin\nsub_black_list = []\n\n# subject xls table columns and their renaming\nexcel_col_list = {\"Patient\": \"pat\",\n                  \"Sex\": \"pat_sex\",\n                  \"Age\": \"pat_age\",\n                  \"Education\": \"pat_edu\",\n                  1: \"pat_1\", 2: \"pat_2\", 3: \"pat_3\",\n                  'Control': \"cnt\",\n                  \"Sex.1\": \"cnt_sex\",\n                  \"Age.1\": \"cnt_age\",\n                  \"Education.1\": \"cnt_edu\",\n                  \"1.1\": \"cnt_1\", \"2.1\": \"cnt_2\", \"3.1\": \"cnt_3\"\n                  }\n\n\n# columns prefixes for patient and control subjects\n# 0 == False == Control\n# 1 == True == Patient\nsub_prefix = [\"cnt\", \"pat\"]\n\n# pandas dataframe with list of subjects\ndf_subjects = None\n\n\ndef InitEP(source: str, destination: str,\n           dry: bool,\n           subjects: str = \"\") -> int:\n    \"\"\"\n    Initialisation of plugin\n\n    1. Saves source/destination folders and dry_run switch\n    2. Loads subjects xls table\n\n    Parameters\n    ----------\n    source: str\n        path to source dataset\n    destination:\n        path to prepared dataset\n    subjects: str\n        path to subjects xls file; if empty, a default is taken\n        from the plugin folder\n    \"\"\"\n\n    global rawfolder\n    global preparefolder\n    global dry_run\n\n    rawfolder = source\n    preparefolder = destination\n    dry_run = dry\n\n    #########################\n    # Loading subjects list #\n    #########################\n    if subjects:\n        subject_file = subjects\n    else:\n        subject_file = os.path.join(plugin_root, \"Appariement.xlsx\")\n    if not os.path.isfile(subject_file):\n        raise FileNotFoundError(\"Subject file '{}' not found\"\n                                .format(subject_file))\n\n    # creating dataframe for subjects\n    global df_subjects\n    df_subjects = pandas.read_excel(subject_file,\n                                    sheet_name=0, header=0,\n                                    usecols=\"A:N\"\n                                    )\n    df_subjects.rename(index=str, columns=excel_col_list, inplace=True)\n    df_subjects = df_subjects[df_subjects['pat'].notnull()\n                              | df_subjects['cnt'].notnull()]\n\n\ndef SubjectEP(session: BidsSession) -> int:\n    \"\"\"\n    Subject determination and initialisation\n\n    1. Checks if subject not in black list\n    2. Loads demographics from subject table\n    3. 
Creates session parsing dictionary\n\n    Parameters\n    ----------\n    session: BidsSession\n\n    Returns\n    -------\n    int:\n        if 0, plugin successful\n        if > 0, plugin failed, an exception will be raised\n        if < 0, plugin failed, and subject will be skipped\n    \"\"\"\n\n    #################################\n    # Skipping if in the black list #\n    #################################\n    if session.subject in sub_black_list:\n        logger.info(\"Subject '{}' is in black_list\"\n                    .format(session.subject))\n        return -1\n\n    #################################\n    # Retrieving subject from table #\n    #################################\n    try:\n        # in case the folder name in source dataset\n        # cannot be converted to integer\n        sub_id = int(session.subject)\n    except ValueError as e:\n        logger.error(\"Subject {}: Can't determine subject Id for: {}\"\n                     .format(session.subject, e))\n        return -1\n\n    # storing bidsified subject id into session object\n    # optional, but useful as reference\n    session.sub_values[\"participant_id\"] = \"sub-\" + session.subject\n    # looking for subject in dataframe\n    prefix = \"pat\"\n    index = df_subjects.loc[df_subjects[prefix] == sub_id].index\n    # storing participant group in session\n    session.sub_values[\"group\"] = \"patient\"\n\n    if len(index) == 0:\n        # Subject not in patient list, looking in control\n        prefix = \"cnt\"\n        index = df_subjects.loc[df_subjects[prefix] == sub_id].index\n        session.sub_values[\"group\"] = \"control\"\n        if len(index) == 0:\n            raise KeyError(\"Subject {} not found in table\"\n                           .format(sub_id))\n    if len(index) > 1:\n        logger.warning(\"Subject {}: several column entries present\"\n                       .format(sub_id))\n    index = index[0]\n\n    # retrieving demographics\n    sex = df_subjects.loc[index, prefix + \"_sex\"]\n    age = df_subjects.loc[index, prefix + \"_age\"]\n    education = df_subjects.loc[index, prefix + \"_edu\"]\n\n    # session initialised values are Null\n    # fill them only if they are retrieved from table\n    if pandas.notna(sex):\n        session.sub_values[\"sex\"] = sex\n    if pandas.notna(age):\n        session.sub_values[\"age\"] = float(age)\n    if pandas.notna(education):\n        session.sub_values[\"education\"] = float(education)\n\n    # looking for pairing\n    paired = df_subjects.loc[index, sub_prefix[prefix == \"cnt\"]]\n    if pandas.notna(paired):\n        session.sub_values[\"paired\"] = \"sub-{:03}\".format(int(paired))\n\n    #################################\n    # determining order of sessions #\n    #################################\n    scans_map.clear()\n    scans_order = sorted([os.path.basename(s) for s in\n                          tools.lsdirs(os.path.join(rawfolder,\n                                                    session.subject),\n                                       \"s*\")\n                          ])\n    # looping over session defined in columns\n    for ind, s in enumerate((\"_1\", \"_2\", \"_3\")):\n        v = \"ses-\" + str(df_subjects.loc[index, prefix + s]).strip()\n        ses = \"ses\" + s\n        if v == \"ses-nan\":\n            # Session not defined in table, but existing\n            # in source dataset\n            session.sub_values[ses] = \"\"\n            logger.warning(\"Subject {}({}): missing {} value\"\n                           .format(session.sub_values[\"participant_id\"],\n                                   session.sub_values[\"group\"],\n                                   ses)\n                           )\n        elif v == \"ses-OUT\":\n            # participant left study\n            logger.warning(\"Subject {}({}): seems to have abandoned the study\"\n                           .format(session.sub_values[\"participant_id\"],\n                                   session.sub_values[\"group\"]\n                                   )\n                           )\n            return -1\n        elif v not in Series:\n            # invalid session name\n            logger.critical(\"Subject {}({}): Invalid {}: {}\"\n                            .format(session.sub_values[\"participant_id\"],\n                                    session.sub_values[\"group\"],\n                                    ses,\n                                    session.sub_values[ses])\n                            )\n            raise KeyError(\"Invalid {}: {}\"\n                           .format(ses, v))\n        else:\n            # session 
retrieved, storing values\n            session.sub_values[ses] = v\n            scans_map[scans_order[ind]] = v\n\n    # checking if all scans are identifiable\n    # if not, additional scans will be stored\n    # with original names\n    for scan in scans_order:\n        if scan not in scans_map:\n            logger.error(\"Subject {}({}): Can't identify session {}\"\n                         .format(session.sub_values[\"participant_id\"],\n                                 session.sub_values[\"group\"],\n                                 scan))\n            scans_map[scan] = scan\n\n    # optional, the sub- prefix added automatically\n    # if not present\n    session.subject = \"sub-\" + session.subject\n\n\ndef SessionEP(session: BidsSession) -> int:\n    \"\"\"\n    1. Set-up session name\n\n    Parameters\n    ----------\n    session: BidsSession\n    \"\"\"\n    # Renaming session name from map\n    session.session = scans_map[session.session]\n\n\ndef SessionEndEP(session: BidsSession):\n    \"\"\"\n    1. Checks the series in the prepared folder\n    2. Extract KSS/VAS data from kss_dict to tsv file\n    3. Parses in-scan nBack and KSS/VAS log files\n    \"\"\"\n    # path contains the destination folder, where\n    # all data files are placed\n\n    # session.getPath generates bids path based on\n    # subject and session id, eg. sub-001/ses-HCL\n    # if parameter empty==True, and no session,\n    # the generated path will still contain 'ses-'\n    # empty must be True in preparation plugin\n    path = os.path.join(preparefolder,\n                        session.getPath(True))\n    out_path = os.path.join(path,\n                            \"MRI\")\n\n    # checking if session contains correct series\n    if not dry_run:\n        checkSeries(out_path,\n                    session.subject, session.session,\n                    False)\n\n    ############################################\n    # Retrieving in-scan task and KSS/VAS data #\n    ############################################\n    if session.session == \"ses-STROOP\":\n        return 0\n    # where tsv files are\n    inp_dir = os.path.join(session.in_path, \"inp\")\n    # where tsv files should be\n    aux_dir = os.path.join(path, \"auxiliary\")\n    if not os.path.isdir(inp_dir):\n        raise NotADirectoryError(inp_dir)\n\n    # do not copy if we are in dry mode\n    if not dry_run:\n        os.makedirs(aux_dir, exist_ok=True)\n        # just copy the files; a real application\n        # might parse them instead\n        for file in (\"FCsepNBack.tsv\", \"VAS.tsv\"):\n            file = os.path.join(inp_dir, file)\n            if not os.path.isfile(file):\n                raise FileNotFoundError(file)\n            shutil.copy2(file, aux_dir)\n\n        # copying the corresponding json files\n        for file in (\"FCsepNBack.json\", \"VAS.json\"):\n            file = os.path.join(plugin_root, file)\n            if not os.path.isfile(file):\n                raise FileNotFoundError(file)\n            shutil.copy2(file, aux_dir)\n","sub_path":"example1/resources/plugins/rename_plugin.py","file_name":"rename_plugin.py","file_ext":"py","file_size_in_byte":10788,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"138900088","text":"# NOTE: wildcard tkinter import added so Frame/Label/Button/etc. resolve;\n# the original snippet relied on it being done elsewhere\nfrom tkinter import *\n\nclass MainWindow(Frame):\n    def __init__(self, master):\n        super(MainWindow, self).__init__(master)\n        self.grid()\n        self.configure(padx = 20, pady = 20)\n        self.createFoodMenuList()\n        #self.create_customer_widgets()\n        self.create_menu()\n        self.add_menu(master)\n    def createFoodMenuList(self): \n        self.frame1= Frame(self)\n        self.frame2= Frame(self)\n        from tkinter.ttk import Treeview\n        tv = Treeview(self)\n        tv['columns'] = ('location', 'city', 'cutomer_count', 'total_table', 'total_waiter', 'sales')\n        tv.heading(\"#0\", text='Restaurant name', anchor='w')\n        tv.column(\"#0\", anchor=\"w\")\n        tv.heading('location', text='Location')\n        tv.column('location', anchor='center', width=100)\n        tv.heading('city', text='City')\n        tv.column('city', anchor='center', width=100)\n        
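# one heading()/column() pair per Treeview data column; widths are in pixels\n        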
tv.heading('cutomer_count', text='Customer Count')\n tv.column('cutomer_count', anchor='center', width=100)\n tv.heading('total_table', text='Total Table')\n tv.column('total_table', anchor='center', width=100)\n tv.heading('total_waiter', text='Total Waiter')\n tv.column('total_waiter', anchor='center', width=100)\n tv.heading('sales', text='Sales')\n tv.column('sales', anchor='center', width=100)\n tv.grid(sticky = (N, S, W, E))\n self.frame1.treeview = tv\n self.frame1.grid_rowconfigure(0, weight = 1)\n self.frame1.grid_columnconfigure(0, weight = 1)\n self.frame2.grid(row = 0, column = 1, sticky = N)\n self.button = Button(self.frame2, text= \"Add\")\n self.button.grid(row = 0, column = 0)\n self.button1 = Button(self.frame2, text= \"Modify\")\n self.button1.grid(row = 1, column = 0)\n self.button2 = Button(self.frame2, text= \"Delete\")\n self.button2.grid(row = 2, column = 0)\n self.button3 = Button(self.frame2, text= \"Delete All\")\n self.button3.grid(row = 3, column = 0)\n\n def create_restaurant_widgets(self):\n self.res_labelframe = LabelFrame(self, text=\"Restaurant\", padx = 20, pady = 20)\n self.res_labelframe.grid()\n\n self.res_lbl_name = Label(self.res_labelframe, text=\"Name:\", padx = 10, pady = 10)\n self.res_lbl_name.grid(row = 0, column = 0)\n self.res_ent_name = Entry(self.res_labelframe, width = 50)\n self.res_ent_name.grid(row = 0, column = 1)\n self.res_ent_name.focus_get()\n\n self.res_lbl_location = Label(self.res_labelframe, text=\"Location:\", padx = 10, pady = 10)\n self.res_lbl_location.grid(row = 1, column = 0)\n self.res_ent_location = Entry(self.res_labelframe, width = 50)\n self.res_ent_location.grid(row = 1, column = 1)\n\n self.res_lbl_city = Label(self.res_labelframe, text=\"City:\" , padx = 10, pady = 10)\n self.res_lbl_city.grid(row = 2, column = 0)\n self.res_ent_city = Entry(self.res_labelframe, width = 50)\n self.res_ent_city.grid(row = 2, column = 1)\n\n self.res_ent_name.focus_set()\n\n\n self.add_waiters = BooleanVar()\n Checkbutton(self.res_labelframe, text = \"Add Waiters\", onvalue = 0, variable = self.add_waiters).grid(row = 3, column = 0, sticky = W, padx = 12, pady = 10)\n self.add_tables = BooleanVar()\n Checkbutton(self.res_labelframe, text = \"Add Tables\", onvalue = 0, variable = self.add_tables).grid(row = 3, column = 1, sticky = W, padx = 120, pady = 10)\n self.add_menus = BooleanVar()\n Checkbutton(self.res_labelframe, text = \"Add Menus\", variable = self.add_menus).grid(row = 3, column = 2, sticky = W, padx = 12, pady = 10)\n self.res_bttn_add = Button(self.res_labelframe, text = 'Add Restaurant', command = self.add_restaurant)\n self.res_bttn_add.grid(row = 0, column = 2, padx = 10, pady = 10)\n \n def create_customer_widgets(self):\n self.cus_labelframe = LabelFrame(self, text=\"Customer\", padx = 20, pady = 20)\n self.cus_labelframe.grid(padx = 10, pady = 0)\n \n self.cus_lbl_name = Label(self.cus_labelframe, text=\"Name:\", padx = 10, pady = 10)\n self.cus_lbl_name.grid(row = 0, column = 0)\n self.cus_ent_name = Entry(self.cus_labelframe, width = 50)\n self.cus_ent_name.grid(row = 0, column = 1)\n self.cus_bttn_add = Button(self.cus_labelframe, text = ' Add Customer ', command = self.add_customer)\n self.cus_bttn_add.grid(row = 0, column = 2, padx = 10, pady = 10)\n\n self.cus_lbl_Address = Label(self.cus_labelframe, text=\"Address:\", padx = 10, pady = 10)\n self.cus_lbl_Address.grid(row = 1, column = 0)\n self.cus_ent_Address = Entry(self.cus_labelframe, width = 50)\n self.cus_ent_Address.grid(row = 1, column = 1)\n\n 
self.cus_lbl_phone_no = Label(self.cus_labelframe, text=\"Phone No:\" , padx = 10, pady = 10)\n self.cus_lbl_phone_no.grid(row = 2, column = 0)\n self.cus_ent_phone_no = Entry(self.cus_labelframe, width = 50)\n self.cus_ent_phone_no.grid(row = 2, column = 1)\n\n self.cus_lbl_checkIn = Label(self.cus_labelframe, text=\"Check In To:\" , padx = 10, pady = 10)\n self.cus_lbl_checkIn.grid(row = 3, column = 0)\n from tkinter.ttk import Combobox\n self.cus_combo_restaurant = Combobox(self.cus_labelframe, values = main_obj.list_restaurant, width = 50)\n self.cus_combo_restaurant.grid(row = 3, column = 1, padx = 10, pady = 10)\n\n\n def create_menu(self):\n self.menu_labelframe = LabelFrame(self, text=\"Food Menu\", padx = 20, pady = 20)\n self.menu_labelframe.grid(padx = 10, pady = 0)\n \n self.menu_lbl_name = Label(self.menu_labelframe, text=\"Name:\", padx = 10, pady = 10)\n self.menu_lbl_name.grid(row = 0, column = 0)\n self.menu_ent_name = Entry(self.menu_labelframe, width = 50)\n self.menu_ent_name.grid(row = 0, column = 1)\n self.menu_bttn_add = Button(self.menu_labelframe, text = ' Add Food Menu ', command = self.add_created_menu)\n self.menu_bttn_add.grid(row = 0, column = 2, padx = 10, pady = 10)\n\n self.menu_lbl_price = Label(self.menu_labelframe, text=\"Price:\", padx = 10, pady = 10)\n self.menu_lbl_price.grid(row = 1, column = 0)\n self.menu_ent_price = Entry(self.menu_labelframe, width = 50)\n self.menu_ent_price.grid(row = 1, column = 1)\n\n self.menu_lbl_menutype = Label(self.menu_labelframe, text=\"Menu Type\" , padx = 10, pady = 10)\n self.menu_lbl_menutype.grid(row = 3, column = 0)\n list_menu = ['Snacks', 'Drinks', 'Main_Food']\n from tkinter.ttk import Combobox\n self.menu_combo_menu_type = Combobox(self.menu_labelframe, values = list_menu, width = 50)\n self.menu_combo_menu_type.grid(row = 3, column = 1, padx = 10, pady = 10)\n\n self.menu_lbl_restaurant = Label(self.menu_labelframe, text=\"Restaurant\" , padx = 10, pady = 10)\n self.menu_lbl_restaurant.grid(row = 4, column = 0)\n from tkinter.ttk import Combobox\n self.menu_combo_restaurant = Combobox(self.menu_labelframe, values = main_obj.list_restaurant, width = 50)\n self.menu_combo_restaurant.grid(row = 4, column = 1, padx = 10, pady = 10)\n \n def add_restaurant(self):\n name = self.res_ent_name.get()\n location = self.res_ent_location.get()\n city = self.res_ent_city.get()\n main_obj.add_restaurant(name, location, city)\n self.res_ent_name.delete(0, 'end')\n self.res_ent_location.delete(0, 'end')\n self.res_ent_city.delete(0, 'end')\n self.res_ent_name.focus_set()\n main_obj.list_restaurant.append(name)\n print('Restaurant: ', name, location, city, \"Added.\")\n self.refresh_customer()\n\n def refresh_customer(self):\n self.cus_labelframe.grid_forget()\n self.create_customer_widgets()\n self.menu_labelframe.grid_forget()\n self.create_menu()\n\n def add_customer(self):\n name = self.cus_ent_name.get()\n address = self.cus_ent_Address.get()\n phone_no = self.cus_ent_phone_no.get()\n check_in = self.cus_combo_restaurant.get()\n main_obj.add_customer(name, address, phone_no, check_in)\n self.cus_ent_name.delete(0, 'end')\n self.cus_ent_Address.delete(0, 'end')\n self.cus_ent_phone_no.delete(0, 'end')\n self.cus_ent_name.focus_set()\n print('Customer: ', name, address, phone_no, \"Added.\")\n\n def add_created_menu(self):\n item_name = self.menu_ent_name.get()\n item_price = int(self.menu_ent_price.get())\n item_type = self.menu_combo_menu_type.get()\n restaurant = self.menu_combo_restaurant.get()\n 
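# NOTE: main_obj is assumed to be a module-level controller object created\n        # elsewhere (it is not defined in this file); it owns the restaurant,\n        # customer and menu collections used by these callbacks.\n        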
main_obj.add_menu(item_name, item_price, item_type, restaurant)\n self.menu_ent_name.delete(0, 'end')\n self.menu_ent_price.delete(0, 'end')\n self.menu_combo_menu_type.delete(0, 'end')\n self.menu_combo_restaurant.delete(0, 'end')\n main_obj.print_menu()\n \n\n\n\n def add_menu(self, master):\n # Creating a Menu Bar\n master.menuBar = Menu(root)\n master.config(menu=master.menuBar)\n p_res_add = popup_add_restaurant()\n p_cus_add = popup_add_customer()\n # Add File menu items\n fileMenu = Menu(master.menuBar, tearoff=0)\n newMenu = Menu(master.menuBar, tearoff=0)\n newMenu.add_command(label = \"Customer\", command = p_cus_add.call_popup)\n newMenu.add_command(label = \"Restaurant\", command = p_res_add.call_popup)\n fileMenu.add_cascade(label = \"New\", menu = newMenu)\n fileMenu.add_separator()\n fileMenu.add_command(label=\"Exit\", command=self._quit)\n master.menuBar.add_cascade(label=\"File\", menu=fileMenu)\n\n # Add Edit menu items\n editMenu = Menu(master.menuBar, tearoff=0)\n editMenu.add_command(label=\"Cut\")\n editMenu.add_command(label=\"Copy\")\n editMenu.add_command(label=\"Paste\")\n master.menuBar.add_cascade(label=\"Edit\", menu=editMenu)\n\n\n # Add Customer menu items\n cusMenu = Menu(master.menuBar, tearoff=0)\n cus_table = customer_table()\n cusMenu.add_command(label=\"Show\", command = cus_table.create_table)\n cusMenu.add_command(label=\"Add new\", command = p_cus_add.call_popup)\n cusMenu.add_command(label=\"Delete\")\n cusMenu.add_command(label=\"Status\")\n order = popup_order()\n cusMenu.add_command(label=\"Order\", command = order.call_popup_customer)\n master.menuBar.add_cascade(label=\"Customer\", menu=cusMenu)\n\n # Add Restaurant menu items\n resMenu = Menu(master.menuBar, tearoff=0)\n res_table = restaurant_table()\n resMenu.add_command(label=\"Show\", command = res_table.create_table)\n resMenu.add_command(label=\"Add new\", command = p_res_add.call_popup)\n resMenu.add_command(label=\"Delete\")\n resMenu.add_command(label=\"Status\")\n resMenu.add_command(label=\"Orders\")\n resMenu.add_command(label=\"Payments\")\n master.menuBar.add_cascade(label=\"Restaurant\", menu=resMenu)\n\n # Add Help menu and about item\n helpMenu = Menu(master.menuBar, tearoff=0)\n helpMenu.add_command(label=\"About\")\n master.menuBar.add_cascade(label=\"Help\", menu=helpMenu)\n\n # Exit GUI Cleanly\n def _quit(self):\n self.quit()\n self.destroy()\n exit()","sub_path":"Code/MainWindow.py","file_name":"MainWindow.py","file_ext":"py","file_size_in_byte":11322,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"570366142","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nfrom django.conf import settings\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ('resume', '0006_auto_20160204_1606'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='WorkLog',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('start', models.DateField()),\n ('end', models.DateField(null=True, blank=True)),\n ('company', models.CharField(max_length=200)),\n ('job', models.CharField(max_length=200)),\n ('user', models.OneToOneField(to=settings.AUTH_USER_MODEL)),\n ],\n ),\n 
]\n","sub_path":"resume/migrations/0007_worklog.py","file_name":"0007_worklog.py","file_ext":"py","file_size_in_byte":882,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"407581276","text":"# Upper to lower case and vice versa\n\ninp = input(\"Enter a String\")\n\nop = ''\n\nfor i in inp:\n if ord(i) >= 97 and ord(i) <= 122:\n op += chr(ord(i) - 32)\n elif ord(i) >= 65 and ord(i) <= 90:\n op += chr(ord(i) + 32)\n else:\n op += i\n\nprint(op)","sub_path":"upperToLowerCase.py","file_name":"upperToLowerCase.py","file_ext":"py","file_size_in_byte":269,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"562660437","text":"# -*- coding:utf-8 -*-\nimport math\nimport numpy as np\nimport h5py\nimport matplotlib.pyplot as plt\nimport tensorflow as tf\nfrom tensorflow.python.framework import ops\nfrom tf_utils import load_dataset, random_mini_batches, convert_to_one_hot, predict\n\nX_train_orig,Y_train_orig,X_test_orig,Y_test_orig,classes = load_dataset()\nindex = 0\nplt.imshow(X_train_orig[index])\nprint (\"y = \" + str(np.squeeze(Y_train_orig[:, index])))\n\n# 拉直训练集和测试集\nX_train_flatten = X_train_orig.reshape(X_train_orig.shape[0],-1).T\nX_test_flatten = X_test_orig.reshape(X_test_orig.shape[0],-1).T\n# 进行归一化操作,归一化到【0-1】\nX_train = X_train_flatten/255\nX_test = X_test_flatten/255\n\n# 将训练集和测试集合转换成One-hot矩阵\nY_train = convert_to_one_hot(Y_train_orig,6)\nY_test = convert_to_one_hot(Y_test_orig,6)\n\nprint(\"X_train_orig.shape[0] = \" + str(X_train_orig.shape[0])) # 1080\nprint(\"X_test_orig.shape[0] = \" + str(X_test_orig.shape[0])) # 120\nprint(\"number of X training orginal examples = \" + str(X_train_orig.shape)) #(1080, 64, 64, 3)\nprint(\"number of Y training orginal examples = \" + str(Y_train_orig.shape)) # (1, 1080)\nprint(\"number of flatten X X_train_flatten orginal examples = \" + str(X_train_flatten.shape)) # (12288, 1080)\nprint(\"number of flatten X X_test_flatten orginal examples = \" + str(X_test_flatten.shape)) # (12288, 120)\nprint (\"number of training examples = \" + str(X_train.shape[1])) # 1080\nprint (\"number of test examples = \" + str(X_test.shape[1])) # 120\nprint (\"X_train shape: \" + str(X_train.shape)) # (12288, 120)\nprint (\"Y_train shape: \" + str(Y_train.shape)) # (6, 1080)\nprint (\"X_test shape: \" + str(X_test.shape)) # (12288, 120)\nprint (\"Y_test shape: \" + str(Y_test.shape)) # (6, 120)\n\n# create placeholder\ndef create_placeholders(n_x,n_y):\n X = tf.placeholder(shape=[n_x,None],dtype=tf.float32)\n Y = tf.placeholder(shape=[n_y,None],dtype=tf.float32)\n return X,Y\n\n# 初始化参数\ndef initialize_parameters():\n tf.set_random_seed(1) # so that your \"random\" numbers match ours\n\n ### START CODE HERE ### (approx. 
6 lines of code)\n W1 = tf.get_variable(\"W1\", [25, 12288], initializer=tf.contrib.layers.xavier_initializer(seed=1))\n b1 = tf.get_variable(\"b1\", [25, 1], initializer=tf.zeros_initializer())\n W2 = tf.get_variable(\"W2\", [12, 25], initializer=tf.contrib.layers.xavier_initializer(seed=1))\n b2 = tf.get_variable(\"b2\", [12, 1], initializer=tf.zeros_initializer())\n W3 = tf.get_variable(\"W3\", [6, 12], initializer=tf.contrib.layers.xavier_initializer(seed=1))\n b3 = tf.get_variable(\"b3\", [6, 1], initializer=tf.zeros_initializer())\n ### END CODE HERE ###\n\n parameters = {\"W1\": W1,\n \"b1\": b1,\n \"W2\": W2,\n \"b2\": b2,\n \"W3\": W3,\n \"b3\": b3}\n\n return parameters\n\ntf.reset_default_graph()\nwith tf.Session() as sess:\n parameters = initialize_parameters()\n print(\"W1 = \" + str(parameters[\"W1\"]))\n print(\"b1 = \" + str(parameters[\"b1\"]))\n print(\"W2 = \" + str(parameters[\"W2\"]))\n print(\"b2 = \" + str(parameters[\"b2\"]))\n\n# 前向运算\ndef forward_propagation(X, parameters):\n\n W1 = parameters['W1']\n b1 = parameters['b1']\n W2 = parameters['W2']\n b2 = parameters['b2']\n W3 = parameters['W3']\n b3 = parameters['b3']\n\n Z1 = tf.add(tf.matmul(W1,X),b1)\n A1 = tf.nn.relu(Z1)\n Z2 = tf.add(tf.matmul(W2,A1),b2)\n A2 = tf.nn.relu(Z2)\n Z3 = tf.add(tf.matmul(W3,A2),b3)\n\n return Z3\n\n\n\ntf.reset_default_graph()\nwith tf.Session() as sess:\n X, Y = create_placeholders(12288, 6)\n parameters = initialize_parameters()\n Z3 = forward_propagation(X, parameters)\n print(\"Z3 = \" + str(Z3))\n\ndef compute_cost(Z3,Y):\n # 转置\n logits = tf.transpose(Z3)\n labels = tf.transpose(Y)\n\n cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits,labels=labels))\n\n return cost\n\ndef model(X_train,Y_train,X_test,Y_test,learning_rate=0.0001,\n num_epochs = 1500,minibatch_size = 32,print_cost = True):\n\n ops.reset_default_graph()\n tf.set_random_seed(1)\n seed = 3\n (n_x,m) = X_train.shape\n n_y = Y_train.shape[0]\n costs = []\n\n # Create Placeholders of shape (n_x, n_y)\n X,Y = create_placeholders(n_x,n_y)\n\n # Initialize parameters\n parameters = initialize_parameters()\n\n # Forward propagation: Build the forward propagation in the tensorflow graph\n Z3 = forward_propagation(X,parameters)\n\n # Cost function: Add cost function to tensorflow graph\n cost = compute_cost(Z3,Y)\n\n # Backpropagation: Define the tensorflow optimizer. 
Use an AdamOptimizer.\n optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)\n\n # Initialize all the variables\n init = tf.global_variables_initializer()\n\n with tf.Session() as sess:\n sess.run(init)\n\n for epoch in range(num_epochs):\n epoch_cost = 0\n num_minibatches = int(m/minibatch_size) # 计算有多少格mini-batch\n seed = seed + 1\n minibatches = random_mini_batches(X_train,Y_train,minibatch_size,seed)\n\n for minibatch in minibatches:\n (minibatch_X,minibatch_Y) = minibatch\n _, minibatch_cost = sess.run([optimizer, cost], feed_dict={X: minibatch_X, Y: minibatch_Y})\n epoch_cost += minibatch_cost / num_minibatches\n\n # Print the cost every epoch\n if print_cost == True and epoch % 100 == 0:\n print(\"Cost after epoch %i: %f\" % (epoch, epoch_cost))\n if print_cost == True and epoch % 5 == 0:\n costs.append(epoch_cost)\n\n # plot the cost\n plt.plot(np.squeeze(costs))\n plt.ylabel('cost')\n plt.xlabel('iterations (per tens)')\n plt.title(\"Learning rate =\" + str(learning_rate))\n plt.show()\n\n # lets save the parameters in a variable\n parameters = sess.run(parameters)\n print(\"Parameters have been trained!\")\n\n # Calculate the correct predictions\n correct_prediction = tf.equal(tf.argmax(Z3), tf.argmax(Y))\n\n # Calculate accuracy on the test set\n accuracy = tf.reduce_mean(tf.cast(correct_prediction, \"float\"))\n\n print(\"Train Accuracy:\", accuracy.eval({X: X_train, Y: Y_train}))\n print(\"Test Accuracy:\", accuracy.eval({X: X_test, Y: Y_test}))\n\n return parameters\n\n\nparameters = model(X_train, Y_train, X_test, Y_test)\n","sub_path":"src/deeplearning/ai/class2/assignment3/FirstNeuralNetWorkInTensorflow.py","file_name":"FirstNeuralNetWorkInTensorflow.py","file_ext":"py","file_size_in_byte":6465,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"326270198","text":"\"\"\" SQS component helper functions \"\"\"\nimport json\n\nimport botocore\nfrom addict import Dict\n\nfrom components.generic_helper import GenericHelper\n\n\nclass LambdaHelper(GenericHelper):\n \"\"\" Helper functions for Lambda \"\"\"\n\n @classmethod\n def metric_resource_exists(cls, metric):\n \"\"\"\n Check the resource exists before defining an alarm\n aws cloudwatch list-metrics returns metrics for resources that\n no longer exists\n \"\"\"\n region = cls.get_metric_region(metric)\n namespace = metric.Namespace\n resource_exists = True\n try:\n print(f\"Getting boto client for {namespace} in {region}\")\n client = cls.get_client_from_namespace(namespace, region)\n if client:\n function_name = cls.get_metric_dimension_value(metric, \"FunctionName\")\n if function_name:\n print(f\"Get tags for lambda function: {function_name}\")\n client.get_function(FunctionName=function_name)\n else:\n resource_exists = False\n\n except AttributeError as err:\n print(json.dumps(metric, indent=2))\n print(str(err))\n except botocore.exceptions.ClientError as err:\n print(str(err))\n resource_exists = False\n return resource_exists\n\n @classmethod\n def get_tags_for_metric_resource(cls, metric):\n \"\"\"\n Get QueueUrl from queue name and then get the tags if present\n There is some duplication of the above function it would be nice to remove\n \"\"\"\n region = cls.get_metric_region(metric)\n namespace = metric.Namespace\n tags = {}\n try:\n print(f\"Getting boto client for {namespace} in {region}\")\n client = cls.get_client_from_namespace(namespace, region)\n if client:\n function_name = cls.get_metric_dimension_value(metric, 
\"FunctionName\")\n if function_name:\n print(f\"Get tags for lambda function: {function_name}\")\n get_function_response = Dict(\n client.get_function(FunctionName=function_name)\n )\n lambda_arn = get_function_response.Configuration.FunctionArn\n get_tags_response = Dict(client.list_tags(Resource=lambda_arn))\n tags = get_tags_response.Tags\n\n except AttributeError as err:\n print(json.dumps(metric, indent=2))\n print(str(err))\n except botocore.exceptions.ClientError as err:\n print(str(err))\n return tags\n\n @classmethod\n def get_metric_threshold(cls, metric, rule):\n if metric.MetricName == \"Duration\":\n region = cls.get_metric_region(metric)\n # Calculate duration threshold here.\n\n # Get the lambda timeout\n namespace = metric.Namespace\n # Assign a timeout outside of the try block\n lambda_timeout = 60\n try:\n print(f\"Getting boto client for {namespace} in {region}\")\n client = cls.get_client_from_namespace(namespace, region)\n if client:\n function_name = cls.get_metric_dimension_value(\n metric, \"FunctionName\"\n )\n if function_name:\n print(f\"Get timeout for lambda function: {function_name}\")\n get_function_response = Dict(\n client.get_function(FunctionName=function_name)\n )\n lambda_timeout = get_function_response.Configuration.Timeout\n except AttributeError as err:\n print(json.dumps(metric, indent=2))\n print(str(err))\n except botocore.exceptions.ClientError as err:\n print(str(err))\n\n print(f\"Set lambda timeout to: {lambda_timeout}\")\n # 90% of max timeout seconds in milliseconds\n rule.Maximum = lambda_timeout * 1000 * 0.9\n print(f\"Maximum threshold set to {rule.Maximum}\")\n\n threshold = super().get_metric_threshold(metric, rule)\n\n return threshold\n","sub_path":"lambda/health_package/components/lambda_helper.py","file_name":"lambda_helper.py","file_ext":"py","file_size_in_byte":4262,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"18014931","text":"#!/usr/bin/env python3\n\nfrom grads import *\nfrom pylab import *\n\nif __name__ == \"__main__\":\n\n channel = 470\n yy = 2008\n mm = 0o6\n yymm = 2008*100 + mm\n\n \n qname = {}\n qname['aod_modo'] = 'MODIS/TERRA Ocean'\n qname['aod_modl'] = 'MODIS/TERRA Land'\n qname['aod_mydo'] = 'MODIS/AQUA Ocean'\n qname['aod_mydl'] = 'MODIS/AQUA Land'\n qname['aod_deep'] = 'MODIS/AQUA Deep-Blue'\n qname['aod_misr'] = 'MISR'\n qname['aod_anet'] = 'AERONET'\n qname['aod_parl'] = 'PARASOL Land'\n qname['aod_paro'] = 'PARASOL Ocean'\n qname['aod_omi'] = 'OMI'\n\n ga = GrADS(Window=False,Echo=False)\n\n ga(\"sdfopen a0005.gritas_aod.obs.%s.nc\"%yymm)\n ga(\"sdfopen a0005.gritas_aod.omf.%s.nc\"%yymm)\n ga(\"sdfopen a0005.gritas_aod.oma.%s.nc\"%yymm)\n\n ga('set lev %d'%channel)\n ga('set gxout grfill')\n\n for q in ( 'aod_modo', 'aod_modl', 'aod_mydo', 'aod_mydl',\n 'aod_misr', 'aod_deep', 'aod_omi' ):\n\n print(\"Plotting \"+q)\n\n ga('clear')\n ga(\"d %s.1\"%q)\n ga('cbarn')\n ga('draw title %dnm OBS Log(eps+AOD) - %s [%d-%d]'%(channel,qname[q],yy,mm))\n ga('gxyat obs.%s_%dnm.%d.png'%(q,channel,yymm))\n \n ga('clear')\n ga(\"xydiff %s.2\"%q)\n ga('draw title %dnm O-F Log(eps+AOD) - %s [%d-%d]'%(channel,qname[q],yy,mm))\n ga('gxyat omf.%s_%dnm.%d.png'%(q,channel,yymm))\n ga('print omf.%s_%dnm.%d.eps'%(q,channel,yymm))\n \n ga('clear')\n ga(\"xydiff %s.3\"%q)\n ga('draw title %dnm O-A Log(eps+AOD) - %s [%d-%d]'%(channel,qname[q],yy,mm))\n ga('gxyat oma.%s_%dnm.%d.png'%(q,channel,yymm))\n\n\n\n \n 
\n","sub_path":"src/Components/misc/obs_aod/ML/plot_omf2.py","file_name":"plot_omf2.py","file_ext":"py","file_size_in_byte":1646,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"109590680","text":"import wave\nimport pickle\nimport contextlib\nimport librosa\nimport numpy as np\nimport IPython.display as ipd\nimport matplotlib.pyplot as plt\nfrom sklearn import preprocessing\nfrom sklearn.mixture import GaussianMixture\nfrom scipy.spatial.distance import cdist\nimport webrtcvad\nimport collections\nimport copy\nimport os\nfrom IPython.display import clear_output\nfrom sklearn.cluster import SpectralClustering\nimport warnings\nwarnings.simplefilter(action='ignore', category=FutureWarning)\n\nclass Frame(object):\n\tdef __init__(self, bytes, timestamp, duration):\n\t\tself.bytes = bytes\n\t\tself.timestamp = timestamp\n\t\tself.duration = duration\n\ndef frame_generator(frame_duration_ms, audio, sample_rate):\n\tn = int(sample_rate * (frame_duration_ms / 1000.0) * 2)\n\toffset = 0\n\ttimestamp = 0.0\n\tduration = (float(n) / sample_rate) / 2.0\n\twhile offset + n < len(audio):\n\t\tyield Frame(audio[offset:offset + n], timestamp, duration)\n\t\ttimestamp += duration\n\t\toffset += n\n\ndef vad_collector(sample_rate, frame_duration_ms, padding_duration_ms, vad, frames):\n\tnum_padding_frames = int(padding_duration_ms / frame_duration_ms)\n\tring_buffer = collections.deque(maxlen=num_padding_frames)\n\ttriggered = False\n\n\tvoiced_frames = []\n\tfor frame in frames:\n\t\tis_speech = vad.is_speech(frame.bytes, sample_rate)\n\n\tif not triggered:\n\t\tring_buffer.append((frame, is_speech))\n\t\tnum_voiced = len([f for f, speech in ring_buffer if speech])\n\t\tif num_voiced > 0.9 * ring_buffer.maxlen:\n\t\t\ttriggered = True\n\t\t\tfor f, s in ring_buffer:\n\t\t\t\tvoiced_frames.append(f)\n\t\t\tring_buffer.clear()\n\telse:\n\t\tvoiced_frames.append(frame)\n\t\tring_buffer.append((frame, is_speech))\n\t\tnum_unvoiced = len([f for f, speech in ring_buffer if not speech])\n\t\tif num_unvoiced > 0.9 * ring_buffer.maxlen:\n\t\t\ttriggered = False\n\t\t\tyield b''.join([f.bytes for f in voiced_frames])\n\t\t\tring_buffer.clear()\n\t\t\tvoiced_frames = []\n\tif voiced_frames:\n\t\tyield b''.join([f.bytes for f in voiced_frames])\n\npath = '/home/beslan/Diplom/WorkWithVideo6/Audio/'\nname = 'Audiotest6.wav'\n\n#читаем сигнал\ny, sr = librosa.load(path+name)\n#первым шагом делаем pre-emphasis: усиление высоких частот\npre_emphasis = 0.97\ny = np.append(y[0], y[1:] - pre_emphasis * y[:-1])\n\n#все что ниже фактически взято с гитхаба webrtcvad с небольшими изменениями\nvad = webrtcvad.Vad(2) # агрессивность VAD\naudio = np.int16(y/np.max(np.abs(y)) * 32767)\n\n# frames = frame_generator(10, audio, sr)\n# frames = list(frames)\n# segments = vad_collector(sr, 50, 200, vad, frames)\n\n# if not os.path.exists(path+'chunks'): os.makedirs(path+'chunks')\n# for i, segment in enumerate(segments):\n# chunk_name = path+'chunks/chunk-%003d.wav' % (i,)\n# # vad добавляет в конце небольшой кусочек тишины, который нам не нужен\n# write_wave(chunk_name, segment[0: len(segment)-int(100*sr/1000)], sr)\n","sub_path":"Code Python/Temporary/toExtractPresenterFromAudio2.py","file_name":"toExtractPresenterFromAudio2.py","file_ext":"py","file_size_in_byte":2960,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"529013836","text":"# The German Traffic Sign Recognition Benchmark\r\n#\r\n# sample code for reading the 
traffic sign images and the\r\n# corresponding labels\r\n#\r\n# example:\r\n# \r\n# trainImages, trainLabels = readTrafficSigns('GTSRB/Training')\r\n# print len(trainLabels), len(trainImages)\r\n# plt.imshow(trainImages[42])\r\n# plt.show()\r\n#\r\n# have fun, Christian\r\n# YAO Matrix\r\n\r\nimport matplotlib.pyplot as plt\r\nimport csv\r\nfrom sklearn.model_selection import train_test_split\r\nimport pickle\r\n\r\ndef readTrafficSigns(rootpath):\r\n '''Reads traffic sign data for German Traffic Sign Recognition Benchmark.\r\n\r\n Arguments: path to the traffic sign data\r\n Returns: list of images, list of corresponding labels'''\r\n images = [] # images\r\n labels = [] # corresponding labels\r\n # loop over all 43 classes\r\n for c in range(0, 43):\r\n prefix = rootpath + '/' + format(c, '05d') + '/' # subdirectory for each class\r\n gtFile = open(prefix + 'GT-'+ format(c, '05d') + '.csv') # annotations file\r\n gtReader = csv.reader(gtFile, delimiter=';') # csv parser for annotations file\r\n gtReader.next()\r\n # loop over all images in current annotations file\r\n for row in gtReader:\r\n images.append(plt.imread(prefix + row[0])) # the 1th column is the filename\r\n labels.append(row[7]) # the 8th column is the label\r\n gtFile.close()\r\n return images, labels\r\n\r\ndef readTestTrafficSigns(rootpath):\r\n '''Reads test traffic sign data for German Traffic Sign Recognition Benchmark.\r\n\r\n Arguments: path to the test traffic sign data\r\n Returns: list of images, list of corresponding labels'''\r\n images = [] # images\r\n labels = [] # corresponding labels\r\n\r\n prefix = rootpath + '/'\r\n gtFile = open(prefix + 'GT-final_test.csv') # annotations file\r\n gtReader = csv.reader(gtFile, delimiter=';') # csv parser for annotations file\r\n gtReader.next()\r\n # loop over all images in current annotations file\r\n for row in gtReader:\r\n images.append(plt.imread(prefix + row[0])) # the 1th column is the filename\r\n labels.append(row[7]) # the 8th column is the label\r\n gtFile.close()\r\n \r\n return images, labels\r\n\r\nif __name__ == \"__main__\":\r\n images, labels = readTrafficSigns(\"./data/GTSRB/Final_Training/Images\")\r\n\r\n train = {}\r\n val = {}\r\n test = {}\r\n\r\n train[\"features\"], val[\"features\"], train[\"labels\"], val[\"labels\"] = train_test_split(images, labels, test_size=0.1, random_state=6)\r\n\r\n train_file = open('./data/gtsrb_train.pkl', 'wb')\r\n pickle.dump(train, train_file)\r\n\r\n val_file = open('./data/gtsrb_val.pkl', 'wb')\r\n pickle.dump(val, val_file)\r\n\r\n test[\"features\"], test[\"labels\"] = readTestTrafficSigns(\"./data/GTSRB/Final_Test/Images\")\r\n test_file = open('./data/gtsrb_test.pkl', 'wb')\r\n pickle.dump(test, test_file)\r\n\r\n print(\"Data Preparation Done\")\r\n","sub_path":"prepare_data.py","file_name":"prepare_data.py","file_ext":"py","file_size_in_byte":2867,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"556612491","text":"import random\n\nstr = \"I couldn't believe that I could actually understand what I was reading : the phenomenal power of the human mind .\"\nlistwords = str.split(\" \")\nresult = []\nfor word in listwords:\n if len(word) <= 4:\n result.append(word)\n else:\n shuffle = random.sample(word, len(word) - 2)\n result.append(word[0] + \"\".join(shuffle) + word[-1])\nresult = \" 
\".join(result)\nprint(result)\n","sub_path":"009/9.py","file_name":"9.py","file_ext":"py","file_size_in_byte":414,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"100838227","text":"import re, math\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.base import BaseEstimator, TransformerMixin\n\n# NLP Pipeline\nfrom bs4 import BeautifulSoup\nfrom nltk.stem.wordnet import WordNetLemmatizer\nfrom sklearn.feature_extraction.text import TfidfVectorizer\n\n# Silence outdated numpy warning\nimport warnings\nwarnings.filterwarnings(action=\"ignore\", category=DeprecationWarning)\n\n\n'''\nFeatures to engineer:\n pure feature transforms:\n p/t ratio\n t/p ratio\n p+t : cmc ratio\n nlp / text analysis:\n keywordification\n n-grams (the big boi)\n n-grams by card type?\n # abilities (count ':'s?)\n cmc of abilities\n synergies (tf-df?)\n mana abilities\n difficulty to cast (mana, 'additional cost')\n card similarity\n activated vs triggered abilities \n independent / dependent transforms:\n card-card similarity, nearest neighbors\n price normalization by season\n price decomposition by format\n'''\ndef one_hot(input_df, columns):\n \"\"\"\n One-hot encode the provided list of columns and return a new copy of the data frame\n \"\"\"\n df = input_df.copy()\n\n for col in columns:\n dummies = pd.get_dummies(df[col], prefix=col)\n dummies.drop(dummies.columns[-1], axis=1, inplace=True)\n df = df.drop(col, axis=1).merge(dummies, left_index=True, right_index=True)\n\n return df\n\ndef csv_cleaner(df, y_col='price'):\n clean_df = df.copy()\n # clean_df.drop(columns=['Unnamed: 0'], inplace=True)\n clean_df.drop_duplicates(inplace=True)\n # clean_df.set_index('id', inplace=True)\n\n set_excluder = SetExclusionTransformer()\n clean_df = set_excluder.transform(clean_df)\n return clean_df.drop(columns=y_col, axis=1), clean_df[y_col]\n\ndef get_seasons(df):\n seasons = [x for x in df.columns if x.strip('s').isnumeric()]\n return seasons\n\n\nclass OneHotTransformer(BaseEstimator, TransformerMixin):\n \"\"\"\n One-hot encode features\n \"\"\"\n\n def fit(self, X, y=None):\n \"\"\"\n Store the features resulting from training features\n Accepts DataFrame\n Saves state and returns self\n \"\"\"\n df = one_hot(\n X,\n [\n # PUT FEATURES TO ONE-HOT HERE\n ],\n )\n self.train_columns = df.columns\n\n return self\n\n def transform(self, X):\n \"\"\"\n One-hot encode and ensure all features captured in training are present as well.\n Accepts DataFrame\n Returns DataFrame with addition features\n \"\"\"\n df = X.copy()\n df = one_hot(\n df,\n [\n # PUT FEATURES TO ONE-HOT HERE\n ],\n )\n\n # Remove untrained columns\n for col in self.train_columns:\n if col not in df.columns:\n df[col] = 0\n\n # Add trained on columns\n for col in df.columns:\n if col not in self.train_columns:\n df.drop(col, axis=1, inplace=True)\n\n return df[self.train_columns]\n\nclass FillnaTransformer(BaseEstimator, TransformerMixin):\n \"\"\"\n Impute NaN values\n # TODO: Parameterize so values can be imputed with -1, mean, median, or mode.\n \"\"\"\n def __init__(self, fill_value=0, is_seasonal=False):\n self.fill_value = fill_value\n self.is_seasonal = is_seasonal\n\n def _get_seasons(self, df):\n \"\"\" finds seasons in columns of df and returns list of them \"\"\"\n seasons = [x for x in df.columns if x[1:].isnumeric()] \n return seasons\n\n def fit(self, X, y=None): \n return self\n\n def transform(self, X):\n # 
parameterize this with mean, median, mode, etc.\n # fill missing values with the configured fill_value\n # TODO: make this fill dynamic for all columns?\n df = X.copy()\n # only fill non-seasonal columns\n if self.is_seasonal:\n seasons = self._get_seasons(df)\n non_seasons = list(set(df.columns) - set(seasons))\n df[non_seasons] = df[non_seasons].fillna(self.fill_value, axis=1)\n else: \n df = df.fillna(self.fill_value, axis=1)\n return df\n\nclass CostIntensityTransformer(BaseEstimator, TransformerMixin):\n \"\"\"Add engineered features to DataFrame.\"\"\"\n\n def fit(self, X, y=None):\n \"\"\"Does not save state\"\"\"\n return self\n\n def transform(self, X):\n \"\"\"Derives additional features used in the training of models.\"\"\"\n df = X.copy()\n\n def colors(row):\n try:\n intensity = len(row['mana_cost'])\n if (intensity == 3) & (row['mana_cost'][1] not in ['W', 'U', 'B', 'R', 'G']):\n return 0\n else: \n return intensity\n except:\n return 0\n # Difficulty casting\n df['mana_intensity'] = df['mana_cost'].apply(lambda x: colors(x))-2\n df['color_intensity'] = df['color_identity'].apply(lambda x: len(x)-2)\n return df\n\nclass CreatureFeatureTransformer(BaseEstimator, TransformerMixin):\n \"\"\"Add engineered creature features to DataFrame.\"\"\"\n\n def fit(self, X, y=None):\n \"\"\"Does not save state\"\"\"\n\n return self\n\n def transform(self, X):\n \"\"\"Derives additional features used in the training of models.\"\"\"\n df = X.copy()\n\n # Creature Features\n def pt_type(row):\n if (type(row['power']) == type('str')) and (type(row['toughness']) == type('str')):\n if '*' in row['power']+row['toughness'] or row['toughness']<='0':\n return 'variable'\n return 'static'\n return 'none'\n def power_to_int(row):\n if row['pt_type']=='static': \n return int(row['power'])\n else:\n return 0\n def tough_to_int(row):\n if row['pt_type']=='static':\n return int(row['toughness'])\n else:\n return 0\n\n # Create pt_type feature, convert static pts to ints\n df['pt_type'] = df.apply(pt_type, axis=1)\n df['power'] = df.apply(power_to_int, axis=1)\n df['toughness'] = df.apply(tough_to_int, axis=1)\n\n # Only engineer creatures with static PT\n mask = df['pt_type']=='static'\n\n # ACTUAL ENGINEERING\n df['p:t'] = df[mask]['power']/df[mask]['toughness']\n df['avg_pt'] = (df[mask]['power']+df[mask]['toughness'])/2\n df['cmc:apt'] = df[mask]['cmc']/df[mask]['avg_pt']\n\n return df\n\nclass PlaneswalkerTransformer(BaseEstimator, TransformerMixin):\n \"\"\"Add engineered planeswalker features to DataFrame.\"\"\"\n\n def fit(self, X, y=None):\n \"\"\"Does not save state\"\"\"\n\n return self\n\n def transform(self, X):\n \"\"\"Derives additional features used in the training of models.\"\"\"\n df = X.copy()\n def loyal_type(row):\n try:\n loyal = int(row['loyalty'])\n row['loyalty']=loyal\n row['l_type']='static'\n return row\n except:\n row['loyalty']=0\n row['l_type']='variable'\n return row\n\n df = df.apply(loyal_type, axis=1)\n \n return df\n\nclass DropFeaturesTransformer(BaseEstimator, TransformerMixin):\n \"\"\"Select features.\"\"\"\n\n # TODO: add parameterization of features for code reuse (or find a generic transformer)\n def __init__(self):\n self.features_to_drop = [\n 'cardname'\n # ,'setname'\n ,'type_line'\n ,'mana_cost'\n ,'oracle_text'\n ,'set'\n ,'colors'\n ,'color_identity'\n ,'legalities'\n ,'timestamp'\n ,'card_types'\n ,'mod_types'\n ,'sub_types'\n ,'l_type'\n ,'pt_type'\n ,'layout'\n # ,'price'\n ]\n\n def fit(self, X, y=None):\n \"\"\"Does not save state.\"\"\"\n return self\n\n def transform(self, X):\n \"\"\"Return DataFrame containing only the configured features.\"\"\"\n cols = list(set(self.features_to_drop) & set(X.columns))\n df = X.drop(columns=cols)\n return df\n\nclass SetExclusionTransformer(BaseEstimator, TransformerMixin):\n \"\"\"Removes sets\"\"\"\n def __init__(self, sets_to_drop=[]):\n if sets_to_drop:\n self.sets_to_drop = sets_to_drop\n else:\n self.sets_to_drop = [\n 'Kaladesh Inventions',\n 'Zendikar Expeditions',\n 'Portal Three Kingdoms',\n 'Legends',\n 'Arabian Nights',\n 'Modern Masters',\n 'Eternal Masters',\n 'Modern Masters 2017',\n 'Modern Masters 2015',\n 'Iconic Masters',\n 'Portal Second Age',\n 'Portal',\n 'The Dark',\n 'Battle Royale Box Set',\n 'Beatdown Box Set',\n 'Starter 2000',\n 'Starter 1999',\n 'Prerelease Events',\n 'Release Events',\n 'Rivals of Ixalan',\n\n # 'Commander 2013',\n # 'Commander 2014',\n # 'Commander 2015',\n # 'Commander 2016',\n ]\n def fit(self, X, y=None):\n return self\n\n def transform(self, X):\n \"\"\"Drop rows whose set is in the exclusion list\"\"\"\n df = X.copy()\n return df[df['setname'].apply(lambda x: x not in self.sets_to_drop)]\n\nclass BoolTransformer(BaseEstimator, TransformerMixin):\n \"\"\"Changes all Falses to 0 and Trues to 1\"\"\"\n def fit(self, X, y=None):\n \"\"\"Does not save state.\"\"\"\n return self\n\n def transform(self, X):\n \"\"\"Convert boolean columns to 0/1 integers\"\"\"\n df = X.copy()\n df['reprint'] = 1*df['reprint']\n return df\n\nclass CreateDummiesTransformer(BaseEstimator, TransformerMixin):\n \"\"\"Creates Dummies for given features\"\"\"\n def __init__(self, dummy_features=['rarity','layout','pt_type','l_type']):\n self.dummy_features = dummy_features\n\n def fit(self, X, y=None):\n \"\"\"Does not save state.\"\"\"\n return self\n\n def transform(self, X):\n \"\"\"One-hot encode the configured dummy features\"\"\"\n df = X.copy()\n df = pd.get_dummies(df, columns=self.dummy_features, prefix=self.dummy_features)\n return df\n\nclass TestFillTransformer(BaseEstimator, TransformerMixin):\n \"\"\"Fills unmatched columns in X_test with -1\"\"\"\n def __init__(self, fill_value=-1):\n self.fill_value = fill_value\n\n def fit(self, X, y=None):\n self.train_columns_ = set(X.columns)\n return self\n\n def transform(self, X):\n \"\"\"Enlarges test columns to equal train size\"\"\"\n df = X.copy()\n # Fill columns in train but not test with fill value\n missing = self.train_columns_ - set(X.columns)\n if missing:\n for column in missing:\n df[column] = self.fill_value\n # drop columns in test but not in train\n df = df[list(self.train_columns_)]\n return df\n\nclass TypelineTransformer(BaseEstimator, TransformerMixin):\n \"\"\"Creates Dummies for typeline\"\"\"\n def __init__(self):\n self.card_types = set(['Creature','Land','Instant','Sorcery','Enchantment','Artifact','Planeswalker'])\n self.sub_types = set()\n self.mod_types = set()\n\n def fit(self, X, y=None):\n \"\"\"identifies all subtypes\"\"\"\n # Cleave split cards and transforms\n cards = [x.split('//') for x in X['type_line'].unique()]\n for card in cards:\n for subcard in card:\n types = subcard.split(' — ')\n self.mod_types.update(set(types[0].split()) - self.card_types)\n try:\n self.sub_types.update(set(types[1].split()))\n except:\n pass\n \n return self\n\n def transform(self, X):\n \"\"\"Creates type dummies for main types\"\"\"\n df = X.copy()\n\n def type_sets(row):\n card_types = set()\n sub_types = set()\n mod_types = set()\n card = row['type_line'].split('//')\n for subcard in card:\n types = subcard.split(' — ')\n card_types.update(set(types[0].split()) & self.card_types)\n mod_types.update(set(types[0].split()) - self.card_types)\n try:\n sub_types.update(set(types[1].split()))\n except:\n pass\n row['card_types'] = card_types\n row['mod_types'] = mod_types\n row['sub_types'] = sub_types\n return row\n\n def type_dummies(row):\n for card_type in self.card_types:\n row[card_type] = 1*(card_type in row['card_types'])\n for mod_type in self.mod_types:\n row[mod_type] = 1*(mod_type in row['mod_types'])\n return row\n\n df = df.apply(type_sets, axis=1)\n # Dummify card type membership, type_mod membership\n df = df.apply(type_dummies, axis=1)\n\n return df\n\nclass ColorIDTransformer(BaseEstimator, TransformerMixin):\n \"\"\"Creates Dummies for color identity\"\"\"\n def __init__(self):\n self.colors = set(['W','U','B','R','G'])\n\n def fit(self, X, y=None):\n \"\"\"No saved state\"\"\"\n return self\n\n def transform(self, X):\n \"\"\"Dummifies color identity for each card\"\"\"\n df = X.copy()\n\n def color_dummies(row):\n for color in self.colors:\n row[color] = 1*(color in row['color_identity'])\n return row\n\n # Dummify color identity membership\n df = df.apply(color_dummies, axis=1)\n\n return df\n\nclass AbilityCountsTransformer(BaseEstimator, TransformerMixin):\n \"\"\"Creates counts for various ability types\"\"\"\n\n def fit(self, X, y=None):\n \"\"\"No saved state\"\"\"\n return self\n\n def transform(self, X):\n \"\"\"Reads text and counts ability blocks, activated, and triggered abilities\"\"\"\n df = X.copy()\n\n def count_abilities(row):\n txt = row['oracle_text']\n if pd.isnull(txt):\n txt = ''\n\n # Count ability blocks\n row['ability_sects'] = len(txt.split('\\r\\r\\n'))\n # Count activated\n row['activated'] = txt.count(':') \n # Count triggered\n row['triggered'] = len(re.findall('When|At|As',txt))\n # Count mana abilities\n row['mana_abilities'] = len(re.findall('Add|add', txt))\n \n return row\n\n # Count abilities for each card\n df = df.apply(count_abilities, axis=1)\n\n return df\n\nclass StandardSeasonTransformer(BaseEstimator, TransformerMixin):\n \"\"\" Add features to season matrix \"\"\"\n def fit(self, X, y=None):\n return self\n\n def transform(self, X, y=None):\n \"\"\" X has 'set_count' and 'season' as column \"\"\"\n Xt = X.copy()\n Xt['set_count_sq'] = np.square(Xt['set_count'])\n Xt['set_count_rt'] = np.sqrt(Xt['set_count'])\n Xt['sin_set'] = np.sin(Xt['set_count'])\n\n return Xt\n\nclass PriceToPowerTransformer(BaseEstimator, TransformerMixin):\n \"\"\" Transforms price to power by scaling according to rarity, based on observed trends \"\"\"\n def __init__(self, rarity_baseline={'mythic':10,'rare':1.5,'uncommon':0.5,'common':0.2}):\n self.rarity_baseline = rarity_baseline\n\n def fit(self, X, y=None):\n \"\"\" calculates average price by rarity of cards, averages over seasons, loads into attribute. 
Requires 'rarity' column \"\"\"\n self.rarity_scaler_ = self.rarity_baseline.copy()\n seasonal_prices_df = X.copy()\n seasons = get_seasons(seasonal_prices_df)\n for rarity in self.rarity_baseline.keys():\n rare_mask = seasonal_prices_df['rarity']==rarity\n if rare_mask.sum():\n rare_mean = seasonal_prices_df[rare_mask][seasons].mean()\n for avg_price in rare_mean:\n if not pd.isnull(avg_price):\n self.rarity_scaler_[rarity] = (self.rarity_scaler_[rarity]*3 + avg_price)/4 \n return self\n\n def transform(self, X, y_price):\n \"\"\" transforms price in dollars into unitless power metric (pegged to avg mythic price) \"\"\"\n rarity_df = X.copy()\n y_power = np.ones(y_price.shape)\n\n for rarity in self.rarity_baseline.keys():\n rare_mask = rarity_df['rarity']==rarity\n if rare_mask.sum():\n # Scales to mean mythic rare price\n ratio = self.rarity_scaler_['mythic']/self.rarity_scaler_[rarity]\n y_power[rare_mask] = y_price[rare_mask]*ratio\n\n return y_power\n\n def inverse_transform(self, X, y_power):\n \"\"\" transforms power back into price \"\"\"\n rarity_df = X.copy()\n y_price = np.ones(y_power.shape)\n\n for rarity in self.rarity_baseline.keys():\n rare_mask = rarity_df['rarity']==rarity\n if rare_mask.sum():\n ratio = self.rarity_scaler_['mythic']/self.rarity_scaler_[rarity]\n y_price[rare_mask] = y_power[rare_mask]/ratio\n\n return y_price\n\n\n # TODO optimize efficiency by combining code from fit and transform\n def fit_transform(self, X, y_price):\n \"\"\" Performs fit and transform in one step, returning transformed price to power \"\"\"\n self.fit(X)\n y_power = self.transform(X, y_price)\n\n return y_power\n\nclass StandardPriceTransformer(BaseEstimator, TransformerMixin):\n \"\"\" Performs standard price masking on input df \"\"\"\n def __init__(self, std_sets_df):\n \"\"\" std_sets_df is a dataframe with sets as the indices and seasons as the colums \"\"\"\n self.std_sets_df = std_sets_df\n self.seasons_ = std_sets_df.columns\n \n def fit(self, X, y=None):\n return self\n\n def _standard_mask(self, row):\n for season in self.seasons_:\n row[season] = row[season]*self.std_sets_df.loc[row['setname']][season]\n return row\n\n def transform(self, X, y=None):\n \"\"\" X is seasonal prices, to be filtered for standard only \"\"\"\n seasonal_prices_df = X.copy()\n return seasonal_prices_df.apply(self._standard_mask, axis=1)","sub_path":"model/.ipynb_checkpoints/master_transmuter-checkpoint.py","file_name":"master_transmuter-checkpoint.py","file_ext":"py","file_size_in_byte":18694,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"362761347","text":"from rest_framework import viewsets\nfrom ..models.ticket import Ticket, STATES\nfrom ..serializers import TicketSerializer, TicketIdSerializer\nfrom rest_framework.response import Response\nfrom rest_framework.permissions import IsAuthenticated\nfrom django.shortcuts import get_object_or_404\nfrom rest_framework.decorators import action\nfrom django_fsm import TransitionNotAllowed\nfrom rest_framework import mixins\nfrom django.utils import timezone\nfrom ..filters.ticket import TicketFilter\nfrom ..helpers import CanViewStatistic\nfrom django.conf import settings\nfrom pytz import timezone as tz\n\n\nclass TicketViewSet(mixins.CreateModelMixin, mixins.RetrieveModelMixin, viewsets.GenericViewSet):\n queryset = Ticket.objects.all()\n serializer_class = TicketSerializer\n permission_classes = (IsAuthenticated,)\n\n def create(self, request, *args, **kwargs):\n \"\"\"\n create and return 
created ticket\n \"\"\"\n user = request.user\n serializer = TicketSerializer(data=request.data)\n if serializer.is_valid():\n ticket = Ticket(\n title=serializer.validated_data['title'],\n description=serializer.validated_data['description'],\n author=user,\n )\n ticket.save()\n serializer = TicketSerializer(ticket)\n return Response(serializer.data, status=201)\n else:\n return Response('Not created', status=502)\n\n def retrieve(self, request, pk=None, **kwargs):\n \"\"\"\n return ticket by pk\n \"\"\"\n user = request.user\n if user.is_superuser:\n queryset = self.queryset\n else:\n queryset = self.queryset.filter(author=user)\n ticket = get_object_or_404(queryset, pk=pk)\n serializer = TicketSerializer(ticket)\n return Response(serializer.data)\n\n\nclass TicketChangeStateViewSet(viewsets.GenericViewSet):\n queryset = Ticket.objects.all()\n serializer_class = TicketIdSerializer\n permission_classes = (IsAuthenticated,)\n\n @action(methods=['post'], detail=True)\n def process(self, request, pk=None, **kwargs):\n \"\"\"\n set proceed ticket by pk\n \"\"\"\n user = request.user\n queryset = self.queryset.filter(actor=user)\n ticket = get_object_or_404(queryset, pk=pk)\n try:\n ticket.processed()\n except TransitionNotAllowed:\n return Response('Not allowed now. Possibly you already have a ticket in process.', status=502)\n ticket.save()\n serializer = TicketIdSerializer(ticket)\n return Response(serializer.data)\n\n @action(methods=['post'], detail=True)\n def cancel(self, request, pk=None, **kwargs):\n \"\"\"\n set cancel ticket by pk\n \"\"\"\n user = request.user\n queryset = self.queryset.filter(actor=user)\n ticket = get_object_or_404(queryset, pk=pk)\n try:\n ticket.canceled()\n except TransitionNotAllowed:\n return Response('Method not allowed', status=502)\n ticket.save()\n serializer = TicketIdSerializer(ticket)\n return Response(serializer.data)\n\n @action(methods=['post'], detail=True)\n def done(self, request, pk=None, **kwargs):\n \"\"\"\n set done ticket by pk\n \"\"\"\n user = request.user\n queryset = self.queryset.filter(actor=user)\n ticket = get_object_or_404(queryset, pk=pk)\n try:\n ticket.done()\n except TransitionNotAllowed:\n return Response('Method not allowed', status=502)\n ticket.save()\n serializer = TicketIdSerializer(ticket)\n return Response(serializer.data)\n\n @action(methods=['post'], detail=True)\n def reopen(self, request, pk=None, **kwargs):\n \"\"\"\n set reopen ticket by pk\n \"\"\"\n user = request.user\n queryset = self.queryset.filter(author=user)\n ticket = get_object_or_404(queryset, pk=pk)\n try:\n ticket.reopen()\n except TransitionNotAllowed:\n return Response('Method not allowed', status=502)\n ticket.save()\n serializer = TicketIdSerializer(ticket)\n return Response(serializer.data)\n\n @action(methods=['post'], detail=True)\n def close(self, request, pk=None, **kwargs):\n \"\"\"\n set closed ticket by pk\n \"\"\"\n user = request.user\n queryset = self.queryset.filter(author=user)\n ticket = get_object_or_404(queryset, pk=pk)\n try:\n ticket.closed()\n except TransitionNotAllowed:\n return Response('Method not allowed', status=502)\n ticket.save()\n serializer = TicketIdSerializer(ticket)\n return Response(serializer.data)\n\n\nclass TicketStatisticAPIView(mixins.ListModelMixin, viewsets.GenericViewSet):\n queryset = Ticket.objects.all()\n serializer_class = TicketSerializer\n permission_classes = (IsAuthenticated, CanViewStatistic)\n filter_class = TicketFilter\n\n def list(self, request, *args, **kwargs):\n \"\"\"\n Get statistic info about ticket counts per ACTOR (if given) and per UPDATED_AT date range (if given).\n Data is divided per ticket.state\n \"\"\"\n actor = request.GET.get('actor', None)\n\n date_from = request.GET.get('date_from', None)\n date_from = timezone.datetime.strptime(date_from, '%d.%m.%Y') if date_from \\\n else timezone.datetime(day=1, month=1, year=2018)\n date_from = date_from.replace(tzinfo=tz(settings.TIME_ZONE))\n\n date_to = request.GET.get('date_to', None)\n date_to = timezone.datetime.strptime(date_to, '%d.%m.%Y') if date_to else timezone.now()\n date_to = date_to.replace(tzinfo=tz(settings.TIME_ZONE))\n qs = self.queryset.filter(updated_at__range=(date_from, date_to))\n if actor:\n qs = qs.filter(actor=actor)\n\n if qs.count() == 0:\n return Response('No data for given params (id: {}, date_from: {}, date_to: {})'.\n format(actor, date_from.strftime('%d.%m.%Y'), date_to.strftime('%d.%m.%Y')))\n\n tickets_count = {}\n\n for state, state_title in STATES:\n tickets_count.update(\n {state: {\n 'title': state_title,\n 'count': qs.filter(state=state).count()\n }\n })\n\n data = {\n 'actor': actor if actor else 'all',\n 'from date': date_from.strftime('%d.%m.%Y'),\n 'to date': date_to.strftime('%d.%m.%Y'),\n 'tickets': tickets_count\n }\n return Response(data)\n","sub_path":"iticket/ticket/views/ticket.py","file_name":"ticket.py","file_ext":"py","file_size_in_byte":7045,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"156206796","text":"class Solution:\n def twoSum(self, nums: List[int], target: int) -> List[int]:\n hash = {}\n n = len(nums)\n for i in range(n):\n # record each value and its index\n hash[nums[i]] = i\n for i in range(n):\n j = hash.get(target - nums[i])\n if j is not None and j != i:\n return [i, j]","sub_path":"two_Sum.py","file_name":"two_Sum.py","file_ext":"py","file_size_in_byte":350,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"640189155","text":"# Princeton University licenses this file to You under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License. You may obtain a copy of the License at:\n# http://www.apache.org/licenses/LICENSE-2.0\n# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed\n# on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and limitations under the License.\n\n\n# ********************************************* LLVM bindings **************************************************************\n\nimport atexit\nimport ctypes\nimport inspect\nimport numpy as np\nimport os, re\n\nfrom psyneulink.core.scheduling.time import TimeScale\nfrom psyneulink.core.globals.keywords import AFTER, BEFORE\n\nfrom psyneulink.core import llvm as pnlvm\nfrom .debug import debug_env\nfrom .helpers import ConditionGenerator\nfrom llvmlite import ir\n\n__all__ = ['LLVMBuilderContext', '_modules', '_find_llvm_function', '_convert_llvm_ir_to_ctype']\n\n_modules = set()\n_all_modules = set()\n_struct_count = 0\n\n@atexit.register\ndef module_count():\n if \"stat\" in debug_env:\n print(\"Total LLVM modules: \", len(_all_modules))\n print(\"Total structures generated: \", _struct_count)\n\n# TODO: Should this be selectable?\n_int32_ty = ir.IntType(32)\n_float_ty = ir.DoubleType()\n\nclass LLVMBuilderContext:\n uniq_counter = 0\n _llvm_generation = 0\n\n def __init__(self):\n self.int32_ty = _int32_ty\n self.float_ty = _float_ty\n self.module = None\n\n def __enter__(self):\n assert self.module is None\n self.module = ir.Module(name=\"PsyNeuLinkModule-\" + str(LLVMBuilderContext._llvm_generation))\n LLVMBuilderContext._llvm_generation += 1\n return self\n\n def __exit__(self, e_type, e_value, e_traceback):\n assert self.module is not None\n _modules.add(self.module)\n _all_modules.add(self.module)\n self.module = None\n\n\n def get_unique_name(self, name):\n LLVMBuilderContext.uniq_counter += 1\n name = re.sub(r\"[- ()\\[\\]]\", \"_\", name)\n return name + '_' + str(LLVMBuilderContext.uniq_counter)\n\n def get_builtin(self, name, args, function_type = None):\n if name in ('pow', 'log', 'exp'):\n return self.get_llvm_function(\"__pnl_builtin_\" + name)\n return self.module.declare_intrinsic(\"llvm.\" + name, args, function_type)\n\n def get_llvm_function(self, name):\n try:\n f = name._llvm_function\n except AttributeError:\n f = _find_llvm_function(name, _all_modules | {self.module})\n # Add declaration to the current module\n if f.name not in self.module.globals:\n decl_f = ir.Function(self.module, f.type.pointee, f.name)\n assert decl_f.is_declaration\n return decl_f\n return f\n\n @staticmethod\n def get_debug_location(func, component):\n if \"debug_info\" not in debug_env:\n return\n\n mod = func.module\n path = inspect.getfile(component.__class__) if component is not None else \"\"\n d_version = mod.add_metadata([ir.IntType(32)(2), \"Dwarf Version\", ir.IntType(32)(4)])\n di_version = mod.add_metadata([ir.IntType(32)(2), \"Debug Info Version\", ir.IntType(32)(3)])\n flags = mod.add_named_metadata(\"llvm.module.flags\")\n if len(flags.operands) == 0:\n flags.add(d_version)\n flags.add(di_version)\n cu = mod.add_named_metadata(\"llvm.dbg.cu\")\n di_file = mod.add_debug_info(\"DIFile\", {\n \"filename\": os.path.basename(path),\n \"directory\": os.path.dirname(path),\n })\n di_func_type = mod.add_debug_info(\"DISubroutineType\", {\n # None as `null`\n \"types\": mod.add_metadata([None]),\n })\n di_compileunit = mod.add_debug_info(\"DICompileUnit\", {\n \"language\": ir.DIToken(\"DW_LANG_Python\"),\n \"file\": di_file,\n \"producer\": \"PsyNeuLink\",\n \"runtimeVersion\": 0,\n \"isOptimized\": False,\n }, is_distinct=True)\n cu.add(di_compileunit)\n di_func = mod.add_debug_info(\"DISubprogram\", {\n \"name\": func.name,\n \"file\": di_file,\n \"line\": 0,\n \"type\": di_func_type,\n \"isLocal\": False,\n \"unit\": di_compileunit,\n }, is_distinct=True)\n di_loc = mod.add_debug_info(\"DILocation\", {\n \"line\": 0,\n \"column\": 0,\n \"scope\": di_func,\n })\n return di_loc\n\n def get_input_struct_type(self, component):\n if hasattr(component, '_get_input_struct_type'):\n return component._get_input_struct_type(self)\n\n default_var = component.defaults.variable\n return self.convert_python_struct_to_llvm_ir(default_var)\n\n def get_output_struct_type(self, component):\n if hasattr(component, '_get_output_struct_type'):\n return component._get_output_struct_type(self)\n\n default_val = component.defaults.value\n return self.convert_python_struct_to_llvm_ir(default_val)\n\n def get_param_struct_type(self, component):\n if hasattr(component, '_get_param_struct_type'):\n return component._get_param_struct_type(self)\n\n params = component._get_param_values()\n return self.convert_python_struct_to_llvm_ir(params)\n\n def get_context_struct_type(self, component):\n if hasattr(component, '_get_context_struct_type'):\n return component._get_context_struct_type(self)\n\n try:\n stateful = tuple(getattr(component, sa) for sa in component.stateful_attributes)\n return self.convert_python_struct_to_llvm_ir(stateful)\n except AttributeError:\n return ir.LiteralStructType([])\n\n def get_data_struct_type(self, component):\n if hasattr(component, '_get_data_struct_type'):\n return component._get_data_struct_type(self)\n\n return ir.LiteralStructType([])\n\n def get_param_ptr(self, component, builder, params_ptr, param_name):\n idx = self.int32_ty(component._get_param_ids().index(param_name))\n return builder.gep(params_ptr, [self.int32_ty(0), idx])\n\n def get_state_ptr(self, component, builder, state_ptr, state_name):\n idx = self.int32_ty(component.stateful_attributes.index(state_name))\n return builder.gep(state_ptr, [self.int32_ty(0), idx])\n\n def unwrap_2d_array(self, builder, element):\n if isinstance(element.type.pointee, ir.ArrayType) and isinstance(element.type.pointee.element, ir.ArrayType):\n assert element.type.pointee.count == 1\n return builder.gep(element, [self.int32_ty(0), self.int32_ty(0)])\n return element\n\n def gen_composition_exec(self, composition, simulation=False):\n # Create condition generator\n cond_gen = ConditionGenerator(self, composition)\n\n name = 'exec_wrap_sim_' if simulation else 'exec_wrap_'\n func_name = self.get_unique_name(name + composition.name)\n func_ty = ir.FunctionType(ir.VoidType(), (\n self.get_context_struct_type(composition).as_pointer(),\n self.get_param_struct_type(composition).as_pointer(),\n self.get_input_struct_type(composition).as_pointer(),\n self.get_data_struct_type(composition).as_pointer(),\n cond_gen.get_condition_struct_type().as_pointer()))\n llvm_func = ir.Function(self.module, func_ty, name=func_name)\n llvm_func.attributes.add('argmemonly')\n context, params, comp_in, data_arg, cond = llvm_func.args\n for a in llvm_func.args:\n a.attributes.add('nonnull')\n a.attributes.add('noalias')\n\n # Create entry block\n entry_block = llvm_func.append_basic_block(name=\"entry\")\n builder = ir.IRBuilder(entry_block)\n builder.debug_metadata = self.get_debug_location(llvm_func, composition)\n\n if 
\"const_params\" in debug_env:\n const_params = params.type.pointee(composition._get_param_initializer(None))\n params = builder.alloca(const_params.type)\n builder.store(const_params, params)\n\n if \"alloca_data\" in debug_env:\n data = builder.alloca(data_arg.type.pointee)\n data_vals = builder.load(data_arg)\n builder.store(data_vals, data)\n else:\n data = data_arg\n\n # Call input CIM\n input_cim_w = composition._get_node_wrapper(composition.input_CIM, simulation)\n input_cim_f = self.get_llvm_function(input_cim_w)\n builder.call(input_cim_f, [context, params, comp_in, data, data])\n\n if simulation is False and composition.enable_controller and \\\n composition.controller_mode == BEFORE:\n assert composition.controller is not None\n controller = composition._get_node_wrapper(composition.controller, simulation)\n controller_f = self.get_llvm_function(controller)\n builder.call(controller_f, [context, params, comp_in, data, data])\n\n # Allocate run set structure\n run_set_type = ir.ArrayType(ir.IntType(1), len(composition.nodes))\n run_set_ptr = builder.alloca(run_set_type, name=\"run_set\")\n\n # Allocate temporary output storage\n output_storage = builder.alloca(data.type.pointee, name=\"output_storage\")\n\n iter_ptr = builder.alloca(self.int32_ty, name=\"iter_counter\")\n builder.store(self.int32_ty(0), iter_ptr)\n\n loop_condition = builder.append_basic_block(name=\"scheduling_loop_condition\")\n builder.branch(loop_condition)\n\n # Generate a while not 'end condition' loop\n builder.position_at_end(loop_condition)\n run_cond = cond_gen.generate_sched_condition(builder,\n composition.termination_processing[TimeScale.TRIAL],\n cond, None)\n run_cond = builder.not_(run_cond, name=\"not_run_cond\")\n\n loop_body = builder.append_basic_block(name=\"scheduling_loop_body\")\n exit_block = builder.append_basic_block(name=\"exit\")\n builder.cbranch(run_cond, loop_body, exit_block)\n\n\n # Generate loop body\n builder.position_at_end(loop_body)\n\n zero = self.int32_ty(0)\n any_cond = ir.IntType(1)(0)\n\n # Calculate execution set before running the mechanisms\n for idx, mech in enumerate(composition.nodes):\n run_set_mech_ptr = builder.gep(run_set_ptr,\n [zero, self.int32_ty(idx)],\n name=\"run_cond_ptr_\" + mech.name)\n mech_cond = cond_gen.generate_sched_condition(builder,\n composition._get_processing_condition_set(mech),\n cond, mech)\n ran = cond_gen.generate_ran_this_pass(builder, cond, mech)\n mech_cond = builder.and_(mech_cond, builder.not_(ran),\n name=\"run_cond_\" + mech.name)\n any_cond = builder.or_(any_cond, mech_cond, name=\"any_ran_cond\")\n builder.store(mech_cond, run_set_mech_ptr)\n\n for idx, mech in enumerate(composition.nodes):\n run_set_mech_ptr = builder.gep(run_set_ptr, [zero, self.int32_ty(idx)])\n mech_cond = builder.load(run_set_mech_ptr, name=\"mech_\" + mech.name + \"_should_run\")\n with builder.if_then(mech_cond):\n mech_w = composition._get_node_wrapper(mech, simulation);\n mech_f = self.get_llvm_function(mech_w)\n # Wrappers do proper indexing of all strctures\n if len(mech_f.args) == 5: # Mechanism wrappers have 5 inputs\n builder.call(mech_f, [context, params, comp_in, data, output_storage])\n else:\n builder.call(mech_f, [context, params, comp_in, data, output_storage, cond])\n\n cond_gen.generate_update_after_run(builder, cond, mech)\n\n # Writeback results\n for idx, mech in enumerate(composition.nodes):\n run_set_mech_ptr = builder.gep(run_set_ptr, [zero, self.int32_ty(idx)])\n mech_cond = builder.load(run_set_mech_ptr, name=\"mech_\" + mech.name 
+ \"_ran\")\n with builder.if_then(mech_cond):\n out_ptr = builder.gep(output_storage, [zero, zero, self.int32_ty(idx)], name=\"result_ptr_\" + mech.name)\n data_ptr = builder.gep(data, [zero, zero, self.int32_ty(idx)],\n name=\"data_result_\" + mech.name)\n builder.store(builder.load(out_ptr), data_ptr)\n\n # Update step counter\n with builder.if_then(any_cond):\n cond_gen.bump_ts(builder, cond)\n\n # Increment number of iterations\n iters = builder.load(iter_ptr, name=\"iterw\")\n iters = builder.add(iters, self.int32_ty(1), name=\"iterw_inc\")\n builder.store(iters, iter_ptr)\n\n max_iters = len(composition.scheduler_processing.consideration_queue)\n completed_pass = builder.icmp_unsigned(\"==\", iters,\n self.int32_ty(max_iters),\n name=\"completed_pass\")\n # Increment pass and reset time step\n with builder.if_then(completed_pass):\n builder.store(zero, iter_ptr)\n # Bumping automatically zeros lower elements\n cond_gen.bump_ts(builder, cond, (0, 1, 0))\n\n builder.branch(loop_condition)\n\n builder.position_at_end(exit_block)\n\n if simulation is False and composition.enable_controller and \\\n composition.controller_mode == AFTER:\n assert composition.controller is not None\n controller = composition._get_node_wrapper(composition.controller, simulation)\n controller_f = self.get_llvm_function(controller)\n builder.call(controller_f, [context, params, comp_in, data, data])\n\n # Call output CIM\n output_cim_w = composition._get_node_wrapper(composition.output_CIM, simulation);\n output_cim_f = self.get_llvm_function(output_cim_w)\n builder.call(output_cim_f, [context, params, comp_in, data, data])\n\n if \"alloca_data\" in debug_env:\n data_vals = builder.load(data)\n builder.store(data_vals, data_arg)\n\n # Bump run counter\n cond_gen.bump_ts(builder, cond, (1, 0, 0))\n\n builder.ret_void()\n\n return llvm_func\n\n def gen_composition_run(self, composition, simulation=False):\n name = 'run_wrap_sim_' if simulation else 'run_wrap_'\n func_name = self.get_unique_name(name + composition.name)\n func_ty = ir.FunctionType(ir.VoidType(), (\n self.get_context_struct_type(composition).as_pointer(),\n self.get_param_struct_type(composition).as_pointer(),\n self.get_data_struct_type(composition).as_pointer(),\n self.get_input_struct_type(composition).as_pointer(),\n self.get_output_struct_type(composition).as_pointer(),\n self.int32_ty.as_pointer(),\n self.int32_ty.as_pointer()))\n llvm_func = ir.Function(self.module, func_ty, name=func_name)\n llvm_func.attributes.add('argmemonly')\n context, params, data, data_in, data_out, runs_ptr, inputs_ptr = llvm_func.args\n for a in llvm_func.args:\n a.attributes.add('nonnull')\n a.attributes.add('noalias')\n\n # simulation does not care about the output\n # it extracts results of the controller objective mechanism\n if simulation:\n data_out.attributes.remove('nonnull')\n\n # Create entry block\n entry_block = llvm_func.append_basic_block(name=\"entry\")\n builder = ir.IRBuilder(entry_block)\n builder.debug_metadata = self.get_debug_location(llvm_func, composition)\n\n # Allocate and initialize condition structure\n cond_gen = ConditionGenerator(self, composition)\n cond_type = cond_gen.get_condition_struct_type()\n cond = builder.alloca(cond_type)\n cond_init = cond_type(cond_gen.get_condition_initializer())\n builder.store(cond_init, cond)\n\n iter_ptr = builder.alloca(self.int32_ty, name=\"iter_counter\")\n builder.store(self.int32_ty(0), iter_ptr)\n\n loop_condition = builder.append_basic_block(name=\"run_loop_condition\")\n 
builder.branch(loop_condition)\n\n # Generate a \"while < count\" loop\n builder.position_at_end(loop_condition)\n count = builder.load(iter_ptr)\n runs = builder.load(runs_ptr)\n run_cond = builder.icmp_unsigned('<', count, runs)\n\n loop_body = builder.append_basic_block(name=\"run_loop_body\")\n exit_block = builder.append_basic_block(name=\"exit\")\n builder.cbranch(run_cond, loop_body, exit_block)\n\n # Generate loop body\n builder.position_at_end(loop_body)\n\n # Current iteration\n iters = builder.load(iter_ptr);\n\n # Get the right input stimulus\n input_idx = builder.urem(iters, builder.load(inputs_ptr))\n data_in_ptr = builder.gep(data_in, [input_idx])\n\n # Call execution\n if simulation:\n exec_f = self.get_llvm_function(composition._llvm_simulation.name)\n else:\n exec_f = self.get_llvm_function(composition)\n builder.call(exec_f, [context, params, data_in_ptr, data, cond])\n\n if not simulation:\n # Extract output_CIM result\n idx = composition._get_node_index(composition.output_CIM)\n result_ptr = builder.gep(data, [self.int32_ty(0), self.int32_ty(0), self.int32_ty(idx)])\n output_ptr = builder.gep(data_out, [iters])\n result = builder.load(result_ptr)\n builder.store(result, output_ptr)\n\n # Increment counter\n iters = builder.add(iters, self.int32_ty(1))\n builder.store(iters, iter_ptr)\n builder.branch(loop_condition)\n\n builder.position_at_end(exit_block)\n\n # Store the number of executed iterations\n builder.store(builder.load(iter_ptr), runs_ptr)\n\n builder.ret_void()\n\n return llvm_func\n\n def gen_multirun_wrapper(self, function):\n\n if function.module is not self.module:\n function = ir.Function(self.module, function.type.pointee, function.name)\n assert function.is_declaration\n\n args = [a.type for a in function.args]\n args.append(self.int32_ty.as_pointer())\n multirun_ty = ir.FunctionType(function.type.pointee.return_type, args)\n multirun_f = ir.Function(self.module, multirun_ty, function.name + \"_multirun\")\n block = multirun_f.append_basic_block(name=\"entry\")\n cond_block = multirun_f.append_basic_block(name=\"loop_cond\")\n body_block = multirun_f.append_basic_block(name=\"loop_body\")\n exit_block = multirun_f.append_basic_block(name=\"exit_loop\")\n\n builder = ir.IRBuilder(block)\n\n limit_ptr = multirun_f.args[-1]\n index_ptr = builder.alloca(self.int32_ty)\n builder.store(index_ptr.type.pointee(0), index_ptr)\n builder.branch(cond_block)\n\n with builder.goto_block(cond_block):\n index = builder.load(index_ptr)\n limit = builder.load(limit_ptr)\n cond = builder.icmp_unsigned(\"<\", index, limit)\n builder.cbranch(cond, body_block, exit_block)\n\n with builder.goto_block(body_block):\n # Runs need special handling. 
data_in and data_out are one dimensional,\n # but hold entries for all parallel invocations.\n is_comp_run = len(function.args) == 7\n if is_comp_run:\n runs_count = multirun_f.args[5]\n input_count = multirun_f.args[6]\n\n # Index all pointer arguments\n index = builder.load(index_ptr)\n indexed_args = []\n for i, arg in enumerate(multirun_f.args[:-1]):\n # Don't adjust #inputs and #trials\n if isinstance(arg.type, ir.PointerType):\n offset = index\n # #runs and #trials needs to be the same\n if is_comp_run and i >= 5:\n offset = self.int32_ty(0)\n # data arrays need special handling\n elif is_comp_run and i == 4: # data_out\n offset = builder.mul(index, builder.load(runs_count))\n elif is_comp_run and i == 3: # data_in\n offset = builder.mul(index, builder.load(input_count))\n\n arg = builder.gep(arg, [offset])\n\n indexed_args.append(arg)\n\n builder.call(function, indexed_args)\n new_idx = builder.add(index, index.type(1))\n builder.store(new_idx, index_ptr)\n builder.branch(cond_block)\n\n with builder.goto_block(exit_block):\n builder.ret_void()\n\n return multirun_f\n\n def convert_python_struct_to_llvm_ir(self, t):\n if type(t) is list:\n assert all(type(x) == type(t[0]) for x in t)\n elem_t = self.convert_python_struct_to_llvm_ir(t[0])\n return ir.ArrayType(elem_t, len(t))\n elif type(t) is tuple:\n elems_t = (self.convert_python_struct_to_llvm_ir(x) for x in t)\n return ir.LiteralStructType(elems_t)\n elif isinstance(t, (int, float)):\n return self.float_ty\n elif isinstance(t, np.ndarray):\n return self.convert_python_struct_to_llvm_ir(t.tolist())\n elif t is None:\n return ir.LiteralStructType([])\n elif isinstance(t, np.random.RandomState):\n return pnlvm.builtins.get_mersenne_twister_state_struct(self)\n\n assert False, \"Don't know how to convert {}\".format(type(t))\n\ndef _find_llvm_function(name, mods = _all_modules):\n f = None\n for m in mods:\n if name in m.globals:\n f = m.get_global(name)\n\n if not isinstance(f, ir.Function):\n raise ValueError(\"No such function: {}\".format(name))\n return f\n\ndef _gen_cuda_kernel_wrapper_module(function):\n module = ir.Module(name=\"wrapper_\" + function.name)\n\n decl_f = ir.Function(module, function.type.pointee, function.name)\n assert decl_f.is_declaration\n kernel_func = ir.Function(module, function.type.pointee, function.name + \"_cuda_kernel\")\n block = kernel_func.append_basic_block(name=\"entry\")\n builder = ir.IRBuilder(block)\n\n # Calculate global id of a thread in x dimension\n intrin_ty = ir.FunctionType(ir.IntType(32), [])\n tid_x_f = ir.Function(module, intrin_ty, \"llvm.nvvm.read.ptx.sreg.tid.x\")\n ntid_x_f = ir.Function(module, intrin_ty, \"llvm.nvvm.read.ptx.sreg.ntid.x\")\n ctaid_x_f = ir.Function(module, intrin_ty, \"llvm.nvvm.read.ptx.sreg.ctaid.x\")\n global_id = builder.mul(builder.call(ctaid_x_f, []), builder.call(ntid_x_f, []))\n global_id = builder.add(global_id, builder.call(tid_x_f, []))\n\n # Runs need special handling. 
data_in and data_out are one dimensional,\n # but hold entries for all parallel invocations.\n is_comp_run = len(kernel_func.args) == 7\n if is_comp_run:\n runs_count = kernel_func.args[5]\n input_count = kernel_func.args[6]\n\n # Index all pointer arguments\n indexed_args = []\n for i, arg in enumerate(kernel_func.args):\n # Don't adjust #inputs and #trials\n if isinstance(arg.type, ir.PointerType):\n offset = global_id\n # #runs and #trials needs to be the same\n if is_comp_run and i >= 5:\n offset = ir.IntType(32)(0)\n # data arrays need special handling\n elif is_comp_run and i == 4: # data_out\n offset = builder.mul(global_id, builder.load(runs_count))\n elif is_comp_run and i == 3: # data_in\n offset = builder.mul(global_id, builder.load(input_count))\n\n arg = builder.gep(arg, [offset])\n\n indexed_args.append(arg)\n builder.call(decl_f, indexed_args)\n builder.ret_void()\n\n # Add kernel mark metadata\n module.add_named_metadata(\"nvvm.annotations\", [kernel_func, \"kernel\", ir.IntType(32)(1)])\n\n return module\n\n_type_cache = {}\n\ndef _convert_llvm_ir_to_ctype(t):\n if t in _type_cache:\n return _type_cache[t]\n\n type_t = type(t)\n if type_t is ir.VoidType:\n return None\n elif type_t is ir.IntType:\n if t.width == 32:\n return ctypes.c_int\n elif t.width == 64:\n return ctypes.c_longlong\n elif type_t is ir.DoubleType:\n return ctypes.c_double\n elif type_t is ir.FloatType:\n return ctypes.c_float\n elif type_t is ir.PointerType:\n # FIXME: Can this handle void*? Do we care?\n pointee = _convert_llvm_ir_to_ctype(t.pointee)\n ret_t = ctypes.POINTER(pointee)\n elif type_t is ir.ArrayType:\n element_type = _convert_llvm_ir_to_ctype(t.element)\n ret_t = element_type * len(t)\n elif type_t is ir.LiteralStructType:\n global _struct_count\n uniq_name = \"struct_\" + str(_struct_count)\n _struct_count += 1\n\n field_list = []\n for i, e in enumerate(t.elements):\n # llvmlite modules get _unique string only works for symbol names\n field_uniq_name = uniq_name + \"field_\" + str(i)\n field_list.append((field_uniq_name, _convert_llvm_ir_to_ctype(e)))\n\n ret_t = type(uniq_name, (ctypes.Structure,), {\"__init__\": ctypes.Structure.__init__})\n ret_t._fields_ = field_list\n assert len(ret_t._fields_) == len(t.elements)\n else:\n assert False, \"Don't know how to convert LLVM type: {}\".format(t)\n\n _type_cache[t] = ret_t\n return ret_t\n","sub_path":"psyneulink/core/llvm/builder_context.py","file_name":"builder_context.py","file_ext":"py","file_size_in_byte":26016,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"84308226","text":"#imports\nimport requests\nfrom bs4 import BeautifulSoup\nimport re\nimport pprint\nimport json\nimport datetime\nimport unicodedata\n\nteamName = \"REDHOTS FC 05G\"\nteamSchedule = []\n\npp = pprint.PrettyPrinter(indent=4)\n\nurl = \"http://events.gotsport.com/events/schedule.aspx?EventID=53686&GroupID=545173&Gender=Girls&Age=12\"\n#requests\nurl_r = requests.get(url)\n#run the requests through soup\nurl_soup = BeautifulSoup(url_r.content, \"html.parser\")\n\nscheduleDiv = url_soup.find(\"div\",{\"class\":\"grid_12\"})\nscheduleTables = scheduleDiv.findAll(\"table\")\n\n#pp.pprint('Tables: '+str(len(scheduleTables)))\n\nfor table in scheduleTables:\n scheduleRows = table.findAll(\"tr\")\n #pp.pprint('Rows in this table: '+str(len(scheduleRows)))\n\n\n for i in range(0, len(scheduleRows)):\n #pp.pprint(' '+str(i))\n if i==0:\n gameDate = scheduleRows[i].text.strip()\n if i>1:\n scheduleTDs = 
scheduleRows[i].findAll(\"td\")\n game = {}\n game['date'] = gameDate\n game[\"time\"] = scheduleRows[i].find(\"div\",{\"class\":\"MatchTime\"}).text.strip()\n game[\"home\"] = scheduleRows[i].find(\"td\",{\"class\":\"homeTeam\"}).text.strip()\n game[\"away\"] = scheduleRows[i].find(\"td\",{\"class\":\"awayTeam\"}).text.strip()\n scores = scheduleRows[i].findAll(\"span\",{\"class\":\"score\"})\n game[\"homeScore\"] = scores[0].text.strip()\n game[\"awayScore\"] = scores[1].text.strip()\n game[\"field\"] = scheduleRows[i].find(\"td\",{\"class\":\"location\"}).text.strip()\n\n #pp.pprint(\" \"+game['date']+\": \"+game[\"home\"]+\" vs. \"+game[\"away\"])\n\n teamSchedule.append(game)\n\npp.pprint(teamSchedule)\n\n#declare files, w+ create if don't exist\nj = open( \"/var/www/html/apps/redhots/data/team-schedule-all.json\",\"w+\")\n#j = open( \"../data/team-schedule-all.json\",\"w+\")\n#minified\n#json.dump(teamSchedule, j, sort_keys=True, separators=(',',':'))\n\n#prettified\njson.dump(teamSchedule, j, sort_keys=False, indent=4)\n","sub_path":"build/scripts/get-schedule-all.py","file_name":"get-schedule-all.py","file_ext":"py","file_size_in_byte":1991,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"612487564","text":"#!/opt/local/bin/python\n \nfrom lxml import etree\nimport re\nimport os\nfrom multiprocessing import Process\n\ndef namespace(element):\n m = re.match('\\{.*\\}', element.tag)\n return m.group(0) if m else ''\n\ndef getMatch(fname, act):\n r = etree.parse(fname).getroot()\n ns = namespace(r)\n t = r.find('.//{0}pealkiri'.format(ns)).text\n return (act, re.compile('(.*)(' + t +'\\w*(\\s|\\Z)+)', re.I))\n\ndef getLinks(fname, act, searchList):\n r = etree.parse(fname).getroot()\n ns = namespace(r)\n txt = \"\"\n\n matches = dict()\n for child in r.findall('.//{0}tavatekst'.format(ns)):\n if child.text:\n txt = txt + \"\\t\" + child.text\n for act, searchTerm in searchList:\n print(\"next matching term\")\n matches[act] = len(re.findall(searchTerm, txt)) \n\n return matches\n\n# Get the titles of acts per month and compile a list of REs to match them\nr = re.compile(\"(\\d+)\\.(\\d+)\\.(\\d+)\\-(.*)\\.xml\", re.I)\nd = dict() \nfor filename in os.listdir('.'):\n m = re.match(r, filename)\n if m:\n k = m.group(2) + \"-\" + m.group(3)\n if not k in d:\n d[k] = []\n\n d[k].append(getMatch(filename, m.group(4)))\n\nprint('Title extraction done')\n\nmatrices = dict()\nprint('Starting to find links')\nfor filename in os.listdir('.'):\n print(filename)\n m = re.match(r, filename)\n if m:\n k = m.group(2) + \"-\" + m.group(3)\n\n act = m.group(4)\n if not k in matrices:\n matrices[k] = dict()\n\n matrices[k][act] = getLinks(filename, act, d[k])\n print(matrices[k][act])\n","sub_path":"rt_AB_backup.py","file_name":"rt_AB_backup.py","file_ext":"py","file_size_in_byte":1548,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"619765957","text":"from django.test import TestCase\nfrom django.core.exceptions import ValidationError\nfrom django.db.models import Count\n\nfrom homepage.models import Post\n\ndef create_objects():\n Post.objects.create(title='Fishing in a lake', body='', category='fishing')\n Post.objects.create(title='How to get a catch', body='', category='fishing')\n Post.objects.create(title='Computering', body='', category='CS')\n return Post.objects.all()\n\nclass PostModelTest(TestCase):\n\n def test_saving_and_retrieving_posts(self):\n first_post = Post()\n 
first_post.title = 'Title: first post!'\n first_post.body = 'Some text 1'\n first_post.save()\n\n second_post = Post()\n second_post.title = 'Title: second post!'\n second_post.body = 'Some text 2'\n second_post.save()\n\n saved_posts = Post.objects.all()\n self.assertEqual(saved_posts.count(), 2)\n\n first_saved_post = saved_posts[0]\n second_saved_post = saved_posts[1]\n self.assertEqual(first_saved_post.title, 'Title: first post!')\n self.assertEqual(first_saved_post.body, 'Some text 1')\n self.assertEqual(second_saved_post.title, 'Title: second post!')\n self.assertEqual(second_saved_post.body, 'Some text 2')\n\n def test_get_categories_with_num_count(self):\n posts = create_objects()\n\n # Post.objects.values('category').annotate(num_cat=Count('category'))\n categories = [x for x in posts[0].get_all_categories()]\n\n expected_categories = [\n {'category': 'CS', 'num_cat': 1},\n {'category': 'fishing', 'num_cat': 2}]\n\n self.assertEqual(categories, expected_categories)\n\n\n def test_model_cant_save_title_with_empty_string(self):\n post = Post.objects.create(title='', body='Body')\n post.title = ''\n post.save()\n # self.assertRaises(ValidationError, post.save)\n\n # check if blank=False applies to admin?\n\n\n def test_slug_is_updated_when_saved(self):\n post = Post.objects.create(title='My test title', body='')\n post.title = 'I want this title instead'\n post.save()\n\n self.assertEqual('i-want-this-title-instead', post.slug)\n\n","sub_path":"homepage/tests/test_models.py","file_name":"test_models.py","file_ext":"py","file_size_in_byte":2193,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"59854184","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# time: 2018/1/30 16:45\n\"\"\"\n计算到每个点的各个距离的点的个数都是多少\n\"\"\"\n\nclass Solution(object):\n def numberOfBoomerangs(self, points):\n \"\"\"\n :type points: List[List[int]]\n :rtype: int\n \"\"\"\n m = {}\n ret = 0\n for x, y in points:\n k = \"%d-%d\" % (x, y)\n for xx, yy in points:\n d = (xx - x) ** 2 + (yy - y) ** 2\n\n if d in m.setdefault(k, {}):\n m[k][d] += 1\n else:\n m[k][d] = 1\n for d in m[k]:\n ret += m[k][d] * (m[k][d] - 1)\n return ret\n\n\nprint(Solution().numberOfBoomerangs([[0, 0], [1, 0], [2, 0]]))\n","sub_path":"python/a447.py","file_name":"a447.py","file_ext":"py","file_size_in_byte":756,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"496414471","text":"# Due to certain issues with numpy and issues with latest windows update stick to 1.19.3\n'''(https://stackoverflow.com/questions/64778222/how-to-fix-runtimeerror-the-current-numpy-installation-fails-to-pass-a-sanity)'''\nimport csv\nimport re\nimport pandas as pd\n\nimport gensim\nimport inline as inline\nfrom gensim import corpora\nfrom textblob import TextBlob\nimport nltk\nfrom nltk.tokenize import word_tokenize\nfrom collections import defaultdict\nfrom sklearn.feature_extraction.text import CountVectorizer\nimport itertools\nimport collections\nfrom nltk.probability import FreqDist\nimport matplotlib.pyplot as plt\n\nimport matplotlib.pyplot as plt\n\n\n\n\n\nreviewsArray=[] #array which contains the reviews\nreviewAfterPosTagging=[] #array after post tagging the reviews\nonlyNouns=[] #array which contains only the nouns which came after the pos tagging\nnewPolarity=0 #variable which is assigned for the polarity value\n\n\n\n\n# TEXTBLOB\n# find the polarity of the adjectives which remained in a sentence\ndef polarityAndWriteToFile(text):\n reviews 
= TextBlob(text)\n\n # change the polarity values which comes as decimal values to 1, -1 or 0\n if (reviews.polarity > 0):\n newPolarity = 1\n elif (reviews.polarity < 0):\n newPolarity = -1\n else:\n newPolarity = 0\n\n # write the data to the .csv file only the adjectives in a review\n writer.writerow({\"review\": text, \"polarity\": newPolarity})\n\n\n# read from the csv file\n\n\n\nwith open('amazonJsonTOCSV.csv', 'r', encoding=\"utf-8\") as csv_file:#open the .csv file after converting\n csv_reader = csv.reader(csv_file) #read it from the csv reader which is in python\n line_count = 0 #break line by line in the datset including the headings\n\n with open('onlyReviews.csv', \"a\",encoding='utf-8') as csv_file_2: # open the .csv file and append the data when loaded\n fieldNames = [\"review\", \"polarity\"] # headings of the .csv file\n writer = csv.DictWriter(csv_file_2, fieldnames=fieldNames) #write to the file as a Dictionary(Dictionary Writer)\n writer.writeheader()#write the heading to the .csv file\n\n for row in csv_reader:#loop through the .csv file\n\n if line_count == 0:#if the line is in the 0 index position which the headings of the dataset skip to the next line\n line_count += 1#break the line when the line count is zero which is the headings\n\n else:\n comment = row[5]#take only the column 5 which contain only the summary pf the review of the product\n line_count += 1\n # de-emogifying\n regrex_pattern = re.compile(pattern=\"[\"\n u\"\\U0001F600-\\U0001F64F\" # emoticons\n u\"\\U0001F300-\\U0001F5FF\" # symbols & pictographs\n u\"\\U0001F680-\\U0001F6FF\" # transport & map symbols\n u\"\\U0001F1E0-\\U0001F1FF\" # flags \n u\"\\U00002702-\\U000027B0\"\n u\"\\U000024C2-\\U0001F251\"\n u\"\\U0001F929\"\n u\"\\U0001F92E\"\n \"]+\", flags=re.UNICODE)\n\n deEmojisedComment = regrex_pattern.sub(r'', comment)#remove the emojis in the dataset\n deEmojisedComment = deEmojisedComment.replace(\"\\\\\", \"\") #remove the back slash\n deEmojisedComment=str(deEmojisedComment)[1:-1] #remove the square brackets\n deEmojisedComment=deEmojisedComment.replace('\"',\"\") # remove the apostrophe\n deEmojisedComment=deEmojisedComment.replace(\"'\",\"\") # remove the apostrophe\n deEmojisedComment=deEmojisedComment.replace(\"“\",\"\")\n deEmojisedComment=deEmojisedComment.replace(\"’\",\"\")\n deEmojisedComment=deEmojisedComment.replace(\"*\",\"\")\n\n words = set(nltk.corpus.words.words())\n deEmojisedComment=\" \".join(w for w in nltk.wordpunct_tokenize(deEmojisedComment) if w.lower() in words or not w.isalpha())\n removePunctuations = re.sub(r\"[=<>()''/,+.;@#?!&$:*_-]\",\" \",deEmojisedComment)#remove the punctuation\n\n #custom stopword list\n stopwordsVariable=['i',\"br\",\"n\",'me', 'my', 'myself', 'we', 'our', 'ours',\n 'ourselves', 'you', \"you're\", \"you've\", \"you'll\", \"you'd\",\n 'your', 'yours', 'yourself', 'yourselves', 'he', 'him', 'his',\n 'himself', 'she', \"she's\", 'her', 'hers', 'herself', 'it',\n \"it's\", 'its', 'itself', 'they', 'them', 'their', 'theirs',\n 'themselves', 'what', 'which', 'who', 'whom', 'this', 'that',\n \"that'll\", 'these', 'those', 'am', 'is', 'are', 'was', 'were',\n 'be', 'been', 'being', 'have', 'has', 'had', 'having', 'do',\n 'does', 'did', 'doing', 'a', 'an', 'the', 'and', 'but', 'if',\n 'or', 'because', 'as', 'until', 'while', 'of', 'at', 'by',\n 'for', 'with', 'about', 'against', 'between', 'into', 'through',\n 'during', 'before', 'after', 'above', 'below', 'to', 'from',\n 'up', 'down', 'in', 'out', 'on', 'off', 'over', 'under',\n 'again', 
'further', 'then', 'once', 'here', 'there', 'when',\n 'where', 'why', 'how', 'all', 'any', 'both', 'each', 'few',\n 'more', 'most', 'other', 'some', 'such', 'no', 'nor', 'not',\n 'only', 'own', 'same', 'so', 'than', 'too', 'very', 's', 't',\n 'can', 'will', 'just', 'don', \"don't\", 'should', \"should've\",\n 'now', 'd', 'll', 'm', 'o', 're', 've', 'y', 'ain', 'aren',\n \"aren't\", 'couldn', \"couldn't\", 'didn', \"didn't\", 'doesn',\n \"doesn't\", 'hadn', \"hadn't\", 'hasn', \"hasn't\", 'haven',\n \"haven't\", 'isn', \"isn't\", 'ma', 'mightn', \"mightn't\",\n 'mustn', \"mustn't\", 'needn', \"needn't\", 'shan', \"shan't\",\n 'shouldn', \"shouldn't\", 'wasn', \"wasn't\", 'weren', \"weren't\",\n 'won', \"won't\",\"div\",'wouldn',\"T\",\"wouldn't\",\"%\",\"S\",\"span\",\n \"br\",\"class=cr\",\"en\",\"al\",\"un\",\"el\",\"usar\",\"malo\",\"lo\"]\n\n #STOPWORD\n\n stop_words = stopwordsVariable\n\n word_tokens = word_tokenize(removePunctuations)\n filtered_sentence = [w for w in word_tokens if not w in stop_words]\n final_sentence = \" \".join(filtered_sentence)\n\n # we can use pos tagging or textblob for feature extraction(noun phrases)\n\n '''pos tagging feature extraction'''\n '''https://stackoverflow.com/questions/40167612/how-to-keep-only-the-noun-words-in-a-wordlist-python-nltk'''\n\n # #pos tagging\n # tokens = nltk.word_tokenize(final_sentence)\n # tags = nltk.pos_tag(tokens)\n # #categorizing only the adjectives\n # # adjectives = [word for word, pos in tags if (pos == 'JJ' or pos == 'JJR' or pos == 'JJS' or pos == 'NN' or pos == 'NNP' or pos == 'NNS' or pos == 'NNPS')]\n # adjectives = [word for word, pos in tags if (pos == 'NN' )]\n #\n # # print(adjectives,\"\\n\")\n # adjectives_joined = \" \".join(adjectives)\n # # print(adjectives_joined,\"\\n\")\n\n\n '''textblob feature extraction'''\n # Convert array of comments into a single string\n comments = TextBlob(' '.join(filtered_sentence))\n # Check out noun phrases, will be useful for frequent feature extraction\n\n word_freq = defaultdict(int)\n count=0;\n arrayOfTextFeatures=[]\n for text in comments.noun_phrases:\n word_freq[text] += 1\n count += comments.words.count(text)\n polarityAndWriteToFile(text) # method calling and showing the polarity\n arrayOfTextFeatures=[text]\n text.lower()\n\n '''removing non-english words'''\n text = \" \".join(w for w in nltk.wordpunct_tokenize(text) if w.lower() in words or not w.isalpha())\n #print(text)\n\n\n\n # print(pd.DataFrame.from_dict(word_freq, orient='index') \\\n # .sort_values(0, ascending=False) \\\n # .rename(columns={0: 'abs_freq'}))\n\n\n '''get the frequency of the words'''\n fdist = FreqDist(comments.noun_phrases)\n #print(fdist)\n\n fdist.most_common(2)\n fdist.plot(30, cumulative=False)\n plt.show()\n\n\n\n\n","sub_path":"PosTagging-Polarity-FeatureExtraction-DataCleaning.py","file_name":"PosTagging-Polarity-FeatureExtraction-DataCleaning.py","file_ext":"py","file_size_in_byte":9872,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"646839996","text":"# Copyright 2014: Mirantis Inc.\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. 
You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport abc\n\nimport jsonschema\nimport six\n\nfrom rally import exceptions\nfrom rally import utils\n\n\n@six.add_metaclass(abc.ABCMeta)\nclass Context(object):\n \"\"\"This class is a factory for context classes.\n\n Every context class should be a subclass of this method and implement\n 2 abstract methods: setup() and cleanup()\n\n It covers:\n 1) proper setting up of context config\n 2) Auto discovering & get by name\n 3) Validation by CONFIG_SCHEMA\n 4) Order of context creation\n \"\"\"\n __ctx_name__ = \"base\"\n __ctx_order__ = 0\n __ctx_hidden__ = True\n\n CONFIG_SCHEMA = {}\n\n def __init__(self, context):\n self.config = context.get(\"config\", {}).get(self.__ctx_name__, {})\n self.context = context\n self.task = context[\"task\"]\n\n @classmethod\n def validate(cls, config, non_hidden=False):\n if non_hidden and cls.__ctx_hidden__:\n raise exceptions.NoSuchContext(name=cls.__ctx_name__)\n jsonschema.validate(config, cls.CONFIG_SCHEMA)\n\n @staticmethod\n def get_by_name(name):\n \"\"\"Returns Context class by name.\"\"\"\n for context in utils.itersubclasses(Context):\n if name == context.__ctx_name__:\n return context\n raise exceptions.NoSuchContext(name=name)\n\n @abc.abstractmethod\n def setup(self):\n \"\"\"This method sets context of benchmark.\"\"\"\n\n @abc.abstractmethod\n def cleanup(self):\n \"\"\"This method cleans context of benchmark.\"\"\"\n\n def __enter__(self):\n return self\n\n def __exit__(self, exc_type, exc_value, exc_traceback):\n self.cleanup()\n\n\nclass ContextManager(object):\n \"\"\"Creates context environment and runs method inside it.\"\"\"\n\n @staticmethod\n def run(context, func, *args, **kwargs):\n ctxlst = [Context.get_by_name(name) for name in context[\"config\"]]\n ctxlst = map(lambda ctx: ctx(context),\n sorted(ctxlst, key=lambda x: x.__ctx_order__))\n\n return ContextManager._magic(ctxlst, func, *args, **kwargs)\n\n @staticmethod\n def validate(context, non_hidden=False):\n for name, config in context.iteritems():\n Context.get_by_name(name).validate(config, non_hidden=non_hidden)\n\n @staticmethod\n def _magic(ctxlst, func, *args, **kwargs):\n \"\"\"Some kind of contextlib.nested but with black jack & recursion.\n\n This method uses recursion to build nested \"with\" from list of context\n objects. As it's actually a combination of dark and voodoo magic I\n called it \"_magic\". 
Please don't repeat at home.\n\n :param ctxlst: list of instances of subclasses of Context\n :param func: function that will be called inside this context\n :param args: args that will be passed to function `func`\n :param kwargs: kwargs that will be passed to function `func`\n :returns: result of function call\n \"\"\"\n if not ctxlst:\n return func(*args, **kwargs)\n\n with ctxlst[0]:\n # TODO(boris-42): call of setup could be moved inside __enter__\n # but it should be in try-except, and in except\n # we should call by hand __exit__\n ctxlst[0].setup()\n tmp = ContextManager._magic(ctxlst[1:], func, *args, **kwargs)\n return tmp\n","sub_path":"rally/benchmark/context/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":4003,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"266700284","text":"'''\nCRITICAL LOADING EXPERIMENT\n\nWritten by Patrick Park (RO, Physics '22)\nppark@reed.edu\nFirst published Dec. 24, 2020\nLast updated Jan. 16, 2021\n\n'''\n\nimport os, sys, multiprocessing\nimport numpy as np\nimport pandas as pd\nimport matplotlib\nimport matplotlib.pyplot as plt\nfrom matplotlib.ticker import MultipleLocator\n\nfrom mcnp_funcs import *\n\n# Variables\nfilepath = \"C:/MCNP6/facilities/reed/critloadexp\" # do NOT include / at the end\ninputs_folder_name = \"inputs\"\noutputs_folder_name = \"outputs\"\nwater = \"102\" # m102 is the mat id for H2O in MCNP materials cards\nwater_density = \"-1.00\" # Density of water-- needs to be specified bc it varies with temp & pressure. Negative indicates units of g/cm3 in MCNP syntax. \n# universes = {'A':\"5504\",'B':\"5505\",'C':\"5506\",'D':\"5507\",'E':\"5508\",'F':\"5509\"} # testing only\n\ncore_pos = {'B1','B2','B3','B4','B5','B6',\n 'C1','C2','C3','C4','C6','C7','C8','C10','C11','C12',\n 'D1','D2','D3','D4','D5','D6','D7','D8','D9','D10','D11','D12',\n 'D13','D14','D15','D16','D17','D18',\n 'E2','E3','E4','E5','E6','E7','E8','E9','E10','E11','E12',\n 'E13','E14','E15','E16','E17','E18','E19','E20','E21','E22','E23','E24',\n 'F1','F2','F3','F4','F5','F7','F8','F14','F15','F16','F17','F18',\n 'F19','F20','F21','F22','F24','F26','F27','F28','F29','F30'}\n\nfe_id = {'B1':'7202','B2':'9678','B3':'9679','B4':'7946','B5':'7945','B6':'8104',\n 'C1':'4086','C2':'4070','C3':'8102','C4':'3856','C6':'8103',\n 'C7':'4117','C8':'8105','C10':'8736','C11':'8735','C12':'10705',\n 'D1':'3679','D2':'8732','D3':'4103','D4':'8734','D5':'3685','D6':'4095',\n 'D7':'4104','D8':'4054','D9':'4118','D10':'3677','D11':'4131','D12':'4065',\n 'D13':'3851','D14':'3866','D15':'8733','D16':'4094','D17':'4129','D18':'3874',\n 'E2':'3872','E3':'4106','E4':'3671','E5':'4062','E6':'4121','E7':'4114',\n 'E8':'4077','E9':'3674','E10':'4071','E11':'4122','E12':'4083','E13':'3853',\n 'E14':'4134','E15':'4133','E16':'4085','E17':'4110','E18':'4055','E19':'3862',\n 'E20':'4064','E21':'3858','E22':'4053','E23':'3748','E24':'3852',\n 'F1':'4057','F2':'4125','F3':'4074','F4':'4069','F5':'4088','F7':'3868',\n 'F8':'4120','F14':'3810','F15':'4130','F16':'4091','F17':'3673','F18':'3682',\n 'F19':'4132','F20':'4046','F21':'3865','F22':'3743','F24':'3835','F26':'3676',\n 'F27':'3840','F28':'3854','F29':'4049','F30':'4127'}\n\nsop_fe = 'C10 D15 D14 F20 D16 E19 C11 F21' # str of FEs removed during standard procedure\nkeff_csv_name = \"keff.csv\"\ninv_M_csv_name = \"inv_M.csv\"\n\n# \ndef main(argv):\n os.chdir(f'{filepath}')\n # '''\n base_deck = find_base_file(filepath)\n check_kcode(filepath,base_deck)\n opts = [] 
\n while len(opts)==0:\n user_input = input(f\"Input core positions separated by a space, 'sop' to input the standard procedure for the 1/M experiment ({sop_fe}), or 'quit' to quit: \") # 'A1 B1 C1 C5 C9 D1 E1 F1' # 'C10 D15 D14 F20 D16 C19 C11 F21' # \n if user_input.lower() in ['s','sop']:\n user_input = sop_fe \n elif user_input.lower() in ['q','quit','kill']: \n sys.exit()\n user_input = user_input.split(' ')\n for c in user_input: \n if c.upper() in core_pos: opts.append(c) # user_input is matched with a FE\n else: print(f\"**WARNING: There is no fuel element in core position {c}!\")\n\n inputs_created = replace_mats(filepath, inputs_folder_name, base_deck, opts)\n \n if not check_run_mcnp(): sys.exit()\n \n num_of_fe_to_be_repl = 1\n tasks = get_tasks()\n\n run_mcnp(filepath,f\"{filepath}/{base_deck}\",outputs_folder_name,tasks) # runs base case\n \n while num_of_fe_to_be_repl <= len(opts):\n new_input_deck = 'cle-'+'-'.join(opts[:num_of_fe_to_be_repl])+'.i'\n run_mcnp(filepath,f\"{filepath}/{inputs_folder_name}/{new_input_deck}\",outputs_folder_name,tasks)\n num_of_fe_to_be_repl += 1\n\n print('MCNP calculations complete!')\n\n # Deletes MCNP runtape and source dist files.\n delete_files(f\"{filepath}/{outputs_folder_name}\",r=True,s=True)\n\n files_to_extract_keff = []\n base_output = 'o_'+base_deck.split('/')[-1].split(\".\")[0]+'.o'\n files_to_extract_keff.append(base_output) \n for name in inputs_created:\n output = 'o_'+base_deck.split('/')[-1].split(\".\")[0]+f'-{name}'+'.o'\n files_to_extract_keff.append(output)\n\n\n # Setup a dataframe to collect keff values\n keff_df = pd.DataFrame(columns=[\"output file\", \"keff\", \"keff unc\"])\n keff_df[\"output file\"] = files_to_extract_keff\n keff_df.set_index(\"output file\",inplace=True)\n\n for file in files_to_extract_keff:\n keff, keff_unc = extract_keff(f\"{filepath}/{outputs_folder_name}/{file}\")\n keff_df.loc[file,f\"keff\"] = keff \n keff_df.loc[file,f\"keff unc\"] = keff_unc \n\n print(keff_df)\n\n keff_df.to_csv(keff_csv_name, encoding='utf8')\n print(f\"All {len(files_to_extract_keff)} keff values and their uncertainties have been extracted to '{keff_csv_name}'.\")\n\n calc_inv_M(keff_csv_name)\n # '''\n \n plot_inv_M(inv_M_csv_name)\n\n \n\n#\n#\n# Helper Functions\n#\n#\n\ndef replace_mats(filepath, inputs_folder_name, base_deck, opts):\n fe_id_found = {c:0 for c in opts}\n inputs_created = []\n\n # If the inputs folder doesn't exist, create it.\n if not os.path.isdir(f\"{filepath}/{inputs_folder_name}\"): os.mkdir(f\"{filepath}/{inputs_folder_name}\")\n\n num_of_fe_to_be_repl = 1\n\n while num_of_fe_to_be_repl <= len(opts):\n file = open(f\"{filepath}/{base_deck}\",'r') # opens MCNP input deck, must be in while loop\n fe_to_be_repl= opts[:num_of_fe_to_be_repl] \n new_file_name = '-'.join(fe_to_be_repl)\n input_already_exists = f\"cle-{new_file_name}.i\" in os.listdir(f'{filepath}/{inputs_folder_name}')\n for line in file:\n entries = line.split(' ')\n entry_no = 1 \n for c in fe_to_be_repl:\n if entries[0][:4]==fe_id[c.upper()]:\n fe_id_found[c]+=1\n if not input_already_exists:\n while entries[entry_no]=='': entry_no+=1\n entries[entry_no]=water\n entry_no+=1\n while entries[entry_no]=='': entry_no+=1\n entries[entry_no]=water_density\n if not input_already_exists: print(' '.join(entries),file=open(f\"{filepath}/{inputs_folder_name}/cle-{new_file_name}.i\",'a'),end='')\n if not input_already_exists: print(f\"Created input deck 'cle-{new_file_name}.i'\") \n else: print(f\"--Input deck will be skipped because 
'cle-{new_file_name}.i' already exists.\")\n inputs_created.append(new_file_name)\n num_of_fe_to_be_repl += 1\n \n for c in fe_id_found:\n \tif fe_id_found[c]==0:\n \t\tprint(f\"Python was unable to find any instances of the fuel element in core position {c}!\") \n \n return inputs_created\n\n\ndef calc_inv_M(keff_csv_name):\n inv_M_df = pd.read_csv(keff_csv_name)\n inv_M_df.columns =['output file', 'inv M', 'inv M unc']\n inv_M_df['inv M'] = 1 - inv_M_df['inv M']\n inv_M_df = inv_M_df.iloc[::-1]\n inv_M_df.to_csv(inv_M_csv_name, index=False, encoding='utf8')\n print(f\"All {len(inv_M_df.index)} 1/M values have been calculated in '{inv_M_csv_name}'.\")\n\n\ndef plot_inv_M(csv_file):\n inv_M_df = pd.read_csv(csv_file)\n \n num_fe_loaded = np.arange(0,len(inv_M_df.index))\n num_fe_added = num_fe_loaded - 8\n num_fe_added_1 = num_fe_added[:-1]\n inv_M_mcnp = inv_M_df.iloc[num_fe_loaded,1]\n inv_M_exp = [.0833,.07142,.07142,.0625,.0555,.0500,.0435,.0400]\n \n my_dpi = 320\n dot_marker_size = 2\n line_width = 0.5\n data_line_width = 0.75\n ax_label = 'x-small'\n tick_label = 'xx-small'\n \n fig, ax = plt.subplots(figsize=(1636/my_dpi, 673/my_dpi), dpi=my_dpi,facecolor='w',edgecolor='k')\n ax.plot(num_fe_added, inv_M_mcnp, '-bo', label=\"MCNP\", markersize=dot_marker_size, linewidth=data_line_width)\n ax.plot(num_fe_added_1, inv_M_exp,'-ro', label=\"Experimental\", markersize=dot_marker_size, linewidth=data_line_width)\n \n x1,y1,x2,y2 = num_fe_added[-2],inv_M_df.iloc[num_fe_loaded[-2],1],num_fe_added[-1],inv_M_df.iloc[num_fe_loaded[-1],1]\n line_eqn_mcnp = lambda x_mcnp : ((y2-y1)/(x2-x1)) * (x_mcnp - x1) + y1 # make use of line equation to form function line_eqn(x) that generated y\n xrange_mcnp = np.arange(num_fe_added[-1],(num_fe_added[-1]+20)) # generate range of x values based on your graph\n plt.plot(xrange_mcnp, [ line_eqn_mcnp(x_mcnp) for x_mcnp in xrange_mcnp], color='b', linestyle='--', linewidth=data_line_width) # plot the line with generate x ranges and created y ranges\n \n x3,y3,x4,y4 = num_fe_added[-3], inv_M_exp[num_fe_loaded[-3]], num_fe_added[-2], inv_M_exp[num_fe_loaded[-2]]\n line_eqn_exp = lambda x_exp : ((y4-y3)/(x4-x3)) * (x_exp - x3) + y3 \n xrange_exp = np.arange(num_fe_added_1[-1],(num_fe_added_1[-1]+20)) # generate range of x values based on your graph\n plt.plot(xrange_exp, [ line_eqn_exp(x_exp) for x_exp in xrange_exp], color='r', linestyle='--', linewidth=data_line_width) # plot the line with generate x ranges and created y ranges\n\n for axis in ['top','bottom','left','right']: ax.spines[axis].set_linewidth(line_width)\n \n ax.set_xlim([-8,12])\n ax.set_ylim([0,.1])\n\n ax.xaxis.set_major_locator(MultipleLocator(1))\n ax.yaxis.set_major_locator(MultipleLocator(0.01))\n \n ax.minorticks_on()\n ax.xaxis.set_minor_locator(MultipleLocator(1))\n ax.yaxis.set_minor_locator(MultipleLocator(0.005))\n \n ax.tick_params(axis='both', which='both', labelsize=tick_label,width=line_width)\n \n ax.grid(b=True, which='major', color='#999999', linestyle='-', linewidth=line_width)\n ax.grid(which='minor', linestyle=':', linewidth=line_width, color='gray')\n \n ax.set_xlabel(r'Fuel elements added (0 = standard core config.)',fontsize=ax_label)\n ax.set_ylabel(r'1/M ($1-k_{eff}$)',fontsize=ax_label)\n ax.legend(title=f'Key', title_fontsize=tick_label, ncol=1, fontsize=tick_label,loc='upper right')\n # fontsize: int or {'xx-small', 'x-small', 'small', 'medium', 'large', 'x-large', 'xx-large'}\n\n plt.savefig(f'results.png', bbox_inches = 'tight', pad_inches = 0.1, dpi=my_dpi)\n \n 
\nif __name__ == \"__main__\":\n main(sys.argv[1:])\n \n","sub_path":"cle.py","file_name":"cle.py","file_ext":"py","file_size_in_byte":10452,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"233621371","text":"# Databricks notebook source\n# MAGIC %md \n# MAGIC # DSML Overview session\n# MAGIC ### Model Deployment & Management\n\n# COMMAND ----------\n\n# MAGIC %md \n# MAGIC \n# MAGIC ### Key take aways for this demo:\n# MAGIC \n# MAGIC * Show how the models can be for batch and streaming inference with MLflow\n# MAGIC * Show how to manage incremental versions of models with MLflow model registry\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC ## Prepare data\n\n# COMMAND ----------\n\n# DBTITLE 1,Import needed packages\nfrom pyspark.sql.functions import *\n\n# COMMAND ----------\n\n# DBTITLE 1,Read dataset into Spark DataFrame\nsource_table = \"lending_club.cleaned\"\ndf = spark.table(source_table)\n\n# COMMAND ----------\n\n# DBTITLE 1,Assign target and predictor columns\npredictors = [\n \"purpose\", \"term\", \"home_ownership\", \"addr_state\", \"verification_status\",\n \"application_type\", \"loan_amnt\", \"emp_length\", \"annual_inc\", \"dti\", \n \"delinq_2yrs\", \"revol_util\", \"total_acc\", \"credit_length_in_years\", \n \"int_rate\", \"net\", \"issue_year\"\n]\ntarget = 'bad_loan'\n\n# COMMAND ----------\n\n# MAGIC %md ## List and compare models from tracking server\n\n# COMMAND ----------\n\n# DBTITLE 1,Get MLflow Experiment ID\nfrom mlflow.tracking import MlflowClient\n\npath = \"/Shared/lending_club\"\n\nclient = MlflowClient()\nexperimentID = [e.experiment_id for e in client.list_experiments() if e.name==path][0]\nexperimentID\n\n# COMMAND ----------\n\n# DBTITLE 1,Get all runs for our experiment\nruns = spark.read.format(\"mlflow-experiment\").load(experimentID)\n\ndisplay(runs)\n\n# COMMAND ----------\n\n# DBTITLE 1,Pick run with top ROC\nbest_run_id = runs.orderBy(desc(\"metrics.roc\")).limit(1).select(\"run_id\").collect()[0].run_id\nbest_run_id\n\n# COMMAND ----------\n\n# DBTITLE 1,Retrieve model as scikit-learn model and score on Pandas DataFrame\nimport mlflow.sklearn\nmodel_name = \"random-forest-model\"\nmodel = mlflow.sklearn.load_model(model_uri=f\"runs:/{best_run_id}/{model_name}\")\nmodel\n\n# COMMAND ----------\n\npdDf = df.toPandas()\n\nfor col in pdDf.columns:\n if pdDf.dtypes[col]=='object':\n pdDf[col] = pdDf[col].astype('category').cat.codes\n pdDf[col] = pdDf[col].fillna(0)\n \nX_test, Y_test = pdDf[predictors], pdDf[target]\n\n# COMMAND ----------\n\npredictions = model.predict(X_test)\npredictions[:20]\n\n# COMMAND ----------\n\n# DBTITLE 1,Retrieve model with mlflow.pyfunc.spark_udf and push into Spark pipeline\nimport mlflow.pyfunc\nspark_model = mlflow.pyfunc.spark_udf(spark, model_uri=f\"runs:/{best_run_id}/{model_name}\")\nspark_model\n\n# COMMAND ----------\n\npredictions_df = spark.table(\"lending_club.model_test\").withColumn(\"prediction\", spark_model(*predictors))\ndisplay(predictions_df)\n\n# COMMAND ----------\n\n# DBTITLE 1,Use the model in a Spark Structured Streaming pipeline?\nstreaming_df = (\n spark.readStream\n .format(\"delta\")\n .option(\"maxFilesPerTrigger\", 1)\n .load(\"/home/stuart/datasets/lending_club/model_test/\")\n)\n\nscored_stream_df = streaming_df.withColumn(\"prediction\", spark_model(*predictors))\n\ndisplay(scored_stream_df)\n\n# COMMAND ----------\n\n# DBTITLE 1,Finally, we all know the world runs on SQL\nspark.udf.register(\"debt_model\", spark_model)\n\n# 
COMMAND ----------\n\n# MAGIC %sql\n# MAGIC select *, debt_model(\n# MAGIC purpose, term, home_ownership, addr_state\n# MAGIC , verification_status, application_type, loan_amnt\n# MAGIC , emp_length, annual_inc, dti, \n# MAGIC delinq_2yrs, revol_util, total_acc\n# MAGIC , credit_length_in_years, int_rate, net, issue_year) as prediction\n# MAGIC from lending_club.model_test\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC # Registering a model\n\n# COMMAND ----------\n\nresult = mlflow.register_model(f\"runs:/{best_run_id}/{model_name}\", model_name)\nresult\n\n# COMMAND ----------\n\n# DBTITLE 1,Promote this version to 'deployment ready' status\nclient.transition_model_version_stage(\n name=result.name,\n version=result.version,\n stage=\"Production\"\n)\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC # Deploying the latest best model\n\n# COMMAND ----------\n\ncurrent_prod_model = f\"models:/{model_name}/production\"\nspark_model = mlflow.pyfunc.spark_udf(spark, current_prod_model)\npredictions_df = spark.table(\"lending_club.model_test\").withColumn(\"prediction\", spark_model(*predictors))\ndisplay(predictions_df)\n\n# COMMAND ----------\n\ndbutils.notebook.exit(\"0\")\n\n# COMMAND ----------\n\n","sub_path":"03_Deployment-management.py","file_name":"03_Deployment-management.py","file_ext":"py","file_size_in_byte":4294,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"496039229","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Dec 13 22:41:16 2018\n\n@author: tomoyuki\n\"\"\"\n\nimport numpy as np\nfrom numpy.random import randn,rand\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\n\n\n\n\n# 0時点変化量と1時点変化量のモデル\ndef model(t0,a,b):\n t1 = (a + b * t0 + randn(1)*0.2)[0]\n return t1\n\n# 初期変化量をinit_diffとして,n時点までの変化量をシミュレート\na = -0.00187163\nb = -0.42820586\ninit_diff = a*(1/(1-b))\nn = 1000\n\nprint(init_diff)\ndiff_list = []\ndiff_list.append(init_diff)\nfor i in range(n):\n diff_list.append(model(diff_list[i],a,b))\n\ndf_diff = pd.DataFrame({\"diff\":diff_list})\n\n# シミュレートしたn時点までの変化量をプロット\nplt.plot(df_diff);plt.grid();plt.show();\n\n# 0時点変化量と1時点変化量をプロット\nplt.scatter(df_diff.shift(),df_diff);plt.grid();plt.show();\n\n\n# 0時点・1時点変化量と2時点変化量をプロット\ndf = pd.DataFrame({\"t2\":df_diff.iloc[:,0],\n \"t1\":np.round(df_diff.shift(1).iloc[:,0],1),\n \"t0\":np.round(df_diff.shift(2).iloc[:,0],1)})\n \ndf_pivot = pd.pivot_table(data=df , values='t2', columns='t0', index='t1', aggfunc=np.mean)\n\nsns.heatmap(df_pivot, annot=False, fmt=\"1.1f\", linewidths=.5, cmap=\"YlOrRd_r\", vmin=min(df_pivot),vmax=max(df_pivot))\nplt.show()\n\n\n# 初期値をinit_orgとして原系列を計算\ninit_org = 2\n\norg_list = []\norg_list.append(init_org)\n\nfor i in range(n):\n org_list.append(org_list[i]+diff_list[i])\n \ndf_org = pd.DataFrame({\"org\":org_list})\nplt.plot(df_org);plt.grid();plt.ylim([-5,5]);plt.show();\n\n\n\n\n\n\n\nimport seaborn as sns\nimport pandas as pd\nimport numpy as np\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\n\n\n\ndef linear3DModel(x1, x2, a, b1, b2):\n y = a + b1*x1 + b2*x2\n return y\n\n\ndef plotLinear3DModel(a, b1, b2):\n\n x1 = []\n x2 = []\n y = []\n for i in range(len(x_org)):\n for j in range(len(x_org)):\n x1.append(x_org[i])\n x2.append(x_org[j])\n y.append(linear3DModel(x_org[i], x_org[j], a, b1, b2))\n \n \n df = pd.DataFrame({\"y\":y, \"x1\":np.round(np.array(x1),1), \"x2\":np.round(np.array(x2),1)})\n \n df_pivot = pd.pivot_table(data=df , values='y', columns='x1', index='x2', 
aggfunc=np.mean)\n \n #sns.heatmap(df_pivot, annot=False, fmt=\"1.1f\", linewidths=.5, cmap=\"YlOrRd_r\", vmin=-4,vmax=4)\n sns.heatmap(df_pivot, annot=False, fmt=\"1.1f\", linewidths=.5, cmap=\"gist_yarg_r\", vmin=-4,vmax=4)\n plt.show()\n\n\nx_org = np.arange(-4,4,0.5)\n\na = 0\n\n\nb1 = -0.8\nb2 = -0.8\n\nplotLinear3DModel(a, b1, b2)\n\n\n\n","sub_path":"python/time_series_test.py","file_name":"time_series_test.py","file_ext":"py","file_size_in_byte":2624,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"612539846","text":"from unittest.mock import sentinel\n\nimport pytest\n\nfrom via.views.proxy import proxy\n\n\nclass TestProxy:\n def test_it(\n self, pyramid_request, get_url_details, via_client_service, url_from_user_input\n ):\n pyramid_request.path = \"/https://example.org\"\n\n result = proxy(pyramid_request)\n\n url_from_user_input.assert_called_with(\"https://example.org\")\n pyramid_request.checkmate.raise_if_blocked.assert_called_once_with(\n url_from_user_input.return_value\n )\n get_url_details.assert_called_once_with(url_from_user_input.return_value)\n via_client_service.url_for.assert_called_once_with(\n url_from_user_input.return_value, sentinel.mime_type, pyramid_request.params\n )\n assert result == {\"src\": via_client_service.url_for.return_value}\n\n @pytest.fixture(autouse=True)\n def get_url_details(self, patch):\n get_url_details = patch(\"via.views.proxy.get_url_details\")\n get_url_details.return_value = sentinel.mime_type, sentinel.status_code\n return get_url_details\n\n @pytest.fixture(autouse=True)\n def url_from_user_input(self, patch):\n return patch(\"via.views.proxy.url_from_user_input\")\n","sub_path":"tests/unit/via/views/proxy_test.py","file_name":"proxy_test.py","file_ext":"py","file_size_in_byte":1216,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"352695463","text":"import pandas as pd\n\nfrom sklearn.ensemble import GradientBoostingClassifier\nfrom sklearn.model_selection import KFold, cross_val_score\nfrom sklearn.grid_search import GridSearchCV\n\n\ndef clean_data(df):\n # Cleans data in an input dataframe, df.\n\n # 'Name', 'Sex', 'Ticket', 'Cabin', 'Embarked' are all non-numeric. 
Of these, sex is probably the most\n # useful for predicting survival.\n\n # Assign the sex as 0 for male, 1 for female\n df.loc[df[\"Sex\"] == \"male\", \"Sex\"] = 0\n df.loc[df[\"Sex\"] == \"female\", \"Sex\"] = 1\n\n # There are some missing ages and fares, let's fill them with the medians as an approximation.\n df['Age'] = df['Age'].fillna(df['Age'].median())\n df['Fare'] = df['Fare'].fillna(df['Fare'].median())\n\n return df\n\n\ntitanic = pd.read_csv('train.csv')\n\ntitanic = clean_data(titanic)\n\n# Let's find out what actually effects a passengers survival\n# The columns we'll use to predict the target\npredictors = [\"Pclass\", \"Sex\", \"Age\", \"SibSp\", \"Parch\", \"Fare\"]\n\n\nmodel = GradientBoostingClassifier(random_state=1, n_estimators=25, max_depth=3)\n# Parameters to consider searching\nparam_grid = {\n 'kernel': ['linear', 'rbf']\n}\n\n#CV_rfc = GridSearchCV(estimator=model, param_grid=param_grid, cv= 5)\n#CV_rfc.fit(titanic[predictors], titanic[\"Survived\"])\n#print(CV_rfc.best_params_)\n\nkfold = KFold(n_splits=3, random_state=0)\nresults = cross_val_score(model, titanic[predictors], titanic['Survived'], cv=kfold)\n\n# results for CV on test data\nprint(results.mean())\n\n# generate predictions on the test_data\ntest_data = pd.read_csv(\"test.csv\")\n\ntest_data = clean_data(test_data)\n\n# Fit the model to the training data\nmodel.fit(titanic[predictors], titanic[\"Survived\"])\n\npredictions = model.predict(test_data[predictors])\n\nsubmission_df = pd.DataFrame({\n \"PassengerId\":test_data[\"PassengerId\"],\n \"Survived\":predictions\n })\n\n# Write the submission df to file so I can submit it to Kaggle for scoring.\nsubmission_df.to_csv(\"submission.csv\", index=False)\n","sub_path":"machine_learning/titanic/titanic.py","file_name":"titanic.py","file_ext":"py","file_size_in_byte":1999,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"466311631","text":"import matplotlib.pyplot as plt\r\n\r\nfrom math import pi, sin, cos\r\n\r\nclass plotGrowth:\r\n def __init__(self, axiom , rule, iterations, turn_angle):\r\n self.toRadians = pi / 180\r\n self.rule = rule\r\n self.axiom = axiom\r\n self.iterations = iterations\r\n self.turn_angle = turn_angle\r\n\r\n def coords(self,rule):\r\n saved_states = list()\r\n state = (0, 0, 90)\r\n yield (0, 0)\r\n\r\n for command in rule:\r\n x, y, angle = state\r\n\r\n if command.lower() in 'abcdefghij': \r\n state = (x - cos(angle * self.toRadians),\r\n y + sin(angle * self.toRadians),\r\n angle)\r\n \r\n if command.islower(): \r\n yield (float('nan'), float('nan'))\r\n\r\n yield (state[0], state[1])\r\n\r\n elif command == '+': \r\n state = (x, y, angle + self.turn_angle)\r\n\r\n elif command == '-': \r\n state = (x, y, angle - self.turn_angle)\r\n\r\n elif command == '[': \r\n saved_states.append(state)\r\n\r\n elif command == ']': \r\n state = saved_states.pop()\r\n yield (float('nan'), float('nan'))\r\n x, y, _ = state\r\n yield (x, y)\r\n \r\n\r\n def transform(self):\r\n for _ in range(self.iterations):\r\n self.axiom = \"\".join(self.rule.get(i,i) for i in self.axiom)\r\n return self.axiom\r\n\r\n def get_cords(self):\r\n moves = self.transform()\r\n cor = self.coords(moves)\r\n x ,y = zip(*cor)\r\n return x,y\r\n\r\n#a = plotGrowth('F', {'F': 'FF[++F][-FF]'}, 6, 22) #bush1\r\n#a = plotGrowth(\"X\", {\"X\":\"F+[[X]-X]-F[-FX]+X\", \"F\":\"FF\"}, 6,20)\r\n#a = plotGrowth('A', {'F': 'FF', 'A': 'F[+AF-[A]--A][---A]'}, 6, 22.5) #1 stcik\r\n#a = plotGrowth('Y', {'X': ' 
X[-FFF][+FFF]FX', 'Y': ' YFX[+Y][-Y]'}, 8, 25.7) #1 bush2\r\n#a = plotGrowth('F', {'F': ' F[+FF][-FF]F[-F][+F]F'}, 5, 35) #1\r\n#a = plotGrowth('F', {\"F\":\"FF-[XY]+[XY]\",'X': '+FY', 'Y': '-FX'}, 7, 22.5) #0\r\n#a = plotGrowth('X', {\"F\":\"FF\",'X': 'F[+X]F[-X]+X'}, 10, 20) #1\r\n\r\n\"\"\"\r\na = plotGrowth('aF',\r\n {\"a\":\"FFFFFy[++++n][----t]fb\",\r\n 'b': '+FFFFFy[++++n][----t]fc',\r\n \"c\":\"FFFFFy[++++n][----t]fd\",\r\n \"d\":\"-FFFFFy[++++n][----t]fe\",\r\n \"e\":\"FFFFFy[++++n][----t]fg\",\r\n \"g\":\"FFFFFy[+++fa]fh\",\r\n \"h\":\"FFFFFy[++++n][----t]fi\",\r\n \"i\":\"+FFFFFy[++++n][----t]fj\",\r\n \"j\":\"FFFFFy[++++n][----t]fk\",\r\n \"k\":\"FFFFFy[++++n][----t]fl\",\r\n \"l\":\"FFFFFy[++++n][----t]fm\",\r\n \"m\":\"FFFFFy[---fa]fa\",\r\n \"n\":\"ofFFF\",\r\n \"o\":\"fFFFp\",\r\n \"p\":\"fFFF[-s]q\",\r\n \"q\":\"fFFF[-s]r\",\r\n \"r\":\"fFFF[-s]\",\r\n \"s\":\"fFfF\",\r\n \"t\":\"ufFFF\",\r\n \"u\":\"fFFFv\",\r\n \"v\":\"fFFF[+s]w\",\r\n \"w\":\"fFFF[+s]x\",\r\n \"x\":\"fFFF[+s]\",\r\n \"y\":\"Fy\",},\r\n 1, 12)\r\n\r\n\"\"\"\r\n##x,y = a.get_cords()\r\n#plt.plot(x,y)\r\n#plt.show()\r\n\r\n","sub_path":"growth/parentClass.py","file_name":"parentClass.py","file_ext":"py","file_size_in_byte":2943,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"467800756","text":"# Future\nfrom __future__ import division, print_function, unicode_literals\n\n# Standard Library\nfrom builtins import str\n\n# Third Party\nimport pytest\n\n# DocumentCloud\nfrom documentcloud.annotations import Annotation\n\n\nclass TestAnnotation:\n def test_create_delete(self, document):\n assert len(document.notes.list().results) == 1\n note = document.notes.create(\n \"Test Note\", 0, \"
Note content!\", x1=0.1, y1=0.1, x2=0.2, y2=0.2\n        )\n        assert len(document.notes.list().results) == 2\n        for note in document.notes:\n            assert isinstance(note, Annotation)\n            note.delete()\n        assert len(document.notes.list().results) == 1\n\n    def test_create_page_note(self, document):\n        note = document.notes.create(\"Test Note\", 0, \"Page note!\")\n        assert note\n        note.delete()\n\n    def test_create_partial_coords(self, document):\n        with pytest.raises(ValueError):\n            document.notes.create(\"Test Note\", 0, \"Page note!\", x1=0.5)\n\n    def test_create_invalid_coords(self, document):\n        with pytest.raises(ValueError):\n            document.notes.create(\n                \"Test Note\", 0, \"Page note!
\", x1=0.5, y1=1.5, x2=2, y2=3\n )\n\n def test_str(self, document):\n assert str(document.notes[0])\n\n def test_alias(self, document):\n assert document.notes is document.annotations\n\n def test_location(self, document):\n note = document.notes[0]\n assert note.location.top < note.location.bottom\n assert note.location.left < note.location.right\n","sub_path":"tests/test_annotations.py","file_name":"test_annotations.py","file_ext":"py","file_size_in_byte":1592,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"252352368","text":"class Solution:\n def findanagrams(self, s, p):\n n1 = len(s)\n n2 = len(p)\n if n1 == 0 or n1 < n2:\n return []\n res = []\n for i in range(n1-n2+1):\n sub_str = s[i:n2+i]\n if self.isanagrams(sub_str, p):\n res.append(i)\n return res\n\n def count_ord(self, s):\n count = 0\n for item in s:\n count += ord(item)\n return count\n\n def isanagrams(self, s, t):\n count_s = self.count_(s)\n count_t = self.count_(t)\n if count_s == count_t:\n return True\n\n def count_(self, s):\n dict_01 = {item: 0 for item in s}\n for char in s:\n if char in dict_01:\n dict_01[char] += 1\n return dict_01\n\n def findanagrams_01(self, s, p):\n n1 = len(s)\n n2 = len(p)\n if not n1 or n1 < n2:\n return []\n p_list = [0]*26\n s_list = [0]*26\n res = []\n for p1 in p:\n p_list[ord(p1)-97] += 1\n left = 0\n for right in range(n1):\n if right < len(p)-1:\n s_list[ord(s[right])-97] += 1\n continue\n s_list[ord(s[right])-97] += 1\n if p_list == s_list:\n res.append(left)\n s_list[ord(s[left])-97] -= 1\n left += 1\n return res\n\n","sub_path":"Week_02/find_all_anagrams_in_string.py","file_name":"find_all_anagrams_in_string.py","file_ext":"py","file_size_in_byte":1371,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"450885970","text":"from django.shortcuts import render\nfrom django.views.generic.list import ListView\nfrom .models import Student\n\n# Create your views here.\nclass StudentList(ListView):\n model = Student # This Single line will perform all below task automatically in Generic ListView:\n # student = Student.object.all()\n # context = {'student_list': student}\n # return render(request, 'App_ListView/student_list.html', context)\n################################## CUSTOMIZING Generic ListView #####################################################\n\n #give your own template name.\n template_name = 'App_ListView/student.html' # If this is not kept then we have to give default template name as 'student_list.html' \n # i.e(moedl_list.html) Model name in small. 
\n # '_list' is suffix for template in Generic ListView.\n # To give different name to template as compared to default template name we have given this \n #'template_name' attribute.\n\n # ordering = ['name'] #If you want to order all the records by it's name the we can use it as 'ordering'\n\n #give your own context name\n # context_object_name = 'mystudents' # provides our own context object name which can be used in templates.\n # If this is not used then our default context object name will be 'student_list' i.e(modelname_list)\n\n\n #Filter all records queryset as per your needs\n # def get_queryset(self):\n # return Student.objects.filter(course='PHP') \n\n # If you want to pass the context from Generic ListView \n def get_context_data(self, *args, **kwargs):\n context = super().get_context_data(*args, *kwargs)\n context['order_by'] = Student.objects.all().order_by('name')\n return context\n\n # To render different templates depending upon the condition\n #def get_template_names(self): # django ko Generic ListView le provide gareko method ho\n # if self.request.COOKIES['user'] == 'sonam': # If 'cookie' ko 'user' vanni value ma name 'sonam' cha vani sonam.html template render huncha.\n # Cookie value and name has to be set manually by going into browser developer tool.\n # template_name = 'App_ListView/sonam.html' # template_name mathidefine gareko attributes ma sonam.html template haldeko.\n # else:\n # template_name = self.template_name # If cookie ma name sonam chaina vani default template load garauni\n # # i.e in lin no. 16 ('App_ListView/student.html)\n # return [template_name] # this function will return the list so 'template_name' is in type of list.\n\n \n # Another Real life example for 'get_template_name' method\n # def get_template_names(self):\n\n # if self.request.user.is_superuser: # If the user is logged in as 'superuser' then show the 'super.html' template\n # template_name = 'App_ListView/super.html'\n \n # elif self.request.user.is_staff: # If the user is logged in as 'staff' then show the 'staff.html' template\n # template_name = 'App_ListView/staff.html'\n\n # else: # If the user is logged in as normal user then show the 'student.html' template \n # # i.e in line no. 
16 ('App_ListView/student.html)\n # template_name = self.template_name\n\n # return [template_name]\n\n\n\n\n\n\n\n\n","sub_path":"06.Generic_ListView/Generic_ListView/App_ListView/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3836,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"155842663","text":"\nimport iemdb\nimport mx.DateTime\nIEM = iemdb.connect('iem', bypass=True)\nicursor = IEM.cursor()\n\nicursor.execute(\"\"\"\n\tSELECT id, x(geom) as lon, y(geom) as lat, tmpf from current_log c JOIN stations s on (s.iemid = c.iemid)\n\tWHERE (network ~* 'ASOS' or network = 'AWOS') and country = 'US'\n\tand valid BETWEEN now() - '1500 minutes'::interval \n and now() - '1440 minutes'::interval\n and tmpf > 0 \n\t\"\"\")\ndata = {}\nfor row in icursor:\n\tdata[ row[0] ] = {'lon': row[1], 'lat': row[2], 'val': row[3] }\n\nicursor.execute(\"\"\"\n\tSELECT id, tmpf from current c JOIN stations s ON (s.iemid = c.iemid)\n\tWHERE (network ~* 'ASOS' or network = 'AWOS') and country = 'US' \n and state not in ('AK','HI')\n and tmpf > 0 and valid > now() - '60 minutes'::interval\n\t\"\"\")\n\nobs = []\nlats = []\nlons = []\nmask = []\nfor row in icursor:\n\tif not data.has_key(row[0]):\n\t\tcontinue\n\tv = (row[1] - data[row[0]]['val'])\n\tobs.append( v )\n\t#if v <= -24.:\n\t#\tmask.append( True )\n\t#else:\n\tmask.append( False )\n\tlats.append( data[row[0]]['lat'] )\n\tlons.append( data[row[0]]['lon'] )\n\nimport iemplot\n\ncfg = {\n\t'_conus' : True,\n\t'lbTitleString' : 'F',\n\t 'wkColorMap': 'BlAqGrYeOrRe',\n\t'_valuemask'\t: mask,\n\t'_showvalues'\t: True,\n\t'_format'\t\t: '%.0f',\n\t'_title'\t: '24 Hour Temperature Change',\n\t'_valid'\t: '24 Hour Period Ending %s' % (mx.DateTime.now().strftime(\"%d %b %Y %I %p\"),),\n}\ntmpfp = iemplot.simple_contour(lons, lats, obs, cfg)\niemplot.makefeature(tmpfp)\n","sub_path":"scripts/feature/24h_change.py","file_name":"24h_change.py","file_ext":"py","file_size_in_byte":1457,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"487968641","text":"import json\nimport unittest\nfrom unittest.mock import call, MagicMock, patch\n\nimport boto3\nfrom botocore.exceptions import ClientError\nfrom botocore.stub import Stubber, ANY\n\nfrom budget import app\n\n\n@patch.dict('budget.app.configuration', {'account_id': '012345678901'})\nclass TestCreateBudgets(unittest.TestCase):\n\n def test_create_budgets_no_new_users(self):\n no_new_users = []\n teams_by_user_id = {} # this doesn't matter if there are no new users\n result = app.create_budgets(no_new_users, teams_by_user_id)\n # if there are no new users, we expect a message indicating that no\n # new budgets were created\n expected = 'Budgets created for synapse ids: none'\n self.assertEqual(result, expected)\n\n\n @patch('budget.app.create_budget', MagicMock(return_value={}))\n @patch('budget.app.create_budget_notifications', MagicMock(return_value={}))\n @patch.dict('budget.app.TEAM_BUDGET_RULES', {'teams': {'12345': {}}})\n def test_create_budgets_some_users(self):\n new_users = ['3406211', '3388489']\n teams_by_user_id = {'3406211': ['12345'], '3388489': ['12345']}\n result = app.create_budgets(new_users, teams_by_user_id)\n # if there are new users, we expect a message that budgets were\n # created for each of them\n expected = 'Budgets created for synapse ids: 3406211, 3388489'\n self.assertEqual(result, expected)\n\n\n @patch.dict('budget.app.TEAM_BUDGET_RULES', {\n 
'teams':{'12345': {'amount': '100','period': 'ANNUALLY'}}\n })\n def test_create_budget(self):\n synapse_id = '3388489'\n team = '12345'\n budgets_client = boto3.client('budgets')\n with Stubber(budgets_client) as stubber:\n app.get_client = MagicMock(return_value=budgets_client)\n expected_params = {\n 'AccountId': '012345678901',\n 'Budget': {\n 'BudgetName': 'service-catalog_3388489',\n 'BudgetLimit': {\n 'Amount': '100',\n 'Unit': 'USD'\n },\n 'CostFilters': {\n 'TagKeyValue': [\n (\n 'aws:servicecatalog:provisioningPrincipalArn$arn:aws:sts::'\n '012345678901:assumed-role/ServiceCatalogEndusers/3388489'\n )\n ]\n },\n 'CostTypes': {\n 'IncludeRefund': False,\n 'IncludeCredit': False\n },\n 'TimeUnit': 'ANNUALLY',\n 'BudgetType': 'COST'\n }\n }\n # verify that the boto3 client will be called with the expected values\n stubber.add_response('create_budget', {}, expected_params)\n result = app.create_budget(synapse_id, team)\n expected = {}\n self.assertEqual(result, expected)\n\n\n def test_create_budget_no_team_rules(self):\n synapse_id = '3388489'\n team = 'foo'\n with self.assertRaises(ValueError) as context_manager:\n app.create_budget(synapse_id, team)\n expected_error = 'No budget rules available for team foo'\n self.assertEqual(str(context_manager.exception), expected_error)\n\n\n @patch.dict('budget.app.TEAM_BUDGET_RULES', {\n 'teams': {\n '12345': {\n 'amount': '100',\n 'period': 'ANNUALLY',\n 'community_manager_emails': []\n }\n }\n })\n def test_create_budget_notifications_makes_expected_call_types(self):\n synapse_id = '3388489'\n team = '12345'\n with patch('budget.app._create_budget_notification') as mock:\n app.create_budget_notifications(synapse_id, team)\n expected = [\n call('3388489', 25.0),\n call('3388489', 50.0),\n call('3388489', 80.0),\n call('3388489', 90.0, admin_emails=[]),\n call('3388489', 100.0, admin_emails=[]),\n call('3388489', 110.0, admin_emails=[])\n ]\n self.assertCountEqual(mock.mock_calls, expected)\n\n\n def test_create_budget_notification_user_only(self):\n budgets_client = boto3.client('budgets')\n fake_topic_arn = 'arn:aws:sns:us-east-1:123456789012:mystack-mytopic-NZJ5JSMVGFIE'\n with Stubber(budgets_client) as stubber, \\\n patch.dict('budget.app.configuration',\n {'notification_topic_arn': fake_topic_arn}):\n app.get_client = MagicMock(return_value=budgets_client)\n # user only\n expected_params = {\n 'AccountId': '012345678901',\n 'BudgetName': 'service-catalog_3388489',\n 'Notification': {\n 'NotificationType': 'ACTUAL',\n 'ComparisonOperator': 'GREATER_THAN',\n 'Threshold': 25.0,\n 'ThresholdType': 'PERCENTAGE',\n 'NotificationState': 'ALARM'\n },\n 'Subscribers': [{\n 'SubscriptionType': 'SNS',\n 'Address': fake_topic_arn\n }]\n }\n # verify that the boto3 client will be called with the expected values\n stubber.add_response('create_notification', {}, expected_params)\n synapse_id = '3388489'\n threshold = 25.0\n result = app._create_budget_notification(synapse_id, threshold)\n expected = {}\n self.assertEqual(result, expected)\n # now with admins\n fake_admin_email = 'jane.doe@sagebase.org'\n expected_params['Subscribers'].insert(0, {\n 'SubscriptionType': 'EMAIL',\n 'Address': fake_admin_email\n })\n stubber.add_response('create_notification', {}, expected_params)\n result = app._create_budget_notification(synapse_id, threshold, admin_emails=[fake_admin_email])\n expected = {}\n self.assertEqual(result, 
expected)\n","sub_path":"tests/unit/test_create_budgets.py","file_name":"test_create_budgets.py","file_ext":"py","file_size_in_byte":5368,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"312121175","text":"import torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport sys\nimport gc\nimport time\nimport os\nimport shutil\nimport uuid\nimport tempfile\nimport tarfile\nimport copy\nfrom threading import Thread\nfrom termcolor import colored\n\nfrom . import EmbeddingFeedForward, EmbeddingCNN2D5C, EmbeddingCNN3D4C, ProposalNormalNormalMixture, ProposalUniformTruncatedNormalMixture, ProposalCategoricalCategorical, ProposalPoissonTruncatedNormalMixture\nfrom .. import __version__, util, ObserveEmbedding\nfrom ..distributions import Normal, Uniform, Categorical, Poisson\n\n\nclass InferenceNetworkFeedForward(nn.Module):\n # observe_embeddings example: {'obs1': {'embedding':ObserveEmbedding.FEEDFORWARD, 'reshape': [10, 10], 'dim': 32, 'depth': 2}}\n def __init__(self, model, valid_size=64, observe_embeddings={}):\n super().__init__()\n self._model = model\n self._layer_proposal = nn.ModuleDict()\n self._layer_observe_embedding = nn.ModuleDict()\n self._layer_observe_embedding_final = None\n self._layer_hidden_shape = None\n self._infer_observe = None\n self._infer_observe_embedding = {}\n self._optimizer = None\n\n self._total_train_seconds = 0\n self._total_train_traces = 0\n self._total_train_iterations = 0\n self._loss_initial = None\n self._loss_min = float('inf')\n self._loss_max = None\n self._loss_previous = float('inf')\n self._history_train_loss = []\n self._history_train_loss_trace = []\n self._history_valid_loss = []\n self._history_valid_loss_trace = []\n self._history_num_params = []\n self._history_num_params_trace = []\n self._modified = util.get_time_str()\n self._updates = 0\n self._on_cuda = False\n self._device = torch.device('cpu')\n\n self._valid_size = valid_size\n self._observe_embeddings = observe_embeddings\n self._valid_batch = None\n\n def _init_layer_observe_embeddings(self, observe_embeddings):\n if len(observe_embeddings) == 0:\n raise ValueError('At least one observe embedding is needed to initialize inference network.')\n observe_embedding_total_dim = 0\n for name, value in observe_embeddings.items():\n distribution = self._valid_batch.traces[0].named_variables[name].distribution\n if distribution is None:\n raise ValueError('Observable {}: cannot use this observation as an input to the inference network, because there is no associated likelihood.'.format(name))\n else:\n if 'reshape' in value:\n input_shape = torch.Size(value['reshape'])\n else:\n input_shape = distribution.sample().size()\n if 'dim' in value:\n output_shape = torch.Size([value['dim']])\n else:\n print('Observable {}: embedding dim not specified, using the default 256.'.format(name))\n output_shape = torch.Size([256])\n if 'embedding' in value:\n embedding = value['embedding']\n else:\n print('Observable {}: observe embedding not specified, using the default FEEDFORWARD.'.format(name))\n embedding = ObserveEmbedding.FEEDFORWARD\n if embedding == ObserveEmbedding.FEEDFORWARD:\n if 'depth' in value:\n depth = value['depth']\n else:\n print('Observable {}: embedding depth not specified, using the default 2.'.format(name))\n depth = 2\n layer = EmbeddingFeedForward(input_shape=input_shape, output_shape=output_shape, num_layers=depth)\n elif embedding == ObserveEmbedding.CNN2D5C:\n layer = EmbeddingCNN2D5C(input_shape=input_shape, 
output_shape=output_shape)\n elif embedding == ObserveEmbedding.CNN3D4C:\n layer = EmbeddingCNN3D4C(input_shape=input_shape, output_shape=output_shape)\n else:\n raise ValueError('Unknown embedding: {}'.format(embedding))\n layer.to(device=util._device)\n self._layer_observe_embedding[name] = layer\n observe_embedding_total_dim += util.prod(output_shape)\n self._layer_hidden_shape = torch.Size([observe_embedding_total_dim])\n self._layer_observe_embedding_final = EmbeddingFeedForward(input_shape=self._layer_hidden_shape, output_shape=self._layer_hidden_shape, num_layers=1)\n self._layer_observe_embedding_final.to(device=util._device)\n\n def _save(self, file_name):\n self._modified = util.get_time_str()\n self._updates += 1\n\n data = {}\n data['pyprob_version'] = __version__\n data['torch_version'] = torch.__version__\n # The following is due to a temporary hack related with https://github.com/pytorch/pytorch/issues/9981 and can be deprecated by using dill as pickler with torch > 0.4.1\n data['inference_network'] = copy.copy(self)\n data['inference_network']._model = None\n data['inference_network']._optimizer = None\n\n def thread_save():\n tmp_dir = tempfile.mkdtemp(suffix=str(uuid.uuid4()))\n tmp_file_name = os.path.join(tmp_dir, 'pyprob_inference_network')\n torch.save(data, tmp_file_name)\n tar = tarfile.open(file_name, 'w:gz', compresslevel=2)\n tar.add(tmp_file_name, arcname='pyprob_inference_network')\n tar.close()\n shutil.rmtree(tmp_dir)\n t = Thread(target=thread_save)\n t.start()\n t.join()\n\n @staticmethod\n def _load(file_name):\n try:\n tar = tarfile.open(file_name, 'r:gz')\n tmp_dir = tempfile.mkdtemp(suffix=str(uuid.uuid4()))\n tmp_file = os.path.join(tmp_dir, 'pyprob_inference_network')\n tar.extract('pyprob_inference_network', tmp_dir)\n tar.close()\n if util._cuda_enabled:\n data = torch.load(tmp_file)\n else:\n data = torch.load(tmp_file, map_location=lambda storage, loc: storage)\n shutil.rmtree(tmp_dir)\n except:\n raise RuntimeError('Cannot load inference network.')\n\n if data['pyprob_version'] != __version__:\n print(colored('Warning: different pyprob versions (loaded network: {}, current system: {})'.format(data['pyprob_version'], __version__), 'red', attrs=['bold']))\n if data['torch_version'] != torch.__version__:\n print(colored('Warning: different PyTorch versions (loaded network: {}, current system: {})'.format(data['torch_version'], torch.__version__), 'red', attrs=['bold']))\n\n ret = data['inference_network']\n if util._cuda_enabled:\n if ret._on_cuda:\n if ret._device != util._device:\n print(colored('Warning: loading CUDA (device {}) network to CUDA (device {})'.format(ret._device, util._device), 'red', attrs=['bold']))\n else:\n print(colored('Warning: loading CPU network to CUDA (device {})'.format(util._device), 'red', attrs=['bold']))\n else:\n if ret._on_cuda:\n print(colored('Warning: loading CUDA (device {}) network to CPU'.format(ret._device), 'red', attrs=['bold']))\n ret.to(device=util._device)\n return ret\n\n def to(self, device=None, *args, **kwargs):\n self._device = device\n self._on_cuda = 'cuda' in str(device)\n super().to(device=device, *args, **kwargs)\n\n def _embed_observe(self, traces=None):\n embedding = []\n for name, layer in self._layer_observe_embedding.items():\n values = torch.stack([util.to_tensor(trace.named_variables[name].value) for trace in traces]).view(len(traces), -1)\n embedding.append(layer(values))\n embedding = torch.cat(embedding, dim=1)\n embedding = self._layer_observe_embedding_final(embedding)\n return 
embedding\n\n def infer_trace_init(self, observe=None):\n self._infer_observe = observe\n embedding = []\n for name, layer in self._layer_observe_embedding.items():\n value = util.to_tensor(observe[name]).view(1, -1)\n embedding.append(layer(value))\n embedding = torch.cat(embedding, dim=1)\n self._infer_observe_embedding = self._layer_observe_embedding_final(embedding)\n\n def infer_trace_step(self, variable, previous_variable=None):\n success = True\n address = variable.address\n distribution = variable.distribution\n if address not in self._layer_proposal:\n print('Warning: no proposal layer for: {}'.format(address))\n success = False\n\n if success:\n proposal_distribution = self._layer_proposal[address].forward(self._infer_observe_embedding, [variable])\n return proposal_distribution\n else:\n print('Warning: no proposal can be made, prior will be used.')\n return distribution\n\n def _polymorph(self, batch):\n layers_changed = False\n for sub_batch in batch.sub_batches:\n example_trace = sub_batch[0]\n for variable in example_trace.variables_controlled:\n address = variable.address\n distribution = variable.distribution\n variable_shape = variable.value.shape\n if address not in self._layer_proposal:\n print('New proposal layer for address: {}'.format(util.truncate_str(address)))\n if isinstance(distribution, Normal):\n layer = ProposalNormalNormalMixture(self._layer_hidden_shape, variable_shape)\n elif isinstance(distribution, Uniform):\n layer = ProposalUniformTruncatedNormalMixture(self._layer_hidden_shape, variable_shape)\n elif isinstance(distribution, Poisson):\n layer = ProposalPoissonTruncatedNormalMixture(self._layer_hidden_shape, variable_shape)\n elif isinstance(distribution, Categorical):\n layer = ProposalCategoricalCategorical(self._layer_hidden_shape, distribution.num_categories)\n else:\n raise RuntimeError('Distribution currently unsupported: {}'.format(distribution.name))\n layer.to(device=util._device)\n self._layer_proposal[address] = layer\n layers_changed = True\n if layers_changed:\n num_params = sum(p.numel() for p in self.parameters())\n print('Total number of parameters: {:,}'.format(num_params))\n self._history_num_params.append(num_params)\n self._history_num_params_trace.append(self._total_train_traces)\n return layers_changed\n\n def _loss(self, batch):\n gc.collect()\n batch_loss = 0\n for sub_batch in batch.sub_batches:\n example_trace = sub_batch[0]\n observe_embedding = self._embed_observe(sub_batch)\n sub_batch_loss = 0.\n for time_step in range(example_trace.length_controlled):\n address = example_trace.variables_controlled[time_step].address\n variables = [trace.variables_controlled[time_step] for trace in sub_batch]\n values = torch.stack([v.value for v in variables])\n proposal_distribution = self._layer_proposal[address].forward(observe_embedding, variables)\n log_prob = proposal_distribution.log_prob(values)\n if util.has_nan_or_inf(log_prob):\n print(colored('Warning: NaN, -Inf, or Inf encountered in proposal log_prob.', 'red', attrs=['bold']))\n print('proposal_distribution', proposal_distribution)\n print('values', values)\n print('log_prob', log_prob)\n print('Fixing -Inf')\n log_prob = util.replace_negative_inf(log_prob)\n print('log_prob', log_prob)\n if util.has_nan_or_inf(log_prob):\n print(colored('Nan or Inf present in proposal log_prob.', 'red', attrs=['bold']))\n return False, 0\n sub_batch_loss += -torch.sum(log_prob)\n batch_loss += sub_batch_loss\n return True, batch_loss / batch.length\n\n def optimize(self, num_traces, 
batch_generator, batch_size=64, valid_interval=1000, learning_rate=0.0001, weight_decay=1e-5, auto_save_file_name_prefix=None, auto_save_interval_sec=600, *args, **kwargs):\n if self._valid_batch is None:\n print('Initializing inference network...')\n self._valid_batch = batch_generator.get_batch(self._valid_size, discard_source=True)\n self._init_layer_observe_embeddings(self._observe_embeddings)\n self._polymorph(self._valid_batch)\n\n prev_total_train_seconds = self._total_train_seconds\n time_start = time.time()\n time_loss_min = time.time()\n time_last_batch = time.time()\n last_validation_trace = -valid_interval + 1\n iteration = 0\n trace = 0\n stop = False\n print('Train. time | Trace | Init. loss| Min. loss | Curr. loss| T.since min | Traces/sec')\n max_print_line_len = 0\n loss_min_str = ''\n time_since_loss_min_str = ''\n last_auto_save_time = time.time() - auto_save_interval_sec\n while not stop:\n iteration += 1\n batch = batch_generator.get_batch(batch_size)\n layers_changed = self._polymorph(batch)\n\n if (self._optimizer is None) or layers_changed:\n self._optimizer = optim.Adam(self.parameters(), lr=learning_rate, weight_decay=weight_decay)\n\n self._optimizer.zero_grad()\n success, loss = self._loss(batch)\n if not success:\n print(colored('Cannot compute loss, skipping batch. Loss: {}'.format(loss), 'red', attrs=['bold']))\n else:\n loss.backward()\n self._optimizer.step()\n loss = float(loss)\n\n if self._loss_initial is None:\n self._loss_initial = loss\n self._loss_max = loss\n loss_initial_str = '{:+.2e}'.format(self._loss_initial)\n # loss_max_str = '{:+.3e}'.format(self._loss_max)\n if loss < self._loss_min:\n self._loss_min = loss\n loss_str = colored('{:+.2e}'.format(loss), 'green', attrs=['bold'])\n loss_min_str = colored('{:+.2e}'.format(self._loss_min), 'green', attrs=['bold'])\n time_loss_min = time.time()\n time_since_loss_min_str = colored(util.days_hours_mins_secs_str(0), 'green', attrs=['bold'])\n elif loss > self._loss_max:\n self._loss_max = loss\n loss_str = colored('{:+.2e}'.format(loss), 'red', attrs=['bold'])\n # loss_max_str = colored('{:+.3e}'.format(self._loss_max), 'red', attrs=['bold'])\n else:\n if loss < self._loss_previous:\n loss_str = colored('{:+.2e}'.format(loss), 'green')\n elif loss > self._loss_previous:\n loss_str = colored('{:+.2e}'.format(loss), 'red')\n else:\n loss_str = '{:+.2e}'.format(loss)\n loss_min_str = '{:+.2e}'.format(self._loss_min)\n # loss_max_str = '{:+.3e}'.format(self._loss_max)\n time_since_loss_min_str = util.days_hours_mins_secs_str(time.time() - time_loss_min)\n\n self._loss_previous = loss\n self._total_train_iterations += 1\n trace += batch.length\n self._total_train_traces += batch.length\n total_training_traces_str = '{:9}'.format('{:,}'.format(self._total_train_traces))\n self._total_train_seconds = prev_total_train_seconds + (time.time() - time_start)\n total_training_seconds_str = util.days_hours_mins_secs_str(self._total_train_seconds)\n traces_per_second_str = '{:,.1f}'.format(int(batch.length / (time.time() - time_last_batch)))\n time_last_batch = time.time()\n if num_traces is not None:\n if trace >= num_traces:\n stop = True\n\n self._history_train_loss.append(loss)\n self._history_train_loss_trace.append(self._total_train_traces)\n if trace - last_validation_trace > valid_interval:\n print('\\rComputing validation loss...', end='\\r')\n with torch.no_grad():\n _, valid_loss = self._loss(self._valid_batch)\n valid_loss = float(valid_loss)\n self._history_valid_loss.append(valid_loss)\n 
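# remember the trace count at which this validation loss was measured\n 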
self._history_valid_loss_trace.append(self._total_train_traces)\n last_validation_trace = trace - 1\n\n if auto_save_file_name_prefix is not None:\n if time.time() - last_auto_save_time > auto_save_interval_sec:\n last_auto_save_time = time.time()\n file_name = '{}_{}.network'.format(auto_save_file_name_prefix, util.get_time_stamp())\n print('\\rSaving to disk...', end='\\r')\n self._save(file_name)\n\n print_line = '{} | {} | {} | {} | {} | {} | {}'.format(total_training_seconds_str, total_training_traces_str, loss_initial_str, loss_min_str, loss_str, time_since_loss_min_str, traces_per_second_str)\n max_print_line_len = max(len(print_line), max_print_line_len)\n print(print_line.ljust(max_print_line_len), end='\\r')\n sys.stdout.flush()\n print()\n","sub_path":"pyprob/nn/inference_network_feedforward.py","file_name":"inference_network_feedforward.py","file_ext":"py","file_size_in_byte":17995,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"321357858","text":"#!/usr/bin/python\n#\n# Copyright 2015 John Kendrick\n#\n# This file is part of PDielec\n#\n# This program is free software; you can redistribute it and/or modify\n# it under the terms of the MIT License\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n#\n# You should have received a copy of the MIT License\n# along with this program, if not see https://opensource.org/licenses/MIT\n#\n\"\"\"Read the contents of a QE output file containing a QE dynamical matrix\"\"\"\nimport re\nimport math\nimport numpy as np\nfrom Python.Constants import amu, angs2bohr, hartree2ev\nfrom Python.UnitCell import UnitCell\nfrom Python.GenericOutputReader import GenericOutputReader\n\n\nclass QEOutputReader(GenericOutputReader):\n \"\"\"Read the contents of a QE output file containing a QE dynamical matrix\"\"\"\n\n def __init__(self, filenames):\n GenericOutputReader.__init__(self, filenames)\n self.type = 'QE output'\n self._alat = None\n return\n\n def _read_output_files(self):\n \"\"\"Read the QE file names\"\"\"\n # Define the search keys to be looked for in the files\n self.manage = {} # Empty the dictionary matching phrases\n self.manage['header'] = (re.compile('Dynamical matrix file'), self._read_header)\n self.manage['lattice'] = (re.compile('Basis vectors'), self._read_lattice_vectors)\n self.manage['lattice2'] = (re.compile('cubic'), self._read_lattice_vectors)\n self.manage['lattice3'] = (re.compile('^ *crystal axes:'), self._read_crystal_axes)\n self.manage['positions'] = (re.compile('ATOMIC_POSITIONS'), self._read_atomic_positions)\n self.manage['dynamical'] = (re.compile(' *Dynamical Matrix in c'), self._read_dynamical)\n self.manage['epsilon'] = (re.compile(' *Dielectric Tensor:'), self._read_epsilon)\n self.manage['charges'] = (re.compile(' *Effective Charges E-U:'), self._read_born_charges)\n self.manage['energy_cutoff'] = (re.compile(' *kinetic-energy cutoff'), self._read_energy_cutoff)\n self.manage['kpoints'] = (re.compile(' *number of k points'), self._read_kpoints)\n self.manage['kpoint_grid'] = (re.compile('K_POINTS automatic'), self._read_kpoint_grid)\n self.manage['electrons'] = (re.compile('^ *number of electrons'), self._read_electrons)\n self.manage['energy'] = (re.compile('^ *total energy *='), self._read_energy)\n self.manage['alat'] = (re.compile('^ *lattice parameter'), self._read_alat)\n self.manage['pressure'] = (re.compile('^ 
*total *stress *.Ry'), self._read_pressure)\n self.manage['nions'] = (re.compile('^ *number of atoms/cell'), self._read_nions)\n for f in self._outputfiles:\n self._read_output_file(f)\n return\n\n def _read_nions(self, line):\n self.nions = int(line.split()[4])\n\n def _read_pressure(self, line):\n self.pressure = float(line.split()[5])/10.0\n\n def _read_alat(self, line):\n t = float(line.split()[4])\n if abs(t - angs2bohr) < 1.0e-4:\n t = angs2bohr\n # There are rounding errors when reading from the log file\n # So only read if there is no alternative\n if self._alat is None:\n self._alat = t\n\n def _read_electrons(self, line):\n self.electrons = float(line.split()[4])\n\n def _read_energy(self, line):\n self.final_energy_without_entropy = float(line.split()[3]) * hartree2ev / 2.0\n self.final_free_energy = float(line.split()[3]) * hartree2ev / 2.0\n\n def _read_energy_cutoff(self, line):\n self.energy_cutoff = float(line.split()[3]) * hartree2ev / 2.0\n\n def _read_kpoints(self, line):\n self.kpoints = int(line.split()[4])\n\n def _read_kpoint_grid(self, line):\n line = self.file_descriptor.readline()\n self.kpoint_grid = [ float(f) for f in line.split()[0:3] ]\n\n def _read_header(self, line):\n line = self.file_descriptor.readline()\n line = self.file_descriptor.readline()\n self.nspecies = int(line.split()[0])\n self.nions = int(line.split()[1])\n t = float(line.split()[3])\n if abs(t - angs2bohr) < 1.0e-4:\n t = angs2bohr\n self._alat = t\n\n def _read_epsilon(self, line):\n self.file_descriptor.readline()\n linea = self.file_descriptor.readline().split()\n self.zerof_optical_dielectric = []\n self.zerof_optical_dielectric.append([float(f) for f in linea[0:3]])\n linea = self.file_descriptor.readline().split()\n self.zerof_optical_dielectric.append([float(f) for f in linea[0:3]])\n linea = self.file_descriptor.readline().split()\n self.zerof_optical_dielectric.append([float(f) for f in linea[0:3]])\n return\n\n def _read_masses(self):\n self.masses_per_type = []\n self.species = []\n for i in range(self.nspecies):\n linea = self.file_descriptor.readline().replace('\\'', '').split()\n self.species.append(linea[1].capitalize())\n # The factor of two is because au in pwscf are half mass of electron\n self.masses_per_type.append(float(linea[2])*2/amu)\n self._read_fractional_coordinates()\n return\n\n def _read_dynamical(self, line):\n nmodes = self.nions*3\n hessian = np.zeros((nmodes, nmodes))\n self.file_descriptor.readline()\n linea = self.file_descriptor.readline().split()\n # We only want to read the hessian at gamma\n q = [float(q) for q in linea[3:6]]\n qsum = q[0]*q[0] + q[1]*q[1] + q[2]*q[2]\n if qsum > 0.0001:\n return\n # We read the hessian and store the mass weighted matrix\n linea = self.file_descriptor.readline().split()\n for a in range(self.nions):\n for b in range(self.nions):\n self.file_descriptor.readline()\n for ixyz in range(3):\n ipos = a*3 + ixyz\n linea = self.file_descriptor.readline().split()\n for jxyz in range(3):\n jpos = b*3 + jxyz\n # factor of 0.5 'cos of au units in pwscf\n hessian[ipos, jpos] = 0.5*float(linea[2*jxyz])/(amu*math.sqrt(self.masses[a]*self.masses[b]))\n # end for jxyz\n # end for ixyz\n # end for b\n # end for a\n self._dynamical_matrix(hessian)\n\n def _read_born_charges(self, line):\n self.born_charges = []\n line = self.file_descriptor.readline()\n for i in range(self.nions):\n b = []\n line = self.file_descriptor.readline()\n line = self.file_descriptor.readline()\n b.append([float(line.split()[0]), float(line.split()[1]), 
float(line.split()[2])])\n line = self.file_descriptor.readline()\n b.append([float(line.split()[0]), float(line.split()[1]), float(line.split()[2])])\n line = self.file_descriptor.readline()\n b.append([float(line.split()[0]), float(line.split()[1]), float(line.split()[2])])\n self.born_charges.append(b)\n return\n\n def _read_crystal_axes(self, line):\n linea = self.file_descriptor.readline().split()\n avector = [float(f)*self._alat/angs2bohr for f in linea[3:6]]\n linea = self.file_descriptor.readline().split()\n bvector = [float(f)*self._alat/angs2bohr for f in linea[3:6]]\n linea = self.file_descriptor.readline().split()\n cvector = [float(f)*self._alat/angs2bohr for f in linea[3:6]]\n self.unit_cells.append(UnitCell(avector, bvector, cvector))\n self.ncells = len(self.unit_cells)\n self.volume = self.unit_cells[-1].volume\n return\n\n def _read_lattice_vectors(self, line):\n linea = self.file_descriptor.readline().split()\n avector = [float(f)*self._alat/angs2bohr for f in linea[0:3]]\n linea = self.file_descriptor.readline().split()\n bvector = [float(f)*self._alat/angs2bohr for f in linea[0:3]]\n linea = self.file_descriptor.readline().split()\n cvector = [float(f)*self._alat/angs2bohr for f in linea[0:3]]\n self.unit_cells.append(UnitCell(avector, bvector, cvector))\n self.ncells = len(self.unit_cells)\n self.volume = self.unit_cells[-1].volume\n self._read_masses()\n return\n\n def _read_atomic_positions(self, line):\n # This is a nasty fix for a problem I can't get to the bottom of\n # The coordinates stored in the dynamical matrix file do not seem to be fractional coordinates\n # So I am reading the fractional coordinates here - from the log file\n self.fractional_coordinates = []\n for i in range(self.nions):\n linea = self.file_descriptor.readline().split()\n self.fractional_coordinates.append([float(linea[1]), float(linea[2]), float(linea[3])])\n \n\n def _read_fractional_coordinates(self):\n self.masses = []\n self.atom_type_list = []\n self.ions_per_type = [ 0 for i in range(self.nspecies) ]\n species_list = []\n self.xyz_coordinates = []\n # It took a long time to work out that alat is in bohr\n const = self._alat/angs2bohr\n for i in range(self.nions):\n linea = self.file_descriptor.readline().split()\n species_index = int(linea[1])\n self.xyz_coordinates.append([const*float(linea[2]), const*float(linea[3]), const*float(linea[4])])\n self.masses.append(self.masses_per_type[species_index-1])\n self.atom_type_list.append(species_index-1)\n self.ions_per_type[species_index-1] += 1\n species_list.append(self.species[species_index-1])\n self.unit_cells[-1].set_xyz_coordinates(self.xyz_coordinates)\n self.unit_cells[-1].set_element_names(species_list)\n return\n","sub_path":"Python/QEOutputReader.py","file_name":"QEOutputReader.py","file_ext":"py","file_size_in_byte":10033,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"321456304","text":"\nimport tensorflow as tf\n\nfrom model_deploy import Deploy\nimport tensorflow.contrib.slim as slim\nimport get_inputs\n\ndef _create_losses(input_queue, train_config, model_fn=None):\n dectect_model = model_fn()\n batch_size = train_config[\"batch_size\"]\n input_dict = input_queue.dequeue()\n\n width = []\n height = []\n images = []\n filename = []\n groundtruth_box = []\n groundtruth_class = []\n preprocess_images = []\n preprocess_box = []\n for i in range(batch_size):\n width.append(input_dict[i][\"width\"])\n height.append(input_dict[i][\"height\"])\n 
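# raw images are collected here and resized by the model's preprocess step below\n 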
images.append(input_dict[i][\"image\"])\n groundtruth_box.append(input_dict[i][\"bbox\"])\n groundtruth_class.append(input_dict[i][\"label\"])\n filename.append(input_dict[i][\"filename\"])\n\n for image in images:\n resize_images = dectect_model.preprocess(image)\n preprocess_images.append(resize_images)\n\n images = tf.concat(preprocess_images, 0)\n\n for indx,box in enumerate(groundtruth_box):\n resize_box = dectect_model.process_gtbox(box,train_config[\"resize\"],width[indx],height[indx])\n preprocess_box.append(resize_box)\n\n\n\n #prediction_dict = dectect_model._predictor(images)\n\n\n dectect_model.provide_groundtruth(preprocess_box, groundtruth_class, filename, width, height)\n\n prediction_dict = dectect_model._predictor(images)\n\n loss = dectect_model._loss(prediction_dict)\n\n for loss_tensor in loss.values():\n tf.losses.add_loss(loss_tensor)\n\n return loss\n\n\ndef train(create_input_dict_fn,\n create_model_fn,\n train_config,\n train_dir,\n task,\n num_clones,\n worker_replicas,\n clone_on_cpu,\n ps_tasks,\n worker_job_name,\n is_chief,):\n\n #keep the model instance for now; it is used again later when loading a pre-trained model\n detection_model = create_model_fn()\n\n with tf.Graph().as_default():\n #deployment configuration class\n deploy_config = Deploy.DeploymentConfig(\n num_clones=num_clones,clone_on_cpu=clone_on_cpu,\n replica_id=task,num_replicas=worker_replicas,\n num_ps_tasks=ps_tasks,\n worker_job_name=worker_job_name)\n\n with tf.device(deploy_config.variables_device()):\n global_step = slim.create_global_step()\n\n batch_size = train_config[\"batch_size\"] // num_clones\n\n with tf.device(deploy_config.inputs_device()):\n #read data from the tfrecord, assemble batches and build the sample queue\n input_queue = get_inputs.read_and_transform_dataset(per_clone_batch_size=batch_size,\n create_tensor_dict_fn=create_input_dict_fn)\n _create_losses(input_queue, train_config, model_fn=create_model_fn)\n'''\ndef match(gtbox, gtclass, anchors):\n\n batch_cls_target = []\n batch_loc_target = []\n match_list = []\n unmatch_list = []\n batch_iou_max = []\n\n iou_matrix = Iou_interface(anchors, gtbox)\n for i, sclae_iou in enumerate(iou_matrix):\n sclae_match = []\n sclae_unmatch = []\n sclae_cls_target = []\n sclae_loc_target = []\n iou_max = []\n for j, iou in enumerate(sclae_iou):\n tf.summary.histogram(\"scale_iou\", iou)\n\n column_max_indx = tf.argmax(iou, axis=0) # take the per column max value index,index value between[0~M]\n\n tf.summary.histogram(\"indx\", column_max_indx)\n column_max_values = tf.reduce_max(iou, axis=0) # take the per column max value\n\n tf.summary.histogram(\"values\", column_max_values)\n iou_max.append(tf.expand_dims(column_max_values, axis=0))\n # Positions at or below the threshold are False, positions above the threshold are True;\n # False positions are anchors without a target, True positions are targeted anchors\n object_mask = tf.less_equal(0.5, column_max_values)\n tf.summary.histogram(\"obj_mask\", tf.cast(object_mask, dtype=tf.float32))\n sclae_match.append(tf.expand_dims(object_mask, axis=0))\n noobj_mask = 1.0 - tf.cast(object_mask, tf.float32)\n sclae_unmatch.append(tf.expand_dims(noobj_mask, axis=0))\n # gtbox [M,4] column_max_indx's length is [N],broadcast gtbox to [N,4]\n new_box = tf.gather(gtbox[j], column_max_indx)\n sclae_loc_target.append(tf.expand_dims(new_box, axis=0))\n new_class = tf.gather(gtclass[j], column_max_indx)\n sclae_cls_target.append(tf.expand_dims(new_class, axis=0))\n batch_loc_target.append(tf.concat(sclae_loc_target, axis=0))\n batch_cls_target.append(tf.concat(sclae_cls_target, axis=0))\n match_list.append(tf.concat(sclae_match, axis=0))\n 
unmatch_list.append(tf.concat(sclae_unmatch, axis=0))\n batch_iou_max.append(tf.concat(iou_max, axis=0))\n return batch_cls_target,batch_loc_target,match_list,unmatch_list,batch_iou_max\n\ndef Iou(gtbox,bbox):\n\n area1 = area(bbox)\n area2 = area(gtbox)\n xmin1, ymin1, xmax1, ymax1 = tf.split(gtbox, num_or_size_splits=4, axis=1)\n xmin2, ymin2, xmax2, ymax2 = tf.split(bbox, num_or_size_splits=4, axis=1)\n all_pairs_min_ymax = tf.minimum(ymax1, tf.transpose(ymax2, [1, 0]))\n all_pairs_max_ymin = tf.maximum(ymin1, tf.transpose(ymin2, [1, 0]))\n intersect_heights = tf.maximum(0.0, all_pairs_min_ymax - all_pairs_max_ymin)\n all_pairs_min_xmax = tf.minimum(xmax1, tf.transpose(xmax2, [1, 0]))\n all_pairs_max_xmin = tf.maximum(xmin1, tf.transpose(xmin2, [1, 0]))\n intersect_widths = tf.maximum(0.0, all_pairs_min_xmax - all_pairs_max_xmin)\n innersize = tf.multiply(intersect_heights, intersect_widths)\n uion = tf.transpose(area1, [1, 0]) + area2 - innersize\n iou = tf.where(tf.equal(innersize, 0), tf.zeros_like(innersize),\n tf.truediv(innersize, uion), name=\"iou\")\n\n return iou\n\ndef Iou_interface(bbox,gtbox,name=\"Iou\"):\n #gtbox (N,4)\n # bbox (13,13,3,4),(26,26,3,4),(52,52,3,4) if not tiny_yolo else (13,13,3,4),(26,26,3,4)\n # reshape (507,4) (2028,4)\n all_iou_matrix = []\n with tf.name_scope(name):\n if name==\"Iou\":\n for i,t_bbox in enumerate(bbox):\n iou_matrix = []\n t_bbox = tf.reshape(t_bbox,(-1,4))\n for indx,g_box in enumerate(gtbox):\n iou = Iou(g_box,t_bbox)\n iou_matrix.append(iou)\n all_iou_matrix.append(iou_matrix)\n\n return all_iou_matrix\n\ndef area(box_1):\n if box_1.shape.as_list()[1]==2:\n width,heigth = tf.split(box_1, num_or_size_splits=2, axis=1)\n area = tf.multiply(width,heigth)\n else:\n xmin,ymin,xmax,ymax = tf.split(box_1,num_or_size_splits=4,axis=1)\n area = (xmax-xmin) * (ymax - ymin)\n return area'''","sub_path":"trainer1.py","file_name":"trainer1.py","file_ext":"py","file_size_in_byte":6952,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"276277981","text":"import numpy as np\nimport pandas as pd\n\nids = [28, 29, 32, 35, 51, 54, 55]\n\nfeature_importances = np.zeros(shape=(10000, 3))\nfor i in ids:\n a = np.array(pd.read_csv(f'../tree_based_model/features/{i}.csv', header=None))\n feature_importances += a\n\n# load data\nX = np.load('../../X_train.npz')['arr_0']\ny = np.load('../../Y_train.npz')['arr_0'][:, 2]\nTX = np.load('../../X_test.npz')['arr_0']\nprint('finished loading')\n\n# search params\nparams = {\n 'num_leaves': 180,\n 'min_data_in_leaf': 20,\n 'bagging_fraction': 0.75,\n 'bagging_freq': 7,\n 'learning_rate': 0.1,\n 'boosting': 'dart',\n 'num_iterations': 1000\n}\nprint(params)\n\n# search\n\nimport itertools\nimport lightgbm as lgb\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import mean_absolute_error\n\nper = 1e-3\n\nX_train = X[:, feature_importances[:, 2] > per]\nX_test = TX[:, feature_importances[:, 2] > per]\nw_train = 1.0 / y\n\nmodel = lgb.LGBMRegressor(**params)\nmodel.fit(X_train, y, sample_weight=w_train)\n\npd.DataFrame(model.predict(X_train).reshape(-1, 1)).to_csv('train_t2y2.csv', index=None, header=None)\npd.DataFrame(model.predict(X_test).reshape(-1, 1)).to_csv('test_t2y2.csv', index=None, header=None)\n\nprint(np.mean(np.abs(y - model.predict(X_train)) / y))\nprint(np.mean(np.abs(y - 
model.predict(X_train))))\n\n","sub_path":"130/t2y2.py","file_name":"t2y2.py","file_ext":"py","file_size_in_byte":1320,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"607442863","text":"import os\nfrom .paths import prefixes, executables, weak_executables, home\nimport dotfiler.logger as log\nimport os.path as path\n\nclass Component(object):\n \"\"\"A base class for component classes which inspect the availability or\n location of software installed on the system.\n \"\"\"\n def __init__(self):\n super(Component, self).__init__()\n self.exists = False\n self.messages = []\n def mark_found(self):\n self.exists = True\n def log(self, msg):\n self.messages.append(msg)\n def finalize(self):\n if not self.exists:\n log.warn(str(self) + \": The component was not found.\")\n msgs = self.messages\n if len(msgs) > 0:\n log.notice(\" Log messages:\")\n for msg in msgs:\n log.notice(\" \" + msg)\n\n @classmethod\n def get(cls):\n return get_component(cls)\n\n__loaded_components = {}\n\ndef get_component(comp_class):\n inst = __loaded_components.get(comp_class)\n if inst is None:\n inst = comp_class()\n __loaded_components[comp_class] = inst\n inst.finalize()\n return inst\n\nclass Executable(Component):\n def __init__(self, name):\n super(Executable, self).__init__()\n self.path = None\n self.name = name\n\n def search_standard_directories(self):\n for par_dir in executables:\n self.search_directory(par_dir)\n for par_dir in weak_executables:\n self.search_directory(par_dir)\n\n def search_directory(self, par_dir):\n if self.exists:\n return\n p = os.path.join(par_dir, self.name)\n if os.path.exists(p):\n if self.is_valid_file(p):\n self.path = p\n self.mark_found()\n else:\n self.log(\"'%s' was found, but detected as invalid.\" % p)\n else:\n self.log(\"'%s' was not found.\" % p)\n\n def is_valid_file(self, path):\n return True\n\n def __str__(self):\n return \"Executable '%s'\" % self.name\n\npython_vers = (\"2.7\", \"3.4\", \"3.6\")\npython_pkg_dirs = [\n os.path.join(prefix, \"lib\", \"python\" + ver, sd + \"-packages\")\n for sd in (\"site\", \"dist\")\n for ver in python_vers\n for prefix in prefixes]\npython_pkg_dirs += [\n os.path.join(\"/Library/Python/\" + ver, sd + \"-packages\")\n for sd in (\"site\", \"dist\")\n for ver in python_vers]\npython_pkg_dirs += [\n os.path.join(\"/Library/Frameworks/Python.framework/Versions/\" + ver + \"/lib/python\" + ver, sd + \"-packages\")\n for sd in (\"site\", \"dist\")\n for ver in python_vers]\npython_pkg_dirs += [\n os.path.join(\"/opt/local/Library/Frameworks/Python.framework/Versions/\" + ver + \"/lib/python\" + ver, sd + \"-packages\")\n for sd in (\"site\", \"dist\")\n for ver in python_vers]\n\nclass PythonPackage(Component):\n \"\"\"A base class for component classes which inspect the availability of\n a Python package (2.x or 3.x) located in site-packages or dist-packages.\n \"\"\"\n def __init__(self, name):\n super(PythonPackage, self).__init__()\n self.path = None\n self.name = name\n\n def search_standard_package_directories(self):\n for pkg_par_dir in python_pkg_dirs:\n self.search_package_directory(pkg_par_dir)\n\n def search_package_directory(self, pkg_par_dir):\n if self.exists:\n return\n pkg_dir = os.path.join(pkg_par_dir, self.name)\n if os.path.exists(pkg_dir) and self.is_valid_package_path(pkg_dir):\n self.path = pkg_dir\n self.mark_found()\n else:\n self.log(\"'%s' was not found.\" % pkg_dir)\n\n def is_valid_package_path(self, pkg_path):\n return True\n\n def __str__(self):\n 
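# name used in the warning messages emitted by finalize()\n 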
return \"Python package '%s'\" % self.name\n\nclass TermCap(Component):\n def __init__(self, name):\n super(TermCap, self).__init__()\n self.name = name\n def search_standard_dbs(self):\n for prefix in prefixes:\n self.search_db(path.join(prefix, \"share/misc/termcap\"))\n def search_db(self, termcap_path):\n if self.exists:\n return\n if not os.path.exists(termcap_path):\n self.log(\"'%s' was not found.\" % termcap_path)\n return\n with open(termcap_path, 'r') as f:\n termcap_src = f.read().decode('utf8')\n lines = termcap_src.replace('\\\\\\n', '').splitlines()\n for line in lines:\n line = line.strip()\n if line.startswith('#'):\n continue\n i = line.find(':')\n if i < 1:\n continue\n line = line[0:i].split('|')\n if self.name in line:\n self.mark_found()\n return\n self.log(\"terminal '%s' was not found in '%s'.\" % (self.name, termcap_path))\n\nclass TermInfo(Component):\n def __init__(self, name):\n super(TermInfo, self).__init__()\n self.path = None\n self.name = name\n\n def search_standard_directories(self):\n for prefix in prefixes:\n self.search_directory(path.join(prefix, \"share/terminfo\"))\n self.search_directory(path.join(prefix, \"lib/terminfo\"))\n\n def search_directory(self, par_dir):\n if self.exists:\n return\n name = self.name\n\n fn = os.path.join(par_dir, name[0], name)\n if os.path.exists(fn):\n self.path = fn\n self.mark_found()\n return\n else:\n self.log(\"'%s' was not found.\" % fn)\n\n fn = os.path.join(par_dir, (\"0\" + hex(ord(name[0])))[-2:], name)\n if os.path.exists(fn):\n self.path = fn\n self.mark_found()\n return\n else:\n self.log(\"'%s' was not found.\" % fn)\n def __str__(self):\n return \"Terminfo '%s'\" % self.name\n\nclass Screen256ColorTermCap(TermCap):\n def __init__(self):\n super(Screen256ColorTermCap, self).__init__('screen-256color')\n self.search_standard_dbs()\n\nclass Screen256ColorTermInfo(TermInfo):\n def __init__(self):\n super(Screen256ColorTermInfo, self).__init__('screen-256color')\n self.search_standard_directories()\n\nclass PowerlineDaemon(Executable):\n def __init__(self):\n super(PowerlineDaemon, self).__init__('powerline-daemon')\n self.search_standard_directories()\n\nclass PowerlineStatusPackage(PythonPackage):\n def __init__(self):\n super(PowerlineStatusPackage, self).__init__('powerline')\n self.bindings_path = None\n self.search_standard_package_directories()\n self.search_package_directory(\"/usr/share\")\n\n def is_valid_package_path(self, pkg_path):\n if path.exists(path.join(pkg_path, \"bindings\")):\n self.bindings_path = path.join(pkg_path, \"bindings\")\n return True\n if path.exists(path.join(pkg_path, \"fish\")):\n self.bindings_path = path\n return True\n self.log(\"Bindings were not found for '%s'.\" % pkg_path)\n return False\n\nclass RVM(Component):\n def __init__(self):\n super(RVM, self).__init__()\n\n bash_profile = \"/etc/profile.d/rvm.sh\"\n if path.exists(bash_profile):\n self.bash_profile = bash_profile\n self.mark_found()\n else:\n self.log(\"'%s' was not found.\" % bash_profile)\n def __str__(self):\n return \"RVM\"\n\nclass Pipenv(Executable):\n def __init__(self):\n super(Pipenv, self).__init__('pipenv')\n self.search_standard_directories()\n\nclass OPAM(Executable):\n def __init__(self):\n super(OPAM, self).__init__('opam')\n self.search_standard_directories()\n\nclass PowerlineRS(Executable):\n def __init__(self):\n super(PowerlineRS, self).__init__('powerline-rs')\n self.search_standard_directories()\n\nclass Lsd(Executable):\n \"\"\" The next gen ls command 
(https://github.com/Peltoche/lsd)\n \"\"\"\n def __init__(self):\n super(Lsd, self).__init__('lsd')\n self.search_standard_directories()\n\nclass SingleUserNix(Executable):\n def __init__(self):\n super(SingleUserNix, self).__init__('nix-shell')\n self.search_directory(path.join(home, '.nix-profile', 'bin'))\n","sub_path":"scripts/dotfiler/components.py","file_name":"components.py","file_ext":"py","file_size_in_byte":8160,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"85219356","text":"#!/usr/bin/env python\n__author__ = 'Hristo'\n\nimport os\nimport shutil\n\nprint(\"Cleaning project structure...\")\n\npathName = \".\"\nfileToRemove = \"__pycache__\"\n \nfor subdirectoryPath, subdirectoryName, fileName in os.walk(pathName):\n if fileToRemove in subdirectoryPath:\n print(\"Removing: \" + subdirectoryPath)\n shutil.rmtree(subdirectoryPath)","sub_path":"QtBrowser/cleanup.py","file_name":"cleanup.py","file_ext":"py","file_size_in_byte":377,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"204895656","text":"import tradersbot as tt\r\nimport random\r\n\r\n###########################################################\r\n# Make sure you run pip install tradersbot before running #\r\n###########################################################\r\n\r\n# Make a tradersbot\r\nt = tt.TradersBot(host='127.0.0.1', id='trader0', password='trader0')\r\n\r\n# Constants\r\nPOS_LIMIT = 500\r\nORDER_LIMIT = 100\r\n\r\n# Keeps track of prices\r\nSECURITIES = {}\r\nPREDS = {}\r\ntime = 0;\r\nopen_orders = {};\r\n\r\n# Initializes the prices\r\n# Initializes prediction dictionary\r\ndef ack_register_method(msg, order):\r\n\tglobal SECURITIES, PREDS, time\r\n\tsecurity_dict = msg['case_meta']['securities']\r\n\tfor security in security_dict.keys():\r\n\t\tif not(security_dict[security]['tradeable']): \r\n\t\t\tcontinue\r\n\t\tSECURITIES[security] = security_dict[security]['starting_price']\r\n\r\n\tfor security in security_dict:\r\n\t\tPREDS[security] = {};\r\n\r\n# Updates latest price and time\r\ndef market_update_method(msg, order):\r\n\tglobal SECURITIES, time\r\n\r\n\tsecurity = msg['market_state']['ticker']\r\n\r\n\t# Gets the price by averaging the highest bid (or buy order)\r\n\t# and lowest ask (or sell order)\r\n\tmax_bid = -1;\r\n\tmin_ask = -1;\r\n\tfor bid in msg['market_state']['bids']:\r\n\t\tif float(bid) > max_bid:\r\n\t\t\tmax_bid = float(bid);\r\n\t\r\n\tfor ask in msg['market_state']['asks']:\r\n\t\tif min_ask == -1 or float(ask) < min_ask:\r\n\t\t\tmin_ask =float(ask);\r\n\r\n\tif min_ask == -1 or max_bid == -1:\r\n\t\tprice = msg['market_state']['last_price'];\r\n\telse:\r\n\t\tprice = (min_ask + max_bid) / 2;\r\n\tSECURITIES[security] = price;\r\n\r\n\t# Sets the time\r\n\ttime = msg['elapsed_time']\r\n\r\n# Buys or sells in a random quantity every time it gets an update\r\n# You do not need to buy/sell here\r\n# Checks to make sure does not violate position limits or order limit\r\ndef trader_update_method(msg, order):\r\n\tprint(\"Hello world!\")\r\n\tglobal SECURITIES, POS_LIMIT, open_orders\r\n\r\n\tpositions = msg['trader_state']['positions']\r\n\topen_orders = msg['trader_state']['open_orders']\r\n\r\n\tfor security in positions.keys():\r\n\t\tif len(open_orders) > ORDER_LIMIT:\r\n\t\t\tbreak;\r\n\t\tif abs(positions[security]) >= POS_LIMIT:\r\n\t\t\tcontinue;\r\n\t\tif random.random() < 0.5:\r\n\t\t\tquant = min(10*random.randint(1, 10), 
POS_LIMIT-positions[security])\r\n\t\t\tif quant < 0:\r\n\t\t\t\tcontinue\r\n\t\t\torder.addBuy(security, quantity=quant,price=SECURITIES[security])\r\n\t\telse:\r\n\t\t\tquant = min(10*random.randint(1, 10), positions[security]+POS_LIMIT)\r\n\t\t\tif quant < 0:\r\n\t\t\t\tcontinue\r\n\t\t\torder.addSell(security, quantity=quant,price=SECURITIES[security])\r\n\r\n# Update store of predictions\r\n# You may want to change the way predictions are stored\r\ndef news_method(msg, order):\r\n\tglobal PREDS\r\n\tinfo = msg['news']['headline'].split()\r\n\tsecurity = info[0]\r\n\tnew_time = float(info[1])\r\n\tprice = float(msg['news']['body']);\r\n\tPREDS[security][new_time] = price;\r\n\r\n\r\n\r\n###############################################\r\n#### You can add more of these if you want ####\r\n###############################################\r\n\r\nt.onAckRegister = ack_register_method\r\nt.onMarketUpdate = market_update_method\r\nt.onTraderUpdate = trader_update_method\r\nt.onNews = news_method\r\n#t.onTrade = trade_method\r\n#t.onAckModifyOrders = ack_modify_orders_method\r\nt.run()","sub_path":"order-execution/ox/simplebot.py","file_name":"simplebot.py","file_ext":"py","file_size_in_byte":3171,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"619197899","text":"from ._anvil_designer import HomepageTemplate\nfrom anvil import *\nimport anvil.server\nimport anvil.tables as tables\nimport anvil.tables.query as q\nfrom anvil.tables import app_tables\nfrom ..ArticleEdit import ArticleEdit\n\n\n\nclass Homepage(HomepageTemplate):\n def __init__(self, **properties):\n # Set Form properties and Data Bindings.\n self.init_components(**properties)\n\n # Any code you write here will run when the form opens.\n self.refresh_articles()\n \n # Set an event handler on the RepeatingPanel (our 'articles_panel')\n self.articles_panel.set_event_handler('x-delete-article', self.delete_article)\n\n def add_article_button_click(self, **event_args):\n # Initialise an empty dictionary to store the user inputs\n new_article = {}\n # Open an alert displaying the 'ArticleEdit' Form\n save_clicked = alert(\n content=ArticleEdit(item=new_article),\n title=\"Add Article\",\n large=True,\n buttons=[(\"Save\", True), (\"Cancel\", False)],\n )\n # If the alert returned 'True', the save button was clicked.\n if save_clicked:\n anvil.server.call('add_article', new_article)\n \n self.refresh_articles()\n\n def refresh_articles(self):\n # Load existing articles from the Data Table, and display them in the RepeatingPanel\n self.articles_panel.items = anvil.server.call('get_articles') \n \n def delete_article(self, article, **event_args):\n # Delete the article\n anvil.server.call('delete_article', article)\n # Refresh articles to remove the deleted article from the Homepage\n self.refresh_articles()\n","sub_path":"News_app/client_code/Homepage/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1579,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"569690251","text":"import pygame, logSystem\nfrom renderGameTest import *\nfrom config import *\nfrom music import *\nimport config\n\nSTEP = 64\n\nWINDOW_HEIGHT = 1024\nWINDOW_WEIGHT = 800\n\nGAME_HEIGHT = 576\nGAME_WEIGHT = 576\n\nINV_HEIGHT = WINDOW_HEIGHT - GAME_HEIGHT\nINV_WEIGHT = GAME_WEIGHT\n\nsurfInv = pygame.Surface((INV_HEIGHT,INV_WEIGHT))\nsurfSelect = pygame.Surface((STEP,STEP))\nsurfInvlog = pygame.Surface((3*STEP, 3*STEP))\nsurfInvitm = 
pygame.Surface((5*STEP, 3*STEP))\n\nglotok = pygame.mixer.Sound('music/glotok.ogg')\nbrosok = pygame.mixer.Sound('music/brosok.ogg')\n\ndef loadInv():\n\tf = open('inv.txt', 'r')\n\ts = f.read()\n\tm = []\n\ttmp = []\n\tfor x in range(0,len(s)):\n\t\tif s[x]=='\\n':\n\t\t\tm.append(tmp)\n\t\t\ttmp = []\n\t\telse:\n\t\t\ttmp.append(s[x])\n#\tprint(m)\n\treturn m\n\ndef renderInv(surfSelect,a,b,sc):\n\tx = 0\n\ty = 0\n\tfor i in range(0,len(config.inv)):\n\t\tfor j in range(0,len(config.inv[i])):\n\t\t\tif config.inv[i][j]=='0':\n\t\t\t\timg = pygame.image.load('srcBMP/inv/invempty.bmp')\n\t\t\t\timg_rect = img.get_rect(topleft=(x,y))\n\t\t\t\tsurfInvitm.blit(img,img_rect)\n\t\t\t\tx+=STEP\n\t\t\telif config.inv[i][j]=='a':\n\t\t\t\timg = pygame.image.load('srcBMP/inv/invpotion.bmp')\n\t\t\t\timg_rect = img.get_rect(topleft=(x,y))\n\t\t\t\tsurfInvitm.blit(img,img_rect)\n\t\t\t\tx+=STEP\n\t\t\telif config.inv[i][j]=='d':\n\t\t\t\timg = pygame.image.load('srcBMP/inv/invsword.bmp')\n\t\t\t\timg_rect = img.get_rect(topleft=(x,y))\n\t\t\t\tsurfInvitm.blit(img,img_rect)\n\t\t\t\tx+=STEP\n\t\t\telif config.inv[i][j]=='e':\n\t\t\t\timg = pygame.image.load('srcBMP/inv/invspear.bmp')\n\t\t\t\timg_rect = img.get_rect(topleft=(x,y))\n\t\t\t\tsurfInvitm.blit(img,img_rect)\n\t\t\t\tx+=STEP\n\t\t\telif config.inv[i][j]=='i':\n\t\t\t\timg = pygame.image.load('srcBMP/inv/invarmour1.bmp')\n\t\t\t\timg_rect = img.get_rect(topleft=(x,y))\n\t\t\t\tsurfInvitm.blit(img,img_rect)\n\t\t\t\tx+=STEP\n\t\t\telif config.inv[i][j]=='j':\n\t\t\t\timg = pygame.image.load('srcBMP/inv/invarmour2.bmp')\n\t\t\t\timg_rect = img.get_rect(topleft=(x,y))\n\t\t\t\tsurfInvitm.blit(img,img_rect)\n\t\t\t\tx+=STEP\n\t\tx = 0\n\t\ty += STEP\n\tsurfSelect.fill((255, 0, 0))\n\tsurfInvitm.blit(surfSelect,(b*STEP,a*STEP))\n\tsurfInvlog.fill((78, 78, 78))\n\tx = 5\n\tinvlogmsg = ['Armour level: '+str(config.player['arm']), 'Strength: '+str(config.player['power'])]\n\tfor s in invlogmsg:\n\t\tfont = pygame.font.SysFont('verdana',20)\n\t\ttext = font.render(s, 1, (255,255,255))\n\t\tsurfInvlog.blit(text,(5,x))\n\t\tx+=20\n\timg = pygame.image.load('srcBMP/inv/invbg.bmp')\n\timg_rect = img.get_rect(topleft=(0,0))\n\tsurfInv.blit(img,img_rect)\n\tsurfInv.blit(surfInvlog, (3*STEP, STEP))\n\tsurfInv.blit(surfInvitm, (1*STEP, 5*STEP))\n\tif config.player['type'] == 0:\n\t\timg = pygame.image.load('srcBMP/inv/invempty.bmp')\n\t\tsurfInv.blit(img, (STEP, STEP))\n\telif config.player['type'] == 1:\n\t\timg = pygame.image.load('srcBMP/inv/invsword.bmp')\n\t\tsurfInv.blit(img, (STEP, STEP))\n\telif config.player['type'] == 2:\n\t\timg = pygame.image.load('srcBMP/inv/invspear.bmp')\n\t\tsurfInv.blit(img, (STEP, STEP))\n\tif config.player['arm'] == 0:\n\t\timg = pygame.image.load('srcBMP/inv/invempty.bmp')\n\t\tsurfInv.blit(img, (STEP, 3*STEP))\n\telif config.player['arm'] == 1:\n\t\timg = pygame.image.load('srcBMP/inv/invarmour1.bmp')\n\t\tsurfInv.blit(img, (STEP, 3*STEP))\n\telif config.player['arm'] == 2:\n\t\timg = pygame.image.load('srcBMP/inv/invarmour2.bmp')\n\t\tsurfInv.blit(img, (STEP, 3*STEP))\n\tsc.blit(surfInv,(GAME_HEIGHT,0))\n\ndef openInv(maps,player,sc):\n\tlogSystem.blitLog('inv',[],sc)\n\ta = 0\n\tb = 0\n\tsurfSelect.set_alpha(127)\n\trenderInv(surfSelect,a,b,sc)\n\twhile True:\n\t\tfor i in pygame.event.get():\n\t\t\tif i.type == pygame.QUIT:\n\t\t\t\texit()\n\t\t\telif i.type == pygame.KEYDOWN:\n\t\t\t\tif i.key == pygame.K_i:\n\t\t\t\t\tsurfSelect.set_alpha(0)\n\t\t\t\t\trenderInv(surfSelect,a,b,sc)\n\t\t\t\t\treturn\n\t\t\t\telif i.key 
== pygame.K_e:\n\t\t\t\t\t#Using a potion\n\t\t\t\t\tif config.inv[a][b] == 'a':\n\t\t\t\t\t\tvolume = config.PROCENT / 100\n\t\t\t\t\t\tglotok.set_volume(volume)\n\t\t\t\t\t\topenSound(glotok)\n\t\t\t\t\t\tplayer['hp'] +=2\n\t\t\t\t\t\tif player['hp'] > 6:\n\t\t\t\t\t\t\tplayer['hp'] = 6\n\t\t\t\t\t\tconfig.inv[a][b] = '0'\n\t\t\t\t\t\trenderHP(sc)\n\t\t\t\t\t#Swapping equipment\n\t\t\t\t\telif config.inv[a][b] == 'd':\n\t\t\t\t\t\tif player['type'] == 0:\n\t\t\t\t\t\t\tconfig.inv[a][b] = '0'\n\t\t\t\t\t\telif player['type'] == 1:\n\t\t\t\t\t\t\tconfig.inv[a][b] = 'd'\n\t\t\t\t\t\telif player['type'] == 2:\n\t\t\t\t\t\t\tconfig.inv[a][b] = 'e'\n\t\t\t\t\t\tplayer['type'] = 1\n\t\t\t\t\t\tplayer['power'] = 2\n\t\t\t\t\t\trefreshPlayer(player)\n\t\t\t\t\telif config.inv[a][b] == 'e':\n\t\t\t\t\t\tif player['type'] == 0:\n\t\t\t\t\t\t\tconfig.inv[a][b] = '0'\n\t\t\t\t\t\telif player['type'] == 1:\n\t\t\t\t\t\t\tconfig.inv[a][b] = 'd'\n\t\t\t\t\t\telif player['type'] == 2:\n\t\t\t\t\t\t\tconfig.inv[a][b] = 'e'\n\t\t\t\t\t\tplayer['type'] = 2\n\t\t\t\t\t\tplayer['power'] = 1\n\t\t\t\t\t\trefreshPlayer(player)\n\t\t\t\t\t#Swapping armour\n\t\t\t\t\telif config.inv[a][b] == 'i':\n\t\t\t\t\t\tif player['arm'] < 1:\n\t\t\t\t\t\t\tif player['arm'] == 0:\n\t\t\t\t\t\t\t\tconfig.inv[a][b] = '0'\n\t\t\t\t\t\t\telif player['arm'] == 1:\n\t\t\t\t\t\t\t\tconfig.inv[a][b] = 'i'\n\t\t\t\t\t\t\telif player['arm'] == 2:\n\t\t\t\t\t\t\t\tconfig.inv[a][b] = 'j'\n\t\t\t\t\t\t\tplayer['arm'] = 1\n\t\t\t\t\t\t\trefreshPlayer(player)\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tlogSystem.blitLog('inv',[0],sc)\n\t\t\t\t\telif config.inv[a][b] == 'j':\n\t\t\t\t\t\tif player['arm'] < 2:\n\t\t\t\t\t\t\tif player['arm'] == 0:\n\t\t\t\t\t\t\t\tconfig.inv[a][b] = '0'\n\t\t\t\t\t\t\telif player['arm'] == 1:\n\t\t\t\t\t\t\t\tconfig.inv[a][b] = 'i'\n\t\t\t\t\t\t\telif player['arm'] == 2:\n\t\t\t\t\t\t\t\tconfig.inv[a][b] = 'j'\n\t\t\t\t\t\t\tplayer['arm'] = 2\n\t\t\t\t\t\t\trefreshPlayer(player)\n\t\t\t\t#Unequipping\n\t\t\t\telif i.key == pygame.K_r:\n\t\t\t\t\tif config.inv[a][b] == '0':\n\t\t\t\t\t\tif player['type'] == 0:\n\t\t\t\t\t\t\tconfig.inv[a][b] = '0'\n\t\t\t\t\t\telif player['type'] == 1:\n\t\t\t\t\t\t\tconfig.inv[a][b] = 'd'\n\t\t\t\t\t\telif player['type'] == 2:\n\t\t\t\t\t\t\tconfig.inv[a][b] = 'e'\n\t\t\t\t\t\tplayer['type'] = 0\n\t\t\t\t\t\tplayer['power'] = 0.5\n\t\t\t\t\t\trefreshPlayer(player)\n\t\t\t\t#Destroying an item\n\t\t\t\telif i.key == pygame.K_q:\n\t\t\t\t\tif config.inv[a][b] != '0':\n\t\t\t\t\t\tvolume = config.PROCENT / 100\n\t\t\t\t\t\tbrosok.set_volume(volume)\n\t\t\t\t\t\topenSound(brosok)\n\t\t\t\t\t\tconfig.inv[a][b] = '0'\n\t\t\t\telif i.key == pygame.K_UP:\n\t\t\t\t\ta -= 1\n\t\t\t\t\tif a < 0:\n\t\t\t\t\t\ta = 0\n\t\t\t\telif i.key == pygame.K_RIGHT:\t\n\t\t\t\t\tb += 1\n\t\t\t\t\tif b > 4:\n\t\t\t\t\t\tb = 4\n\t\t\t\telif i.key == pygame.K_DOWN:\n\t\t\t\t\ta += 1\n\t\t\t\t\tif a > 2:\n\t\t\t\t\t\ta = 2\n\t\t\t\telif i.key == pygame.K_LEFT:\n\t\t\t\t\tb -= 1\n\t\t\t\t\tif b < 0:\n\t\t\t\t\t\tb = 0\n\t\t\t\telse:\n\t\t\t\t\tprint('ERROR KEY')\n\t\trenderInv(surfSelect,a,b,sc)\n\t\trenderMap(maps,sc)\n\t\tpygame.display.update()\n\t\t\ndef refreshPlayer(player):\n\tdictEnv[2] = 'srcBMP/player/player'+str(player['arm'])+str(player['type'])+'.bmp'\n\t\ndef openChest(maps,sc):\n\tx = config.player['i']\n\ty = config.player['j']\n\tif maps[x-1][y]=='4' or maps[x][y-1]=='4' or maps[x][y+1]=='4' or maps[x+1][y] == '4':\n\t\tfor i in range(x-1, x+2):\n\t\t\tfor j in range(y-1, y+2):\n\t\t\t\tif 
maps[i][j]=='4':\n\t\t\t\t\tfor a in range(0, len(config.inv)):\n\t\t\t\t\t\tfor b in range(0, len(config.inv[a])):\n\t\t\t\t\t\t\tif config.inv[a][b] == '0':\n\t\t\t\t\t\t\t\taddItem(config.inv,a,b,sc)\n\t\t\t\t\t\t\t\tmaps[i][j] = 'a'\n\t\t\t\t\t\t\t\treturn\n\t\t\t\t\tprint('You are overencumbered')\n\t\t\t\t\tlogSystem.blitLog('game',[1],sc)\n\t\t\t\t\treturn\n\telse:\n\t\tprint('No chest nearby')\n\t\ndef addItem(inv,x,y,sc):\n\tc=random.randint(1,10)\n\tif c<7:\n\t\tconfig.inv[x][y]='a'\n\t\tprint('Potion added')\n\t\tlogSystem.blitLog('game',[2],sc)\n\telif c==7:\n\t\tconfig.inv[x][y]='d'\n\t\tprint('Sword added')\n\t\tlogSystem.blitLog('game',[3],sc)\n\telif c==8:\n\t\tconfig.inv[x][y]='e'\n\t\tprint('Spear & shield added')\n\t\tlogSystem.blitLog('game',[4],sc)\n\telif c==9:\n\t\tconfig.inv[x][y]='i'\n\t\tprint('Leather armour added')\n\t\tlogSystem.blitLog('game',[5],sc)\n\telif c==10:\n\t\tconfig.inv[x][y]='j'\n\t\tprint('Steel armour added')\n\t\tlogSystem.blitLog('game',[6],sc)","sub_path":"renderInv.py","file_name":"renderInv.py","file_ext":"py","file_size_in_byte":7468,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"10032628","text":"from aibs.SweepStim import SweepStim\nfrom psychopy import visual, core, event, logging, misc, monitors\nimport itertools\nimport scipy\n\n\"\"\"\nThis is a sample script that sets up a basic white noise experiment. This should be performed by the GUI eventually.\n\"\"\"\n\n\n#GENERIC PARAMETERS (should be passed by GUI, some of which have been read from config file)\nparams = {}\nparams['runs'] = 1 #number of runs\nparams['shuffle'] = True #shuffle sweep tables\nparams['preexpsec'] = 2 #seconds at the start of the experiment\nparams['postexpsec'] = 2 #seconds at the end of the experiment\nparams['sweeplength'] = 2/60.00 #length of sweeps\nparams['postsweepsec'] = 0 #black period after sweeps (foreground remains)\nparams['logdir'] = \"C:\\\\ExperimentLogs\\\\\" #where to put the log\nparams['backupdir'] = None #backup to network\nparams['mouseid'] = \"Spock\" #name of the mouse\nparams['userid'] = \"derricw\" #name of the user\nparams['task'] = \"\" #task type\nparams['stage'] = \"idkwhatthismeans\" #stage\nparams['protocol'] = \"\" #implemented later\nparams['nidevice']='Dev1' #NI device name\nparams['blanksweeps']=0 #blank sweep every x sweeps\nparams['bgcolor']='gray' #background color\nparams['syncsqr']=True #display a flashing square for synchronization\nparams['syncsqrloc']=(-600,-350)\nparams['script']=__file__\n\n\n#SET CONSOLE OUTPUT LEVEL, INITIALIZE WINDOWS\n#logging.console.setLevel(logging.DEBUG) #uncomment for diagnostics\nwindow = visual.Window(units='norm',monitor='testMonitor', fullscr = True, screen = 0, waitBlanking=False)\nwindow.setColor(params['bgcolor'])\n\n#CREATE BACKGROUND STIMULUS\n\nnoise = visual.GratingStim(window,tex=None,mask=\"None\",texRes=64,\n size=[1280,1024], ori = 0, pos = (0,0), name='box', autoLog=False, units = 'pix') \n \n#CREATE BACKGROUND FRAME PARAMETERS (what changes between frames and how much)\nbgFrame = {}\n\n#CREATE BACKGROUND SWEEP PARAMETERS (what changes between sweeps, and in what order)\nbgSweep = {}\n\nnoisematrix = [(scipy.random.randint(2,size = (64,64))*2-1) for x in range(100)]\n\nbgSweep['Tex'] = (noisematrix,0) #each texture we just generated\n\n#CREATE FOREGROUND STIMULUS (none for basic white noise experiment)\n\n\n#CREATE FOREGROUND STIMULUS FRAME PARAMETERS (what changes between frames and how much)\nfgFrame = {}\n\n#CREATE 
FOREGROUND SWEEP PARAMETERS (what changes between sweeps)\nfgSweep = {}\n\n#CREATE FORAGING CLASS INSTANCE\ng = SweepStim(window = window, params = params, bgStim = noise, bgFrame = bgFrame, bgSweep = bgSweep, fgStim = None)\n#RUN IT\ng.run()","sub_path":"scripts/whitenoiseexp.py","file_name":"whitenoiseexp.py","file_ext":"py","file_size_in_byte":2499,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"476779523","text":"import os\nimport datetime\n\nimport yaml\n\n\nclass Config:\n def __init__(self, path):\n self.path = path\n self.data = {}\n\n if os.path.exists(self.path):\n self.data = yaml.safe_load(open(self.path, \"r\"))\n if self.data is None:\n # allow an empty file to start with\n self.data = {}\n\n @property\n def output_path(self):\n return os.path.abspath(self.data.get(\"output_path\", \"output\"))\n\n @property\n def content_paths(self):\n if \"content_paths\" in self.data:\n paths = self.data[\"content_paths\"]\n else:\n paths = [\"content\"]\n if os.path.exists(os.path.join(\"theme\", \"content\")):\n paths.append(os.path.join(\"theme\", \"content\"))\n\n # add the built-in content from combine itself\n paths.append(os.path.join(os.path.dirname(__file__), \"base_content\"))\n\n return [os.path.abspath(x) for x in paths]\n\n @property\n def variables(self):\n variables = self.default_variables\n\n for name, data in self.data.get(\"variables\", {}).items():\n if isinstance(data, dict):\n if \"default\" in data:\n variables[name] = data[\"default\"]\n\n if \"from_env\" in data and data[\"from_env\"] in os.environ:\n variables[name] = os.environ[data[\"from_env\"]]\n else:\n variables[name] = data\n\n return variables\n\n @property\n def default_variables(self):\n return {\"now\": datetime.datetime.now} # as a function\n\n @property\n def steps(self):\n return self.data.get(\"steps\", [])\n","sub_path":"combine/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":1650,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"414915990","text":"'''\nCreated on 13/04/13\nCode Jam 2013 Qualification Round C\n@author: manolo\n'''\nimport math\nimport sys\nifile = sys.stdin\ndef r():\n return ifile.readline()[:-1]\n\nofile = open('./c-large1.out', 'w')\ndef w(what):\n ofile.write(what + '\\n')\n\ndef is_fair(n):\n return str(n) == str(n)[::-1]\n\n#def is_square(n):\n# sqrt = math.sqrt(n)\n## print 'sqrt = ' + str(sqrt)\n# if sqrt - long(sqrt) > 0:\n# return False\n# else:\n# return is_fair(long(sqrt))\n \n\ndef find(a, b):\n count = 0\n# print '(' + str(a) + ', ' + str(b) + ')'\n\n sqrt_a = math.sqrt(a)\n# print \"sqrt_a: \" + str(sqrt_a)\n long_sqrt_a = long(sqrt_a)\n# print \"long_sqrt_a: \" + str(long_sqrt_a)\n i = long_sqrt_a\n if sqrt_a - long_sqrt_a > 0:\n i += 1\n# print \"i: \" + str(i)\n\n bb = long(math.sqrt(b))\n# print '(' + str(i) + ', ' + str(bb) + ')'\n while (i <= bb):\n if is_fair(i):\n ii = i*i\n if is_fair(ii):\n# print ii\n count += 1\n# else:\n# print str(i) + \" --> NO\"\n i +=1\n return count\n\nt = long(r())\n#print 't: ' + str(t)\n\nfor i in xrange(t):\n (A, B) = r().split(' ')\n a = long(A)\n b = long(B)\n n = find(a, b)\n w('Case #' + str(i+1) + ': ' + str(n))\n\nofile.close\n\n","sub_path":"solutions_python/Problem_118/909.py","file_name":"909.py","file_ext":"py","file_size_in_byte":1288,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"475594204","text":"from django.urls import 
path\n\nfrom . import views\n\nurlpatterns = [\n path('userlist/', views.userlist),\n path('addUser/', views.addUser),\n path('addUserHandler/', views.addUserHandler),\n path('editUser/', views.editUser),\n path('editUserHandler/', views.editUserHandler),\n path('login/', views.login),\n path('loginHandler/', views.loginHandler),\n path('loginout/', views.loginout),\n path('homePage/', views.homePage),\n path('menuList/', views.menuList),\n path('addMenu/', views.addMenu),\n path('addMenuHandler/', views.addMenuHandler),\n path('delMenuHandler/', views.delMenuHandler),\n path('editMenu/', views.editMenu),\n path('editMenuHandler/', views.editMenuHandler),\n path('newsList/', views.newsList),\n path('delnews/', views.delnews),\n path('addNews/', views.addNews),\n path('addNewsHandler/', views.addNewsHandler),\n path('editNews/', views.editNews),\n path('editNewsHandler/', views.editNewsHandler),\n path('positionList/', views.positionList),\n path('addPosition/', views.addPosition),\n path('addPositionHandler/', views.addPositionHandler),\n path('editPosition/', views.editPosition),\n path('editPositionHandler/', views.editPositionHandler),\n path('delPosition/', views.delPosition),\n path('poscontent/', views.poscontent),\n path('addPosContent/', views.addPosContent),\n path('delPoscontent/', views.delPoscontent),\n path('editposContent/', views.editposContent),\n path('editposContentHandler/', views.editposContentHandler),\n path('clearFile/', views.clearFile),\n]","sub_path":"admin1/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1573,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"307695965","text":"#!/bin/python3\n\nimport os\nimport sys\n\n#\n# Complete the countApplesAndOranges function below.\n#\ndef countApplesAndOranges(s, t, a, b, apples, oranges):\n s1=0\n s2=0\n for i in apples:\n if (i in range(s-a,t-a+1)):\n s1+=1\n for j in oranges:\n if (j in range(s-b,t-b+1)):\n s2+=1\n \n print(s1)\n print(s2)\n\nif __name__ == '__main__':\n st = input().split()\n\n s = int(st[0])\n\n t = int(st[1])\n\n ab = input().split()\n\n a = int(ab[0])\n\n b = int(ab[1])\n\n mn = input().split()\n\n m = int(mn[0])\n\n n = int(mn[1])\n\n apple = list(map(int, input().rstrip().split()))\n\n orange = list(map(int, input().rstrip().split()))\n\n countApplesAndOranges(s, t, a, b, apple, orange)\n","sub_path":"HackerRank/Implementation/countApplesAndOranges.py","file_name":"countApplesAndOranges.py","file_ext":"py","file_size_in_byte":741,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"180120226","text":"#\n# Copyright SAS Institute\n#\n# Licensed under the Apache License, Version 2.0 (the License);\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\nimport fcntl\nimport os\nimport signal\nimport subprocess\nfrom time import sleep\n\nimport saspy.sascfg as sascfg\n\n\nclass SASconfig:\n def __init__(self, cfgname='', kernel=None, saspath='', options=''):\n # import pdb; pdb.set_trace()\n\n self.configs = []\n self._kernel = kernel\n self.saspath 
= saspath\n self.options = options\n\n # GET Config options\n try:\n self.cfgopts = getattr(sascfg, \"SAS_config_options\")\n except:\n self.cfgopts = {}\n lock = self.cfgopts.get('lock_down', True)\n # in lock down mode, don't allow runtime overrides of option values from the config file.\n if lock:\n if len(saspath) > 0 or len(options) > 0:\n print(\"Parameters passed to SAS_session were ignored due to configuration restriction.\")\n saspath = ''\n options = ''\n\n # GET Config names\n self.configs = getattr(sascfg, \"SAS_config_names\")\n\n if len(cfgname) == 0:\n if len(self.configs) == 0:\n print(\"No SAS Configuration names found in saspy.sascfg\")\n return\n else:\n if len(self.configs) == 1:\n cfgname = self.configs[0]\n if kernel is None:\n print(\"Using SAS Config named: \" + cfgname)\n else:\n cfgname = self._prompt(\n \"Please enter the name of the SAS Config you wish to run. Available Configs are: \" + str(\n self.configs) + \" \")\n\n while cfgname not in self.configs:\n cfgname = self._prompt(\n \"The SAS Config name specified was not found. Please enter the SAS Config you wish to use. Available Configs are: \" + str(\n self.configs) + \" \")\n\n self.name = cfgname\n cfg = getattr(sascfg, cfgname)\n if len(saspath) == 0:\n self.saspath = cfg.get('saspath', '/opt/sasinside/SASHome/SASFoundation/9.4/sas')\n if len(options) == 0:\n self.options = cfg.get('options', '')\n\n def _prompt(self, prompt, pw=False):\n if self._kernel is None:\n if not pw:\n try:\n return input(prompt)\n except (KeyboardInterrupt):\n return ''\n else:\n try:\n return getpass.getpass(prompt)\n except (KeyboardInterrupt):\n return ''\n else:\n try:\n return self._kernel._input_request(prompt, self._kernel._parent_ident, self._kernel._parent_header,\n password=pw)\n except (KeyboardInterrupt):\n return ''\n\n\nclass SASsession:\n def __init__(self, cfgname='', kernel=None, saspath='', options=''):\n self.pid = None\n self.stdin = None\n self.stderr = None\n self.stdout = None\n\n self.sascfg = SASconfig(cfgname, kernel, saspath, options)\n self._log_cnt = 0\n self._log = \"\"\n self._logr = \"\"\n\n self._startsas(self.sascfg)\n\n def __del__(self):\n if self.pid:\n self._endsas()\n self.pid = None\n\n def _logcnt(self, nxt=True):\n if nxt:\n self._log_cnt += 1\n return '%08d' % self._log_cnt\n\n def _startsas(self, sasconfig):\n if self.pid:\n return self.pid\n\n pgm = sasconfig.saspath\n parms = [pgm]\n parms += sasconfig.options\n parms += [\"-pagesize\", \"MAX\"]\n parms += [\"-nodms\"]\n parms += [\"-stdio\"]\n parms += [\"-terminal\"]\n parms += [\"-nosyntaxcheck\"]\n parms += ['']\n\n pipe_read = 0\n pipe_write = 1\n\n pin = os.pipe()\n pout = os.pipe()\n perr = os.pipe()\n\n pidpty = os.forkpty()\n if pidpty[0]:\n # we are the parent\n\n pid = pidpty[0]\n os.close(pin[pipe_read])\n os.close(pout[pipe_write])\n os.close(perr[pipe_write])\n\n else:\n # we are the child\n signal.signal(signal.SIGINT, signal.SIG_DFL)\n\n os.close(0)\n os.close(1)\n os.close(2)\n\n os.dup2(pin[pipe_read], 0)\n os.dup2(pout[pipe_write], 1)\n os.dup2(perr[pipe_write], 2)\n\n os.close(pin[pipe_read])\n os.close(pin[pipe_write])\n os.close(pout[pipe_read])\n os.close(pout[pipe_write])\n os.close(perr[pipe_read])\n os.close(perr[pipe_write])\n\n try:\n os.execv(pgm, parms)\n except:\n os._exit(-6)\n\n self.pid = pidpty[0]\n self.stdin = os.fdopen(pin[pipe_write], mode='wb')\n self.stderr = os.fdopen(perr[pipe_read], mode='rb')\n self.stdout = os.fdopen(pout[pipe_read], mode='rb')\n\n fcntl.fcntl(self.stdout, fcntl.F_SETFL, 
os.O_NONBLOCK)\n fcntl.fcntl(self.stderr, fcntl.F_SETFL, os.O_NONBLOCK)\n\n self.submit(\"options svgtitle='svgtitle'; options validvarname=any; ods graphics on;\", \"text\")\n\n return self.pid\n\n def _endsas(self):\n rc = 0\n if self.pid:\n code = b\";*\\';*\\\";*/;\\n;quit;endsas;\\n\"\n self.stderr.read1(4096)\n self.stdin.write(code)\n self.stdin.flush()\n sleep(1)\n try:\n rc = os.waitid(os.P_PID, self.pid, os.WEXITED | os.WNOHANG)\n except subprocess.TimeoutExpired:\n print(\"SAS didn't shutdown w/in 5 seconds; killing it to be sure\")\n os.kill(self.pid, signal.SIGKILL)\n self.pid = None\n return rc\n\n def submit(self, code, results=\"html\"):\n odsopen = b\"ods listing close;ods html5 file=stdout options(bitmap_mode='inline') device=png; ods graphics on / outputfmt=png;\\n\"\n odsclose = b\"ods html5 close;ods listing;\\n\"\n ods = True\n htm = \"html HTML\"\n mj = b\";*\\';*\\\";*/;\"\n lstf = '' \n logf = '' \n bail = False\n eof = 5\n bc = False\n done = False\n logn = self._logcnt()\n logcodei = \"%put E3969440A681A24088859985\" + logn + \";\"\n logcodeo = \"\\nE3969440A681A24088859985\" + logn\n\n if self.pid is None:\n return dict(LOG=\"No SAS process attached. SAS process has terminated unexpectedly.\", LST='')\n\n rc = os.waitid(os.P_PID, self.pid, os.WEXITED | os.WNOHANG)\n if rc is not None:\n self.pid = None\n return dict(LOG='SAS process has terminated unexpectedly. Pid State= ' + str(rc), LST='')\n\n if htm.find(results) < 0:\n ods = False\n\n if ods:\n self.stdin.write(odsopen)\n\n out = self.stdin.write(mj + b'\\n' + code.encode() + b'\\n' + mj)\n\n if ods:\n self.stdin.write(odsclose)\n\n out = self.stdin.write(b'\\n' + logcodei.encode() + b'\\n')\n self.stdin.flush()\n\n while not done:\n try:\n while True:\n rc = os.waitid(os.P_PID, self.pid, os.WEXITED | os.WNOHANG)\n if rc is not None:\n self.pid = None\n return dict(LOG='SAS process has terminated unexpectedly. Pid State= ' +\n str(rc), LST='')\n if bail:\n eof -= 1\n if eof < 0:\n break\n lst = self.stdout.read1(4096).decode()\n if len(lst) > 0:\n lstf += lst\n else:\n log = self.stderr.read1(4096).decode() \n if len(log) > 0:\n logf += log\n if logf.count(logcodeo) >= 1:\n bail = True\n if not bail and bc:\n self.stdin.write(odsclose+logcodei.encode() + b'\\n')\n self.stdin.flush()\n bc = False\n done = True\n\n except (KeyboardInterrupt, SystemExit):\n print('Exception caught!')\n ll = self._breakprompt(logcodeo)\n\n if ll.get('ABORT', False):\n return ll\n\n logf += ll['LOG']\n lstf += ll['LST']\n bc = ll['BC']\n\n if not bc:\n print('Exception handled :)\\n')\n else:\n print('Exception ignored, continuing to process...\\n')\n\n self.stdin.write(odsclose+logcodei.encode()+b'\\n')\n self.stdin.flush()\n\n trip = lstf.rpartition(\"/*]]>*/\") \n if len(trip[1]) > 0 and len(trip[2]) < 100:\n lstf = ''\n\n self._log += logf\n self._logr = logf\n final = logf.partition(logcodei)\n z = final[0].rpartition(chr(10))\n prev = '%08d' % (self._log_cnt - 1)\n zz = z[0].rpartition(\"\\nE3969440A681A24088859985\" + prev +'\\n')\n logd = zz[2].replace(mj.decode(), '')\n\n lstd = lstf.replace(chr(12), chr(10)).replace('',\n '').replace(\"font-size: x-small;\",\n \"font-size: normal;\")\n return dict(LOG=logd, LST=lstd)\n\n def _breakprompt(self, eos):\n found = False\n logf = ''\n lstf = ''\n bc = False\n\n if self.pid is None:\n return dict(LOG=b\"No SAS process attached. 
SAS process has terminated unexpectedly.\", LST=b'', ABORT=True)\n\n interrupt = signal.SIGINT\n os.kill(self.pid, interrupt)\n sleep(.25)\n\n while True:\n rc = os.waitid(os.P_PID, self.pid, os.WEXITED | os.WNOHANG)\n if rc is not None:\n self.pid = None\n outrc = str(rc)\n return dict(LOG=b'SAS process has terminated unexpectedly. Pid State= ' +\n outrc.encode(), LST=b'',ABORT=True)\n\n lst = self.stdout.read1(4096).decode()\n lstf += lst\n if len(lst) > 0:\n lsts = lst.rpartition('Select:')\n if lsts[0] != '' and lsts[1] != '':\n found = True\n query = lsts[1] + lsts[2].rsplit('\\n?')[0] + '\\n'\n print('Processing interrupt\\nAttn handler Query is\\n\\n' + query)\n response = self.sascfg._prompt(\"Please enter your Response: \")\n self.stdin.write(response.encode() + b'\\n')\n self.stdin.flush()\n if (response == 'C' or response == 'c') and query.count(\"C. Cancel\") >= 1:\n bc = True\n break\n else:\n lsts = lst.rpartition('Press')\n if lsts[0] != '' and lsts[1] != '':\n query = lsts[1] + lsts[2].rsplit('\\n?')[0] + '\\n'\n print('Secondary Query is:\\n\\n' + query)\n response = self.sascfg._prompt(\"Please enter your Response: \")\n self.stdin.write(response.encode() + b'\\n')\n self.stdin.flush()\n if (response == 'N' or response == 'n') and query.count(\"N to continue\") >= 1:\n bc = True\n break\n else:\n #print(\"******************No 'Select' or 'Press' found in lst=\")\n pass\n else:\n log = self.stderr.read1(4096).decode()\n logf += log\n self._log += log\n self._logr = log\n\n if log.count(eos) >= 1:\n print(\"******************Found end of step. No interrupt processed\")\n found = True\n\n if found:\n break\n\n sleep(.25)\n\n lstr = lstf\n logr = logf\n\n return dict(LOG=logr, LST=lstr, BC=bc)\n\n\"\"\"\nif __name__ == \"__main__\":\n self._startsas()\n\n ll = self.submit(sys.argv[1], \"text\")\n\n print(ll['LOG'])\n print(ll['LST'])\n\n self.endsas()\n\"\"\"\n","sub_path":"saspy/sasbase.py","file_name":"sasbase.py","file_ext":"py","file_size_in_byte":12946,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"223035957","text":"\"\"\"Unit tests for API components.\"\"\"\nimport json\nimport unittest\nimport requests\n\n\nclass TestApi(unittest.TestCase):\n \"\"\"Unit tests for API components.\"\"\"\n\n @classmethod\n def setUpClass(cls):\n \"\"\"Execute this before the tests.\"\"\"\n cls.base_url = 'http://api:5434/mobydq/api/v1'\n\n def test_get_health(self):\n \"\"\"Unit tests endpoint get /health.\"\"\"\n\n url = self.base_url + '/health'\n headers = {'Content-Type': 'application/json'}\n response = requests.get(url, headers=headers)\n status = response.status_code\n body = json.loads(response.text)\n\n # Assert http status code is 200\n self.assertEqual(status, 200)\n self.assertIsNotNone(body['message'])\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"test/test_api/test_api.py","file_name":"test_api.py","file_ext":"py","file_size_in_byte":784,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"269637778","text":"import numpy as np\n#import math\n#import scipy.ndimage\nfrom .frequency import frequency\n\n\ndef ridge_freq(\n im,\n mask,\n orient,\n blksze,\n windsze,\n minWaveLength,\n maxWaveLength):\n rows, cols = im.shape\n freq = np.zeros((rows, cols))\n\n for r in range(0, rows - blksze, blksze):\n for c in range(0, cols - blksze, blksze):\n blkim = im[r:r + blksze][:, c:c + blksze]\n blkor = orient[r:r + blksze][:, c:c + blksze]\n\n freq[r:r + 
blksze][:,\n c:c + blksze] = frequency(blkim,\n blkor,\n windsze,\n minWaveLength,\n maxWaveLength)\n\n freq = freq * mask\n freq_1d = np.reshape(freq, (1, rows * cols))\n ind = np.where(freq_1d > 0)\n\n ind = np.array(ind)\n ind = ind[1, :]\n\n non_zero_elems_in_freq = freq_1d[0][ind]\n\n meanfreq = np.mean(non_zero_elems_in_freq)\n # does not work properly\n medianfreq = np.median(non_zero_elems_in_freq)\n return(freq, meanfreq)\n","sub_path":"enhance/ridge_frequency.py","file_name":"ridge_frequency.py","file_ext":"py","file_size_in_byte":1216,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"8677763","text":"# 2263 <트리의 순회>\n\nimport sys\nsys.setrecursionlimit(10**6)\ninput = lambda: sys.stdin.readline()\n\nn=int(input())\ninorder=list(map(int, input().split()))\npostorder=list(map(int, input().split()))\nroot=postorder[-1]\n# inorder 원소들 어딨는지 알기 쉽게 하기 위해 따로 저장\npos=[0]*(n+1)\n\nfor i in range(n):\n pos[inorder[i]]=i\n\ndef divide(instart, inend, poststart, postend):\n if instart > inend or poststart>postend:\n return\n root=postorder[postend]\n print(root, end=' ')\n root_inorder=pos[root]\n left=root_inorder-instart\n\n divide(instart, root_inorder-1, poststart, poststart+left-1)\n divide(root_inorder+1, inend, poststart+left, postend-1)\n return\n\ndivide(0,n-1,0,n-1)","sub_path":"week2/hyungjoon/2263.py","file_name":"2263.py","file_ext":"py","file_size_in_byte":729,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"635571669","text":"import cv2\r\nimport numpy as np\r\ntracker_types = ['BOOSTING', 'MIL', 'KCF', 'TLD', 'MEDIANFLOW']\r\ntracker_type = tracker_types[3]\r\n\r\ndef imageCopy(src):\r\n return np.copy(src)\r\n \r\n \r\ndef convertColor(image, flag=None):\r\n if flag is None:\r\n return image\r\n else:\r\n return cv2.cvtColor(image, flag)\r\n\r\n\r\ndef rangeColor(image, lower, upper):\r\n return cv2.inRange(image, lower, upper)\r\n\r\n\r\ndef splitColor(image, lower, upper, flag=None):\r\n converted = convertColor(image, flag)\r\n mask = rangeColor(converted, lower, upper)\r\n return cv2.bitwise_and(image, image, mask=mask)\r\n\r\n\r\ndef polyROI(image, points):\r\n if len(image.shape) == 2:\r\n channels = 1\r\n else:\r\n channels = image.shape[2]\r\n mask = np.zeros_like(image)\r\n ignore_mask_color = (255,) * channels\r\n cv2.fillPoly(mask, points, ignore_mask_color)\r\n return cv2.bitwise_and(image, mask)\r\n\r\n\r\ndef convertColor(image, flag=None):\r\n if flag is None:\r\n return image\r\n else:\r\n return cv2.cvtColor(image, flag)\r\n\r\n\r\ndef cannyEdge(image, threshold1=100, threshold2=200):\r\n return cv2.Canny(image, threshold1, threshold2)\r\n\r\n\r\ndef addWeightedImage(image1, w1, imagw2, w2=None):\r\n if w2 is None:\r\n return cv2.addWeighted(image1, float(w1) * 0.01, imagw2, float(100 - w1) * 0.01, 0)\r\n else:\r\n return cv2.addWeighted(image1, w1 * 0.01, imagw2, w2 * 0.01, 0)\r\n\r\n\r\ndef houghLinesP(image, rho=1.0, theta=np.pi/180, threshold=100, minLineLength=10, maxLineGap=100):\r\n return cv2.HoughLinesP(image, rho, theta, threshold, minLineLength=minLineLength, maxLineGap=maxLineGap)\r\n\r\n\r\ndef drawHoughLinesP(image, lines):\r\n result = imageCopy(image)\r\n if len(image.shape) == 2:\r\n result = convertColor(image, cv2.COLOR_GRAY2BGR)\r\n for i in range(len(lines)):\r\n for x1, y1, x2, y2 in lines[i]:\r\n cv2.line(result, (x1, y1), (x2, y2), (0, 0, 255), 3)\r\n return result\r\n\r\n\r\ndef splitLines(lines):\r\n left_x = []\r\n left_y 
= []\r\n right_x = []\r\n right_y = []\r\n for line in lines:\r\n x1 = line[0,0]\r\n y1 = line[0,1]\r\n x2 = line[0,2]\r\n y2 = line[0,3]\r\n slope = (float)(y2-y1)/(float)(x2-x1)\r\n if abs(slope) < 0.5:\r\n continue\r\n if slope <= 0:\r\n left_x.append([x1, x2])\r\n left_y.append([y1, y2])\r\n else:\r\n right_x.append([x1, x2])\r\n right_y.append([y1, y2])\r\n return left_x, left_y, right_x, right_y\r\n\r\n\r\ndef meanPoint(x):\r\n sum1 = 0\r\n sum2 = 0\r\n for x1, x2 in x:\r\n sum1 += x1\r\n sum2 += x2\r\n sum1 = int(float(sum1)/float(len(x)))\r\n sum2 = int(float(sum2)/float(len(x)))\r\n return [sum1, sum2]\r\n\r\ndef medianPoint(x):\r\n xx = sorted(x)\r\n return xx[(int)(len(xx)/2)]\r\n \r\n\r\ndef interpolate(list_x, list_y, y):\r\n x1 = list_x[0]\r\n x2 = list_x[1]\r\n y1 = list_y[0]\r\n y2 = list_y[1]\r\n return int(float(y - y1) * float(x2-x1) / float(y2-y1) + x1)\r\n\r\n \r\ndef lineFitting(image, left_x, left_y, right_x, right_y):\r\n result = imageCopy(image)\r\n height = image.shape[0]\r\n lx = meanPoint(left_x)\r\n ly = meanPoint(left_y)\r\n rx = meanPoint(right_x)\r\n ry = meanPoint(right_y)\r\n min_y = int(height*0.6)\r\n max_y = height\r\n min_x_left = interpolate(lx, ly, min_y)\r\n max_x_left = interpolate(lx, ly, max_y)\r\n min_x_right = interpolate(rx, ry, min_y)\r\n max_x_right = interpolate(rx, ry, max_y)\r\n cv2.line(result, (min_x_left, min_y), (max_x_left, max_y), (0, 0, 255), 3)\r\n cv2.line(result, (min_x_right, min_y), (max_x_right, max_y), (0, 0, 255), 3)\r\n return result\r\n\r\n\r\ndef frameProcessing(frame):\r\n result = imageCopy(frame)\r\n height, width = result.shape[:2]\r\n pt1 = (width*0.45, height*0.6)\r\n pt2 = (width*0.55, height*0.6)\r\n pt3 = (width*0.95, height*1.0)\r\n pt4 = (width*0.05, height*1.0)\r\n roi_corners = np.array([[pt1, pt2, pt3, pt4]], dtype=np.int32)\r\n result = polyROI(result, roi_corners)\r\n\r\n lower_white_hls = np.array([0, 200, 0])\r\n upper_white_hls = np.array([179, 255, 255])\r\n lower_yellow_hls = np.array([15, 30, 115])\r\n upper_yellow_hls = np.array([35, 200, 255])\r\n W_hls = splitColor(result, lower_white_hls, upper_white_hls, cv2.COLOR_BGR2HLS)\r\n Y_hls = splitColor(result, lower_yellow_hls, upper_yellow_hls, cv2.COLOR_BGR2HLS)\r\n result = W_hls+Y_hls\r\n result = cannyEdge(result, 100, 200)\r\n result = houghLinesP(result, 1, np.pi/180, 10, 10)\r\n lx, ly, rx, ry = splitLines(result)\r\n result = lineFitting(frame, lx, ly, rx, ry)\r\n return result\r\n\r\npath = \"/home/nw/Desktop/OpenCV_in_Ubuntu/Data/Lane_Detection_Videos/\"\r\nroadVideo_01 = \"solidYellowLeft.mp4\"\r\nroadVideo_02 = \"solidWhiteRight.mp4\"\r\n\r\nopenPath = path+roadVideo_02\r\n\r\ncap = cv2.VideoCapture(openPath)\r\n\r\n# Get frame per second information\r\nfps = cap.get(cv2.CAP_PROP_FPS)\r\nprint(\"Frames per second using video.get(cv2.CAP_PROP_FPS) : {0}\".format(fps))\r\n# Get width and height information\r\nwidth = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))\r\nprint(width)\r\nheight = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))\r\nprint(height)\r\n# Define the codec and create VideoWriter object\r\nfourcc = int(cv2.VideoWriter_fourcc(*'DIVX'))\r\nprint(fourcc)\r\nout = cv2.VideoWriter('output_Single_Object_Tracker.avi', fourcc, fps, (width, height), True)\r\n\r\nif tracker_type == 'BOOSTING':\r\n tracker = cv2.TrackerBoosting_create()\r\nif tracker_type == 'MIL':\r\n tracker = cv2.TrackerMIL_create()\r\nif tracker_type == 'KCF':\r\n tracker = cv2.TrackerKCF_create()\r\nif tracker_type == 'TLD':\r\n tracker = cv2.TrackerTLD_create()\r\nif 
tracker_type == 'MEDIANFLOW':\r\n tracker = cv2.TrackerMedianFlow_create()\r\n \r\ncv2.namedWindow(\"Input\", cv2.WINDOW_GUI_EXPANDED)\r\ncv2.namedWindow(\"Output\", cv2.WINDOW_GUI_EXPANDED)\r\nret, frame = cap.read()\r\nif(ret):\r\n bbox = cv2.selectROI(frame, False)\r\n ok = tracker.init(frame, bbox)\r\n\r\nwhile cap.isOpened():\r\n # Capture frame-by-frame\r\n ret, frame = cap.read()\r\n if ret:\r\n # Our operations on the frame come here\r\n ret, bbox = tracker.update(frame)\r\n \r\n output = frameProcessing(frame)\r\n if ret:\r\n # Tracking success\r\n p1 = (int(bbox[0]), int(bbox[1]))\r\n p2 = (int(bbox[0] + bbox[2]), int(bbox[1] + bbox[3]))\r\n cv2.rectangle(output, p1, p2, (255,0,0), 2, 1)\r\n else :\r\n # Tracking failure\r\n cv2.putText(output, \"Tracking failure detected\", (100,80), cv2.FONT_HERSHEY_SIMPLEX, 0.75,(0,0,255),2)\r\n # Display tracker type on frame\r\n cv2.putText(output, tracker_type + \" Tracker\", (100,20), cv2.FONT_HERSHEY_SIMPLEX, 0.75, (50,170,50),2);\r\n \r\n # Display FPS on frame\r\n cv2.putText(output, \"FPS : \" + str(int(fps)), (100,50), cv2.FONT_HERSHEY_SIMPLEX, 0.75, (50,170,50), 2);\r\n # Write frame-by-frame\r\n out.write(output)\r\n # Display the resulting frame\r\n cv2.imshow(\"Input\", frame)\r\n cv2.imshow(\"Output\", output)\r\n\r\n else:\r\n break\r\n\r\n # waitKey(int(1000.0/fps)) for matching fps of video\r\n if cv2.waitKey(int(1000.0/fps)) & 0xFF == ord('q'):\r\n break\r\n\r\n# When everything done, release the capture\r\ncap.release()\r\nout.release()\r\n\r\ncv2.destroyAllWindows()\r\n","sub_path":"courses/w10_opencv/source/OpenCV_in_Ubuntu/Python/2nd_07H/52_Video_Single_Object_tracking.py","file_name":"52_Video_Single_Object_tracking.py","file_ext":"py","file_size_in_byte":7280,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"3018777","text":"import pathlib\nimport webbrowser\nfrom tests import BaseTestStoreMethods\nfrom store import DropboxStore\n\n\nclass TestDropboxStore(BaseTestStoreMethods.TestStoreMethods):\n def setUp(self):\n self.store_class = DropboxStore\n self.store_name = 'test-dropbox'\n self.store = self.store_class(self.config, self.store_name, self.tokenbox)\n if self.store.authorized() is False:\n webbrowser.open(self.store.get_authorization_url())\n res = input('response url :')\n self.store.fetch_token(res)\n\n def tearDown(self):\n test_dir = pathlib.PurePath('/')\n entries = self.store.get_list(test_dir)\n for entry in entries:\n self.store.remove(test_dir / entry.name)\n del self.store\n","sub_path":"tests/test_dropbox_store.py","file_name":"test_dropbox_store.py","file_ext":"py","file_size_in_byte":764,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"391557769","text":"\r\n# 面积图--DTM训练结果总图\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport pandas as pd\r\n\r\n#中文及负号处理,字体\r\nplt.rcParams['font.sans-serif'] = 'Microsoft YaHei'\r\nplt.rcParams['axes.unicode_minus'] = False\r\nfont1 = {'family': 'Times New Roman',\r\n 'weight': 'normal',\r\n 'size': 8,\r\n }\r\nfont2 = {'family': 'Times New Roman',\r\n 'weight': 'normal',\r\n 'size': 10,\r\n }\r\n\r\ndef plotDTMTotal():\r\n # figure\r\n fig = plt.figure(figsize=(6, 6))\r\n x = np.arange(2010, 2020, dtype=int) # x坐标年份\r\n # data\r\n for i in range(1,7):\r\n sheetname = 'Sheet'+str(i)\r\n tmpdf = pd.read_excel(r'D:\\tmpWORKSTATION\\testdtm.xlsx', sheet_name=sheetname)\r\n y = tmpdf.transpose().values\r\n ax = fig.add_subplot(2,3,i)\r\n labels = 
list(tmpdf.keys())\r\n plt.xlim(2010, 2019) # 设置x的范围\r\n ax.stackplot(x, y, labels=labels) # 堆积面积图\r\n ax.set_xticks(range(2010, 2020, 3)) # 横坐标标签\r\n plt.xlabel('', fontdict=font2) # 横纵坐标\r\n plt.ylabel('', fontdict=font2)\r\n if i == 3: # 添加图例\r\n plt.legend(labels=labels,\r\n prop=font1, # Title for the legend\r\n bbox_to_anchor=(1.05, 1),\r\n loc='upper left',\r\n borderaxespad=0\r\n )\r\n\r\n # fig.tight_layout()#调整整体空白\r\n plt.subplots_adjust(wspace=0.3, hspace=0.2)#调整子图间距,wspace:横向空白,hspace:纵向空白\r\n plt.show()\r\n\r\n# 各省子图\r\ndef plotDTMProvincial():\r\n facetnum = 3 # 设置将要绘制的相关主题个数\r\n fig = plt.figure(figsize=(6, 4))\r\n x = np.arange(2010, 2020, dtype=int) # x坐标年份\r\n markes = ['-o', '-s', '-^', '-p', '-^', '-v', '-p', '-d', '-h', '-2', '-8', '-6'] #折线图标记\r\n for i in range(1, facetnum+1):\r\n sheetname = 'Sheet' + str(i)\r\n tmpdf = pd.read_excel(r'D:\\tmpWORKSTATION\\testdtm_prov1.xlsx', sheet_name=sheetname)\r\n labels = list(tmpdf.keys())\r\n y = tmpdf.transpose().values\r\n ax = fig.add_subplot(facetnum, 1, i)\r\n for j, yval in enumerate(y.tolist()):\r\n plt.xlim(2009.5, 2019.5) # 设置x的范围\r\n ax.plot(x, yval, markes[j])\r\n ax.set_xticks(range(2010, 2020, 1)) # 横坐标标签\r\n plt.legend(labels,\r\n loc='upper left',\r\n bbox_to_anchor=(1.01,1),\r\n borderaxespad=0,\r\n labelspacing=0.2)# vertical space between legend entries\r\n\r\n plt.subplots_adjust(wspace=0.3, hspace=0.3) # 调整子图间距,wspace:横向空白,hspace:纵向空白\r\n fig.subplots_adjust(right=0.8)\r\n fig.savefig('D:/test.png', dpi=600)\r\n # fig.show()\r\n\r\nif __name__ == '__main__':\r\n # plotDTMProvincial()\r\n plotDTMTotal()\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"Visualization/dtmVis.py","file_name":"dtmVis.py","file_ext":"py","file_size_in_byte":2932,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"44223711","text":"import matplotlib.pyplot as plt\nimport pandas as pd\nimport csv\nfrom datetime import datetime\n\ndate = []\nclose_values = []\naverage_of_last_365_days = []\nstart = 0\nend = 366\nav = 0\nsecond_x_values = []\n\nwith open('BTC-USD.csv') as csv_file:\n reader = csv.reader(csv_file)\n for row in reader:\n if 'Date' in row or 'null' in row:\n pass\n else:\n date.append(row[0])\n close_values.append(float(row[4]))\n\n\nwhile end != len(close_values)-1:\n av = 0\n for i in range(start,end):\n av += close_values[i]\n average_of_last_365_days.append(av/365)\n start += 1\n end += 1\n\n\nx_values = [datetime.strptime(d,'%d/%m/%Y').strftime('%m/%d/%Y') for d in date]\ny_values = close_values\n\ndel date[:367]\nsecond_x_values=[datetime.strptime(d,'%d/%m/%Y').strftime('%m/%d/%Y') for d in date]\nsecond_y_values=average_of_last_365_days\n\n\ndata = {'x':x_values,\n 'y':y_values\n }\n\nsecond_line = {'x':second_x_values,\n 'y':second_y_values}\n\n\ndf = pd.DataFrame({'x_value':x_values,'y_value':y_values})\nplt.plot('x_value','y_value',data=df,marker='',color='black',linewidth=2)\n\ndf = pd.DataFrame({'x_value':second_x_values,'y_value':second_y_values})\nplt.plot('x_value','y_value',data=df,marker='',color='red',linewidth=2)\n\nplt.show()\n","sub_path":"Data_analysis/plot_data_from_file_using_pandas.py","file_name":"plot_data_from_file_using_pandas.py","file_ext":"py","file_size_in_byte":1298,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"432223003","text":"from constants import *\r\nfrom ZurichInstruments_UHFLI import ZurichInstruments_UHFLI\r\nimport sys\r\nimport os\r\nimport shutil\r\nimport qt\r\nimport 
progressbar\r\nimport numpy as np\r\nimport time\r\n\r\nuhf = ZurichInstruments_UHFLI('dev2232')\r\nrte = qt.instruments.create('RTE1104', 'RhodeSchwartz_RTE1104', address = 'TCPIP0::192.168.1.6::INSTR')\r\n# aps = qt.instruments.create('APSYN420',\t'AnaPico_APSYN420',\t\t\taddress = APSYN420_ADDRESS)\r\n# fsv = qt.instruments.create('FSV', 'RhodeSchwartz_FSV', address = FSV_ADDRESS)\r\n# smf = qt.instruments.create('SMF100', 'RhodeSchwartz_SMF100', address = SMF100_ADDRESS)\r\n# aps = qt.instruments.create('APSYN420',\t'AnaPico_APSYN420',\t\t\taddress = APSYN420_ADDRESS)\r\n\r\n\r\n# center_freq = 7.82067* GHz\r\n# span = 70*MHz\r\n# fsv.set_centerfrequencyc(enter_freq)\r\n# fsv.set_span(span)\r\n# fsv.set_bandwidth(10*Hz)\r\n\r\ndelay_length_start = 0\r\ndelay_length_stop = 0\r\ndelay_length_numpoints = 10\r\ndelay_length_array = np.linspace(delay_length_start, delay_length_stop, delay_length_numpoints)\r\n\r\ncontrol_start = 5.6365*GHz - 20*MHz\r\ncontrol_stop = 5.6365*GHz + 20*MHz\r\n# control_start = 5.6*GHz\r\n# control_stop = 5.68*GHz\r\ncontrol_numpoints = 11\r\ncontrol_array = np.linspace(control_start, control_stop, control_numpoints)\r\n\r\npower_start = 120\r\npower_stop = 120 #at SA -36.35 dBm, after 4dB cable and 20dB directional coupler\r\npower_numpoints = 1\r\npower_array = np.linspace(power_start, power_stop, power_numpoints)\r\n\r\ngauss_width = 32 #in sample points\r\n\r\n\r\ndelay_start = 0\r\ndelay_stop = 10*1800\r\ndelay_inc = 32\r\ndelay_array = np.arange(delay_start,delay_stop,delay_inc)\r\ndelay_numpoints = len(delay_array)#int((pulse_stop-pulse_start)/pulse_inc)\r\n# pulse_array = np.linspace(pulse_start, pulse_stop, pulse_numpoints+1)\r\n\r\n\r\ndef generate_meta_file(no_of_delay, delay):\r\n\tmetafile = open('%s.meta.txt' % data.get_filepath()[:-4], 'w')\r\n\tmetafile.write('#inner loop\\n%s\\n%s\\n%s\\n%s\\n'%\r\n\t\t\t(record_length, start_time/us, stop_time/us, 'Time(us)'))\r\n\tmetafile.write('#outer loop\\n%s\\n%s\\n%s\\n%s\\n'%\r\n\t\t\t(no_of_delay, delay, delay_array[0], 'Delay Length'))\r\n\tmetafile.write('#outermost loop (unused)\\n1\\n0\\n1\\nNothing\\n')\r\n\r\n\tmetafile.write('#for each of the values\\n')\r\n\tvalues = data.get_values()\r\n\ti=0\r\n\twhile i\", auth_token\n # is_valid, user_object = user_service.validate_and_get_user(auth_token)\n #\n # if not is_valid:\n # return {\"success\": False, \"message\": \"Invalid User !!\"}, 401\n\n if session['key'] != auth_token:\n return {\"success\": False, \"message\": \"Invalid User !!\"}\n\n request_data = request.get_json()\n username = session['username']\n\n result = question_post_handler.create_question(request_data, username)\n if result:\n response_dict = question_utils.get_question_dict(result)\n return jsonify({\"question\": response_dict})\n else:\n return {\"success\": False}\n\n def get(self, user_id=None):\n if user_id:\n question_objects = question_get_handler.get_question_for_user(user_id)\n else:\n question_objects = question_get_handler.get_all_questions()\n\n response_dicts = [question_utils.get_question_dict(x) for x in question_objects]\n return jsonify({\"questions\": response_dicts})\n","sub_path":"feed_service/service_apis/question.py","file_name":"question.py","file_ext":"py","file_size_in_byte":1706,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"617329831","text":"#! 
/usr/bin/python\nimport sys\nimport os\nimport argparse\nimport array\nimport multiprocessing\n\n# Definicion de los argumentos\nparser = argparse.ArgumentParser(description='Tp2 - procesa ppm')\nparser.add_argument('-s', '--size', type=int, default=10,\n help='Bloque de lectura')\nparser.add_argument('-f', '--file', help='Archivo a procesar')\nparser.add_argument('-m', '--message', help='mensaje esteganografico')\nparser.add_argument('-t', '--offset', default=True,\n help='Offset en pixels del inicio del raster')\nparser.add_argument('-i', '--interleave', default=True,\n help='Interleave de modificacion en pixel')\nparser.add_argument('-o', '--output', default='Steganography.ppm', help='Estego-mensaje')\nparser.add_argument('-c', '--cifrado', default=0, help='Cifrado rot13')\n\nargs = parser.parse_args()\n\nqueuec = multiprocessing.Queue()\n\n\ndef main():\n # abrir archivo\n path = os.path.dirname(os.path.abspath(__file__))\n size = int(args.size)\n try:\n archivo = os.open(path + \"/\" + args.file, os.O_RDONLY)\n except FileNotFoundError:\n print(\"El archivo no se encuentra en el directorio\")\n sys.exit()\n leido = os.read(archivo, size)\n dimen = False\n path = os.path.dirname(__file__) + \"/\"\n try:\n with open(path + args.message, \"rb\") as archivo_msg:\n message = archivo_msg.read()\n except FileNotFoundError:\n print(\"El archivo no se encuentra en el directorio\")\n sys.exit()\n # sacar comentario\n i = 0\n if i == 0:\n for i in range(leido.count(b\"\\n# \")):\n barra_n_as = leido.find(b\"\\n# \")\n barra_n = leido.find(b\"\\n\", barra_n_as + 1)\n leido = leido.replace(leido[barra_n_as:barra_n], b\"\")\n # sacar encabezado\n primer_n = leido.find(b\"\\n\") + 1\n seg_n = leido.find(b\"\\n\", primer_n) + 1\n ultima_barra_n = leido.find(b\"\\n\", seg_n) + 1\n encabezado = leido[:ultima_barra_n].decode()\n if args.cifrado != 0:\n encabezado_new = encabezado + '#UMCOMPU2-C {} {} {}'.format(args.offset, args.interleave, len(message) + 4)\n verdadera = \"abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ\"\n falsa = \"nopqrstuvwxyzabcdefghijklmNOPQRSTUVWXYZABCDEFGHIJKLM\"\n transform = dict(zip(verdadera, falsa))\n message = ''.join(transform.get(char, char) for char in str(message))\n else:\n encabezado_new = encabezado + '#UMCOMPU2N {} {} {}'.format(args.offset, args.interleave, len(message) + 4)\n print(encabezado_new)\n # saco ancho y largo\n linea = leido.splitlines()\n for i in range(len(linea)):\n if dimen is False:\n word = linea[i].split()\n if len(word) == 2:\n width = int(word[0])\n height = int(word[1])\n dimen = True\n num_bytes = width * height * 3 // 8\n # guardo el cuerpo\n cuerpo = leido[ultima_barra_n:]\n # envio primer parte del cuerpo\n queuec.put(cuerpo)\n # creo hijos\n h_c = multiprocessing.Process(target=hide_info, args=(encabezado_new, queuec, num_bytes, message))\n # inicio los hijos\n h_c.start()\n # paso el resto del cuerpo\n while True:\n cuerpo = os.read(archivo, args.size)\n queuec.put(cuerpo)\n if len(cuerpo) != args.size:\n break\n queuec.put(\"Terminamos\")\n # uno al los hijos con el padre\n h_c.join()\n os.close(archivo)\n\n\ndef hide_info(encabezado_new, queuec, num_bytes, message):\n imagec = []\n cuerpo = b''\n k = 0\n start = 0\n if message != \" \":\n binario = ''.join(format(ord(x), '08b') for x in str(message))\n else:\n raise TypeError(\"Mensaje vacío. 
Por favor ingrese un mensaje\")\n\n # check if the num of bytes to encore is less than max bytes in the image\n if len(message) > num_bytes:\n raise ValueError(\"Error bytes insuficientes\")\n\n while True:\n mensaje = queuec.get()\n if mensaje == \"Terminamos\":\n break\n else:\n cuerpo = cuerpo + mensaje\n cuerpo_c = [i for i in cuerpo]\n x = len(binario)\n for j in range(0, len(cuerpo_c), int(args.interleave)):\n valor = cuerpo_c[j]\n if start < int(args.offset):\n start += 1\n imagec.append(cuerpo_c[j])\n else:\n if x > k:\n bit = binario[k]\n if valor % 2 != int(bit):\n if valor > 255:\n valor -= 1\n else:\n valor += 1\n k += 1\n imagec.append(valor)\n else:\n imagec.append(cuerpo_c[j])\n image_c = array.array('B', imagec)\n with open('{}'.format(args.output), 'wb') as f:\n f.write(bytearray(encabezado_new, 'ascii'))\n image_c.tofile(f)\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"alumnos/58103-Scalco-Valentina/TP2/body.py","file_name":"body.py","file_ext":"py","file_size_in_byte":4854,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"295451664","text":"import cv2\n\ndef show_image(img,size=800):\n cv2.namedWindow('image', cv2.WINDOW_NORMAL)\n cv2.resizeWindow('image', size, size)\n cv2.imshow('image',img)\n cv2.waitKey(0)\n cv2.destroyAllWindows()\n\n\nvidcap = cv2.VideoCapture('NPF_2.avi')\n_,frame = vidcap.read()\nshow_image(frame)\n","sub_path":"PER_fly_load.py","file_name":"PER_fly_load.py","file_ext":"py","file_size_in_byte":290,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"181992924","text":"lg_diam = 500 * 0.55\nlg_rad = lg_diam/2\nlg_circ = PI * lg_diam\ncx = 500/2\ncy = 400/2\n\ndef setup():\n background(100)\n smooth()\n size(500,400)\n colorMode(HSB)\n \ndef draw():\n global lg_rad\n fill(0,10)\n rect(0,0,width,height)\n nbr_circles =int( map(mouseX, 0, width,6,50))\n sm_diam = (lg_circ/nbr_circles)\n myColor = map(mouseY,0,height, 150, 255)\n \n filter(BLUR, 3)\n fill(myColor,180, 190, 100)\n \n for i in range (1, nbr_circles+1):\n angle = i * TWO_PI / nbr_circles\n x = cx + cos(angle) * lg_rad\n y = cy + sin(angle) * lg_rad\n ellipse(x,y,sm_diam, sm_diam)\n \ndef keyPressed():\n if (key== \"s\"):\n saveFrame(\"myProcessing \" + str(frameCount) + \".jpg \")\n","sub_path":"Processing/Section 10/Listing79/Listing79.pyde","file_name":"Listing79.pyde","file_ext":"pyde","file_size_in_byte":741,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"105589466","text":"import csv\nfrom collections import Counter\n\ninv = {'rope': 1, 'torch': 6, 'gold coin': 42, 'dagger': 1, 'arrow': 12}\ndef import_inventory(filename):\n global inv\n with open(filename) as csvfile:\n new_inv ={}\n readCSV = csv.reader(csvfile, delimiter=\",\")\n for row in readCSV:\n item_name = []\n count = []\n for row in readCSV:\n item = row[0]\n num = row[1]\n count.append(num)\n item_name.append(item)\n new_inv[item] = int(num)\n imported_items= new_inv.keys()\n inv = Counter(new_inv) + Counter(inv)\n\n\n# list = [a, b, c]\n# dictionary = {}\n# counter = 0\n# for i in list:\n# dictionary[i] = counter\n# counter += 1\n#\n# dictionary = {a:1, b:2, c:3}\n#\n#\n# if item in inv:\n# inv[item] = inv.get(item) + int(num)\n# if item not in inv:\n# inv.update({item : num})\n# print(inv)\nimport_inventory(\"invent.txt\")\n\nprint(inv)\n\n\n# A more generic solution, which works for non-numeric values as well:\n#\n# a = {'a': 'foo', 'b':'bar', 
'c': 'baz'}\n# b = {'a': 'spam', 'c':'ham', 'x': 'blah'}\n#\n# r = dict(a.items() + b.items() +\n# [(k, a[k] + b[k]) for k in set(b) & set(a)])\n# or even more generic:\n#\n# def combine_dicts(a, b, op=operator.add):\n# return dict(a.items() + b.items() +\n# [(k, op(a[k], b[k])) for k in set(b) & set(a)])\n# For example:\n#\n# a = {'a': 2, 'b':3, 'c':4}\n# b = {'a': 5, 'c':6, 'x':7}\n#\n# import operator\n# print combine_dicts(a, b, operator.mul)\n# {'a': 10, 'x': 7, 'c': 24, 'b': 3}\nA = {'a':1, 'b':2, 'c':3}\nB = {'b':3, 'c':4, 'd':5}\nc = {x: A.get(x, 0) + B.get(x, 0) for x in set(A).union(B)}\nprint(c)\n","sub_path":"csv_test.py","file_name":"csv_test.py","file_ext":"py","file_size_in_byte":1735,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"26679565","text":"#!/usr/bin/env python3\nimport argparse\nfrom subprocess import PIPE, run\nfrom urllib.parse import urlparse, unquote\nimport re\nimport os\nimport random\nimport threading\nimport multiprocessing\nimport requests\n\n\nPARSER = argparse.ArgumentParser(description=\"Simple wordlist generator\")\nPARSER.add_argument(\"-domain\", \"-d\", type=str, help=\"Domain of the target\", required=True)\nPARSER.add_argument(\"-threads\", \"-t\", nargs='?', default=multiprocessing.cpu_count(),\n type=int, help=\"Threads amount\", const=multiprocessing.cpu_count())\nPARSER.add_argument(\"-amount\", \"-a\", nargs='?', default=2000,\n type=int, help=\"Amount of gau urls to get\", const=2000)\n\nARGS = PARSER.parse_args()\nDOMAIN = ARGS.__dict__[\"domain\"]\nGAU_AMOUNT = ARGS.__dict__[\"amount\"]\nTHREADS_AMOUNT = ARGS.__dict__[\"threads\"]\n\nDENY_LIST = set()\nWORDS = set()\n\n\ndef out(command):\n \"Return output of the os command\"\n result = run(command, stdout=PIPE, stderr=PIPE, universal_newlines=True, shell=True, check=True)\n return result.stdout\n\n\ndef fill_deny_list():\n \"Exclude wordlists from 'denylists' folder\"\n for filename in os.listdir(os.path.dirname(os.path.realpath(__file__)) + \"/denylists/\"):\n with open(os.path.dirname(os.path.realpath(__file__)) + \"/denylists/\" + filename, 'r') as f:\n for line in f:\n DENY_LIST.add(line[:-1].lower())\n\n\ndef add_words(links):\n for line in links:\n url = urlparse(line)\n path = unquote(url.path)\n for word in path.split(\"/\"):\n # Only path symbols, exclude from denylist and remove css pixel args (10px)\n if re.match(\"^[A-Za-z0-9_-]+$\", word) and word.lower() not in DENY_LIST and not re.match(\"^.*[0-9]px\", word):\n # Max domain part length is 63, reduces junk\n if len(word) <= 63 and word not in WORDS:\n WORDS.add(word)\n\n\ndef words_scrapping(url):\n \"Scrape all words on page\"\n for i in range(int(GAU_AMOUNT / THREADS_AMOUNT)):\n words = set()\n try:\n response = requests.get(url)\n words.update(unquote(response.text).replace(\n \"/\", \" \").split(\" \"))\n add_words(words)\n except:\n pass\n\n\nGAU_URLS = out(\"gau {} | head -n {}\".format(DOMAIN, GAU_AMOUNT)).splitlines()\n\nif not GAU_URLS:\n exit(0)\n\nfill_deny_list()\n\nthreads = []\nfor i in range(THREADS_AMOUNT):\n t = threading.Thread(target=words_scrapping,\n args=(random.choice(GAU_URLS),))\n t.start()\n threads.append(t)\n\nfor t in threads:\n t.join()\n\nfor word in WORDS:\n print(word.replace(\"\\n\", \"\"))\n","sub_path":"wordlist_generator.py","file_name":"wordlist_generator.py","file_ext":"py","file_size_in_byte":2645,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"563144245","text":"import 
cv2\nimport os\nimport time\nimport face_recognition\n\npath = os.path.join(\"img\", \"face_recognition\") # 模型数据图片目录\nsave_path = os.path.join(\"img\", \"test_result\")\nextend_width = 100\nmax_width = 700\n\ntotal_image_name = []\ntotal_face_encoding = []\n\n\ndef init_image_model():\n for fn in os.listdir(path): # fn 表示的是读取模型文件夹下各个文件的文件名\n file_extend_name = os.path.splitext(os.path.join(os.getcwd(), path, fn))[1]\n if file_extend_name != \".png\" and file_extend_name != \".jpg\" and file_extend_name != \".jpeg\":\n continue # 跳过非图片文件\n print(path + \"/\" + fn)\n total_face_encoding.append(\n face_recognition.face_encodings(\n face_recognition.load_image_file(os.path.join(path, fn)))[0]) # 调用库函数的方法,装载图片模型\n fn = fn[:(len(fn) - 4)] # 截取图片名(这里应该把images文件中的图片名命名为为人物名)\n total_image_name.append(fn) # 图片名字列表\n\n\ndef draw_face_rectangle(frame, face_locations, face_encodings):\n name = \"Unknown\"\n for (top, right, bottom, left), face_encoding in zip(\n face_locations, face_encodings):\n # 看看面部是否与已知人脸相匹配。\n for i, v in enumerate(total_face_encoding):\n match = face_recognition.compare_faces(\n [v], face_encoding, tolerance=0.45)\n name = \"Unknown\"\n if match[0]:\n name = total_image_name[i]\n break\n # 画出一个框,框住脸\n cv2.rectangle(frame, (left, top), (right, bottom), (0, 0, 255), 2)\n # 画出一个带名字的标签,放在框下\n cv2.rectangle(frame, (left, bottom - 35), (right, bottom), (0, 0, 255),\n cv2.FILLED)\n font = cv2.FONT_HERSHEY_DUPLEX\n cv2.putText(frame, name, (left + 6, bottom - 6), font, 1.0,\n (255, 255, 255), 1)\n return frame\n\n\ndef detect_face(frame):\n # 调用库函数,查找图片当中的人脸和对应的坐标\n face_locations = face_recognition.face_locations(frame)\n face_encodings = face_recognition.face_encodings(frame, face_locations)\n return face_locations, face_encodings\n\n\ndef extend_face_size(frame, left, top, right, bottom, extend_limit_width=extend_width):\n # 对剪裁的区域进行扩张,可选传入指定扩张的pixel数目\n resize = calculate_size(frame, extend_limit_width)\n print((left, top), (right, bottom))\n left = left - resize[0]\n top = top - resize[1]\n right = right + resize[0]\n bottom = bottom + resize[1]\n return left, top, right, bottom\n\n\ndef fix_size(frame, left, top, right, bottom):\n # 当扩大的范围超过图片本身边界的时候,对范围大小进行调整\n image_width, image_height = get_shape(frame)\n if left < 0:\n left = 0\n if top < 0:\n top = 0\n if right > image_width:\n right = image_width\n if bottom > image_height:\n bottom = image_height\n return left, top, right, bottom\n\n\ndef get_shape(frame):\n # 获取图片的宽和高\n shape = frame.shape\n return shape[1], shape[0]\n\n\ndef calculate_size(frame, limit_width):\n image_width, image_height = get_shape(frame)\n if (image_width < limit_width):\n return (image_width, image_height) # 如果图片的尺寸小于上限,直接返回图片尺寸\n ratio = image_width / image_height # 计算图片的比例\n max_height = int(limit_width / ratio) # 根据传入给定的宽,通过比例,计算出对应的高\n resize = (limit_width, max_height) # 调整图片尺寸\n return resize\n\n\ndef gettime():\n return time.ctime().replace(\":\", \"_\")\n\n\ndef save_file(frame):\n save_time = gettime()\n save_file_name = os.path.join(os.getcwd() + save_path, save_time) + \".jpg\"\n static_file_name = \"/static/\" + save_time + \".jpg\"\n cv2.imwrite(save_file_name, frame)\n cv2.imwrite(os.path.join(os.getcwd(), \"static\", save_time) + \".jpg\", frame)\n return save_file_name, static_file_name\n\n\ndef compress_frame(frame, limit_width=max_width):\n resize = calculate_size(frame, limit_width)\n return cv2.resize(frame, resize, 
interpolation=cv2.INTER_AREA)\n\n\ninit_image_model()\n","sub_path":"tools/generate_model_tools.py","file_name":"generate_model_tools.py","file_ext":"py","file_size_in_byte":4287,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"208210292","text":"def quadrilateral():\n print('For the Quadilateral ABCD:')\n XA = int(input('Enter the x coordinate of point A:'))\n YA = int(input('Enter the y coordinate of point A:'))\n XB = int(input('Enter the X coordinate of point B:'))\n YB = int(input('Enter the y coordinate of point B:'))\n XC = int(input('Enter the X coordinate of point C:'))\n YC = int(input('Enter the y coordinate of point C:'))\n XD = int(input('Enter the X coordinate of point D:'))\n YD = int(input('Enter the y coordinate of point D:'))\n if (XA-XB)==0:\n m1='z'\n else:\n m1 = (YA-YB)/(XA-XB)\n if (XB-XC)==0:\n m2='z'\n else:\n m2=(YB-YC)/(XB-XC)\n if (XC-XD)==0:\n m3='z'\n else:\n m3=(YC-YD)/(XC-XD)\n if (XA-XD)==0:\n m4='z'\n else:\n m4 = (YA-YD)/(XA - XD)\n\n def f(p,q,r,s):\n ((p-r)**2+(q-s)**2)**(1/2)\n AB=((XA-XB)**2+(YA-YB)**2)**(1/2)\n BC=((XB-XC)**2+(YB-YC)**2)**(1/2)\n CD=((XC-XD)**2+(YC-YD)**2)**(1/2)\n DA=((XD-XA)**2+(YD-YA)**2)**(1/2)\n AC=((XA-XC)**2+(YA-YC)**2)**(1/2)\n BD=((XB-XD)**2+(YB-YD)**2)**(1/2)\n if AB==BC and BC==CD and CD==DA:\n if AC==BD:\n print('Square')\n else:\n print('Rhombus')\n elif AB==CD and DA==BC and DA!=AB:\n if AC==BD:\n print('Rectangle')\n else:\n print('Parallelogram')\n elif m1==m3 and m2!=m4:\n\n print('Trapezium')\n elif m1!=m3 and m2==m4:\n print('Trapezium')\n elif AB==BC and CD==DA and AB!=CD:\n print('Kite')\n elif AB==DA and BC==CD and AB!=BC:\n print('Kite')\n else:\n print('Not a Special Quadrilateral')\n\n\n","sub_path":"Modules/quadrilateral_type.py","file_name":"quadrilateral_type.py","file_ext":"py","file_size_in_byte":1655,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"37564696","text":"#Import socket module\nfrom socket import *\nimport sys #In order to terminate the program\n\nserverSocket = socket(AF_INET, SOCK_STREAM)\n\n#Prepare a server socket\nserverSocket.bind(('',44444))\nserverSocket.listen(1)\n\nwhile True:\n #Establish the connection\n print('Ready to server...')\n connectionSocket,addr = serverSocket.accept()\n\n try:\n message = connectionSocket.recv(1024).decode()\n\n filename = message.split()[1]\n f = open(filename[1:])\n outputdata = f.read()\n\n #Send one HTTP header line into socket\n connectionSocket.send(\"HTTP/1.1 200 OK\\r\\nContent-Type: text/html\\r\\n\\r\\n\".encode())\n\n #Send the content of the requested file to the client\n for i in range(0, len(outputdata)):\n connectionSocket.send(outputdata[i].encode())\n\n connectionSocket.send(\"\\r\\n\".encode())\n connectionSocket.close()\n except IOError:\n outputdata = \"
<html><head><title>404 Not Found!</title></head><body><h1>404 Not Found!</h1></body></html>
\"\n\n #Send response message for file not found (404)\n connectionSocket.send(\"HTTP/1.1 404 Not Found\\r\\nContent-Type:text/html\\r\\n\\r\\n\".encode())\n\n for i in range(0, len(outputdata)):\n connectionSocket.send(outputdata[i],encode())\n connectionSocket.send(\"\\r\\n\".encode())\n\n #Close client socket\n connectionSocket.close()\n\nserverSocket.close()\nsys.exit() #Terminate the program after sending the corresponding data\n","sub_path":"NYU/CS-GY6843/WebServer/WebServer.py","file_name":"WebServer.py","file_ext":"py","file_size_in_byte":1492,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"647486287","text":"from artiq.experiment import *\r\nimport numpy as np\r\nimport os \r\nimport time \r\n\r\nclass Spin_Echo_CPMG(EnvExperiment):\r\n \"\"\"Spin_Echo_CPMG\"\"\"\r\n def build(self):\r\n \r\n self.setattr_argument(\"CPMG_time\", NumberValue(10, scale=1,unit=\"times\",step=1))\r\n self.setattr_argument(\"Rabi_Time\", NumberValue(100, scale=1,unit=\"us\",step=1))\r\n self.setattr_argument(\"Gap_Start\", NumberValue(100, scale=1,unit=\"us\",step=1))\r\n self.setattr_argument(\"Gap_End\", NumberValue(100, scale=1,unit=\"us\",step=1))\r\n self.setattr_argument(\"Gap_Step\", NumberValue(100, scale=1,unit=\"ns\",step=1))\r\n \r\n \r\n \r\n self.setattr_device(\"core\")\r\n self.setattr_device(\"ttl0\")\r\n self.setattr_device(\"ttl1\")\r\n self.setattr_device(\"ttl2\")\r\n self.setattr_device(\"ttl4\")\r\n self.setattr_device(\"ttl5\")\r\n self.setattr_device(\"ttl8\")\r\n self.setattr_device(\"ttl9\")\r\n self.setattr_device(\"ttl11\")\r\n self.setattr_device(\"ttl12\")\r\n self.setattr_device(\"ttl30\")\r\n self.setattr_device(\"ttl31\")\r\n self.setattr_device(\"ttl32\")\r\n self.setattr_device(\"urukul0_ch0\")\r\n self.setattr_device(\"urukul0_ch1\")\r\n self.setattr_device(\"urukul0_cpld\")\r\n \r\n def prepare(self):\r\n \r\n self.parameter=self.get_dataset(\"para\")\r\n self.Rabi=self.get_dataset(\"Run_Uint.Rabi.Start\")\r\n self.Rabi_End=self.get_dataset(\"Run_Uint.Rabi.End\")\r\n self.Rabi_Step=self.get_dataset(\"Run_Uint.Rabi.Step\")\r\n self.Rabi_Frequency=self.get_dataset(\"Run_Uint.Rabi.Start\")\r\n self.Zeeman_Frequency=self.get_dataset(\"Run_Uint.Zeeman.Start\")\r\n #self.Zeeman_Frequency_End=self.get_dataset(\"Run_Uint.Zeeman.End\")\r\n #self.Zeeman_Frequency_Step=self.get_dataset(\"Run_Uint.Zeeman.Step\")\r\n #self.Zeeman_Repeat=self.get_dataset(\"Run_Uint.Zeeman.Repeat\")\r\n #self.Zeeman_Threshould=self.get_dataset(\"Run_Uint.Zeeman.Threshould\")\r\n self.Rabi_Threshould=self.get_dataset(\"Run_Uint.Rabi.Threshould\")\r\n\r\n self.Preparation_Frequency=self.get_dataset(\"Run_Uint.Preparation.Frequency\")\r\n self.Preparation_Attenuation=self.get_dataset(\"Run_Uint.Preparation.Attenuation\")\r\n self.Zeeman_Attenuation=self.get_dataset(\"Run_Uint.Zeeman.Attenuation\")\r\n \r\n \r\n self.Gap=self.Gap_Start\r\n self.length=int((self.Gap_End-self.Gap)/(self.Gap_Step/1000))+1\r\n # 化小数为整数\r\n self.CPMG_time=int(self.CPMG_time)\r\n \r\n print(self.length)\r\n print(self.CPMG_time)\r\n \r\n @kernel\r\n def run(self):\r\n \r\n self.core.reset()\r\n\r\n #刷新时间轴防止报错\r\n delay(2*ms)\r\n self.urukul0_cpld.init()\r\n self.urukul0_ch0.init()\r\n self.urukul0_ch1.init()\r\n self.urukul0_ch0.sw.on()#控制729的三种光频率与三种功率\r\n self.urukul0_ch1.sw.on()\r\n self.ttl0.input()\r\n self.ttl1.input()\r\n self.ttl2.output()\r\n self.ttl4.output()\r\n self.ttl5.output()\r\n self.ttl8.output()\r\n self.ttl9.output()\r\n self.ttl11.output()\r\n 
self.ttl12.output()\r\n self.ttl30.output()\r\n delay(2*ms)\r\n\r\n self.urukul0_ch0.set(self.Preparation_Frequency*MHz)#设置729态制备频率\r\n self.urukul0_ch0.set_att(self.Preparation_Attenuation)#设置729态制备功率\r\n self.urukul0_ch1.set_att(self.Zeeman_Attenuation)#设置729扫Zeeman功率\r\n \r\n delay(50*ms)\r\n \r\n if self.parameter==1:\r\n # self.length=int((self.Zeeman_Frequency_End-self.Zeeman_Frequency)/(self.Zeeman_Frequency_Step/1000))\r\n \r\n self.set_dataset(\"GapList\", np.full(self.length, np.nan), broadcast=True)\r\n self.set_dataset(\"D_List\", np.full(self.length, np.nan), broadcast=True)\r\n \r\n self.set_dataset(\"Data\", np.full(self.length, np.nan), broadcast=True)\r\n \r\n delay(1*ms)\r\n \r\n print(self.Rabi)\r\n print(self.Rabi_End)\r\n print(self.Rabi_Step/1000)\r\n \r\n delay(2*ms)\r\n \r\n t=0\r\n \r\n \r\n while self.Gap<=self.Gap_End:\r\n \r\n a=0\r\n \r\n delay(1*ms)\r\n for i in range(100):\r\n \r\n t_end=self.ttl0.gate_rising(20*ms)#从当前时刻开始记录上升沿,直到括号内的时间为止。\r\n t_edge=self.ttl0.timestamp_mu(t_end) \r\n \r\n \r\n if t_edge>0:#如果探测到触发信号的上\r\n at_mu(t_edge)\r\n \r\n delay(4*ms)\r\n print(t_edge)\r\n \r\n self.urukul0_ch1.set(self.Rabi_Frequency*MHz)\r\n #多普勒冷却\r\n self.ttl30.on()\r\n self.ttl4.on()#打开854Double Pass的AOM\r\n delay(2000*us)\r\n self.ttl30.off()\r\n self.ttl4.off()\r\n #态制备\r\n self.ttl8.on()#打开729\r\n delay(100*us)#持续态制备时长\r\n self.ttl8.off()\r\n self.ttl4.on()\r\n self.ttl5.on()#将z方向的397光打开\r\n self.ttl12.on()\r\n delay(100*us)\r\n self.ttl4.off()\r\n self.ttl5.off()\r\n self.ttl12.off()#关掉397Double Pass的光\r\n #边带冷却\r\n #边带冷却次数\r\n self.ttl4.on()#打开854Double Pass的AOM\r\n for e in range(10):\r\n delay(8*us)\r\n self.ttl8.on()#打开729\r\n delay(1*us)\r\n self.ttl8.off()\r\n \r\n self.ttl4.off()#关闭854Double Pass的AOM\r\n \r\n #态操作\r\n \r\n \r\n for e in range(10):\r\n delay(self.Gap*us)\r\n self.ttl8.on()#打开729\r\n delay(self.Rabi_Time*us)\r\n self.ttl8.off()\r\n \r\n '''\r\n self.ttl8.on()#打开729\r\n delay(self.Rabi*us)\r\n self.ttl8.off()\r\n '''\r\n #态探测\r\n self.ttl2.on()#打开397Double Pass的AOM\r\n self.ttl5.on()#打开397态探测的AOM\r\n self.ttl9.on()\r\n gate_end_mu=self.ttl1.gate_rising(5700*us)\r\n self.ttl2.off()#打开397Double Pass的AOM\r\n self.ttl5.off()#打开397态探测的AOM\r\n self.ttl9.off()# \r\n num_rising_edges=self.ttl1.count(gate_end_mu)\r\n \r\n self.set_dataset(\"Photon_Count\",num_rising_edges, broadcast=True)\r\n #计数上升沿 \r\n if num_rising_edges>self.Rabi_Threshould:\r\n a+=1\r\n \r\n self.core.reset()\r\n \r\n \r\n \r\n D=1-a/100\r\n \r\n self.mutate_dataset(\"GapList\", t, self.Gap)\r\n self.mutate_dataset(\"D_List\", t, D)\r\n \r\n t+=1\r\n \r\n self.Gap+=self.Gap_Step/1000\r\n \r\n \r\n def analyze(self):\r\n\r\n try:\r\n name=time.strftime(\"%F\")\r\n filename=\"E:/data/\"+str(name) \r\n os.mkdir(filename)\r\n except:\r\n pass\r\n \r\n D_List=self.get_dataset(\"D_List\")\r\n FrequncyList=self.get_dataset(\"GapList\")\r\n \r\n \r\n name1=time.strftime(\"%H-%M-%S\")+\"-SpinEchoCPMG\"\r\n filename1=filename+\"/\"+str(name1)\r\n \r\n file=open(filename1+\".txt\",\"a\")\r\n str4=\"Fre\"\r\n str5=\"Jump\"\r\n str6=str4+\" \"+str5+\"\\n\"\r\n file.write(str6)\r\n for i in range(self.length):\r\n str1=str(D_List[i])\r\n str2=str(FrequncyList[i])\r\n str3=str2+\" \"+str1+\"\\n\"\r\n file.write(str3)\r\n \r\n file.close()\r\n \r\n \r\n ","sub_path":"repository/spin_echo_CPMG.py","file_name":"spin_echo_CPMG.py","file_ext":"py","file_size_in_byte":8826,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} 
+{"seq_id":"259705483","text":"import networkx as nx\nimport random\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport math\nfrom pylab import mpl\nmpl.rcParams['font.sans-serif'] = ['SimHei']\n\ndef maxVector(a,b): # return a new vector made of the element-wise maxima of the two input vectors\n    c = []\n    temp1 = a >= b  # >= keeps equal entries; with a strict > they would be zeroed out\n    temp1 = temp1.astype(int)\n    temp2 = a < b\n    temp2 = temp2.astype(int)\n    c = temp1 * a + temp2 * b\n    return c\n\ndef generateData(BA,p):\n    TA = BA.sum()\n    IL = np.random.uniform(p * BA.sum() /N,BA,(N))\n    IL = IL / IL.sum() * p * BA.sum()\n    IB = np.random.uniform(p * BA.sum() /N,BA,(N))\n    IB = IB / IB.sum() * p * BA.sum()\n    remain1 = BA - IL\n    remain2 = BA - IB\n    propotion3 = np.random.uniform(0,1,(N,2))\n    propotion4 = np.random.uniform(0,1,(N,2))\n    propotion5 = propotion3 + propotion4\n    propotion3 = propotion3 / propotion5\n    propotion4 = propotion4 / propotion5\n    I = remain1 * propotion3[:,0]\n    M = remain1 * propotion4[:,0]\n    D = remain2 * propotion3[:,1]\n    NW = remain2 * propotion4[:,1]\n    # build the balance sheet\n    data = np.vstack((I,M,IL,IB,D,NW,BA)) # stack the vectors by row, then transpose into columns\n    data = data.T\n    header = ['投资','流动性资产','银行间贷款','银行间借款','存款','净资产','总资产']\n    bank = []\n    for i in range(N):\n        bank.append('银行' + str(i))\n    Data = pd.DataFrame(data,columns=header,index=bank)\n    return Data\n\ndef constraint1(L):\n    IL_estimate = sum(L)\n    return sum(abs(IL - IL_estimate))\n\ndef constraint2(L):\n    L = L.T\n    IB_estimate = sum(L)\n    return sum(abs(IB - IB_estimate))\n\ndef constraint3(L):\n    return sum(abs(L.diagonal()))\n\ndef RAS(X):\n    for i in range(1000):\n        temp = X.copy()\n        row = IB / sum(X.T)\n        row = np.array([row] * N)\n        X = X * row.T\n        column = IL / sum(X)\n        column = np.array([column] * N)\n        X = X * column\n        if constraint1(X) + constraint2(X) + constraint3(X) <= 1e-10:\n            print('status : success')\n            print('iteration : ',i)\n            break\n        if sum(sum(abs(temp - X))) < 1e-10:\n            print('function out of tolerance')\n            print('iteration : ',i)\n            break\n        if i == 999:\n            print('out of max iteration')\n    return X\n\ndef entropy(L):\n    y = 0\n    for i in range(N):\n        for j in range(i):\n            y = y + math.log(L[i,j] / X[i,j]) * L[i,j]\n            y = y + math.log(L[j,i] / X[j,i]) * L[j,i]\n    return y\n\ndef clearVector(data,L,shock): # return the clearing vector and the sets of defaulting banks\n    IB = data['银行间借款']\n    IB = IB.as_matrix()\n    E = (data['投资'] + data['流动性资产']) * (1 - shock) - data['存款']\n    E = E.as_matrix()\n    pi = np.zeros(N)\n    for index,value in enumerate(L):\n        if IB[index] == 0:\n            pi = np.row_stack((pi,np.zeros(N)))\n        else:\n            pi = np.row_stack((pi,value/IB[index]))\n    pi = np.delete(pi,0,axis=0)\n    ib = IB\n    newib = -maxVector(-IB,-maxVector(np.dot(ib,pi) + E,np.zeros(N)))\n    delta = abs(ib - newib)\n    fundamental = set()\n    if delta.sum() > 0.01:\n        fundamental = set(np.where(ib - newib > 0)[0]) # set of fundamentally defaulting banks\n    while delta.sum() > 0.01:\n        ib = newib\n        newib = -maxVector(-IB,-maxVector(np.dot(ib,pi) + E,np.zeros(N)))\n        delta = abs(ib - newib)\n    contagion = set(np.where(ib - newib > 0)[0])\n    contagion = contagion.difference(fundamental) # set of contagion-default banks\n    return newib,fundamental,contagion\n\nglobal N # number of banks\nN = 50\n\nBA = abs(np.random.uniform(1,2,N)) * 1000\np_max = N * min(BA)/BA.sum()\nleverage = np.linspace(0.01,p_max,100)\nfundamental_number = []\ncontagion_number = []\ndefault_number = []\nLGD_avg = []\nshock = np.ones(N) * 0.2 # apply the shock\nfor p in leverage:\n    data = generateData(BA,p) # balance-sheet data produced by the third data-generation scheme\n    # compute using data set 1\n    IB = np.array(data['银行间借款'])\n    IL = np.array(data['银行间贷款'])\n    ib = IB / IB.sum()\n    il = IL / IL.sum()\n    X = np.dot(ib.reshape((N,1)),il.reshape((1,N))) * IL.sum()\n    X = X - np.diag(X.diagonal())\n    L = RAS(X)\n    ib,fundamental,contagion = clearVector(data,L,shock) # obtain the clearing vector and the sets of defaulting banks\n    LGD_avg.append((1 - ib.sum() / data['银行间借款'].sum())) # average loss given default\n    fundamental_number.append((len(fundamental)))\n    contagion_number.append((len(contagion)))\ndefault_number = np.array(fundamental_number) + np.array(contagion_number)\ncontagion_frequence = np.array(contagion_number) / N # probability of default contagion\ncontagion_frequence[contagion_frequence < 0.05] = 0\n\nfig1 = plt.figure(dpi=300)\nplt.title('完全网络结构:违约银行数量————杠杆')\nplt.plot(leverage,fundamental_number)\nplt.plot(leverage,contagion_number)\nplt.plot(leverage,default_number)\nplt.xlabel('杠杆')\nplt.ylabel('违约银行数量')\nplt.legend(['基础违约银行数量','传染违约银行数量','总违约银行数量'])\nfig1.savefig('D:/Documents/GitHub/practice/数据模型3/完全网络结构:违约银行数量————杠杆')\n\nfig2 = plt.figure(dpi=300)\nplt.title('完全网络结构:传染概率————杠杆')\nplt.plot(leverage,contagion_frequence)\nplt.xlabel('杠杆')\nplt.ylabel('传染概率')\nfig2.savefig('D:/Documents/GitHub/practice/数据模型3/完全网络结构:传染概率————杠杆')\n\nfig3 = plt.figure(dpi=300)\nplt.title('完全网络结构:平均违约损失率————杠杆')\nplt.plot(leverage,LGD_avg)\nplt.xlabel('杠杆')\nplt.ylabel('平均违约损失率')\nfig3.savefig('D:/Documents/GitHub/practice/数据模型3/完全网络结构:平均违约损失率————杠杆')","sub_path":"数据模型3/完全网络结构杠杆水平.py","file_name":"完全网络结构杠杆水平.py","file_ext":"py","file_size_in_byte":5790,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"53927157","text":"#096 - Preenchimento de Vetor III\r\n\r\n# Introduction\r\nN = []\r\nx = float(input(''))\r\nN.append(x)\r\n\r\n# Development\r\nfor i in range(100):\r\n    x /= 2\r\n    N.append(x)\r\n# Conclusion\r\n    print('N[%d] = %.4f' %(i, N[i]))","sub_path":"URI/096[URI 1178] - Preenchimento de Vetor III.py","file_name":"096[URI 1178] - Preenchimento de Vetor III.py","file_ext":"py","file_size_in_byte":203,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"375357879","text":"# create the user file if it does not exist\nimport json\nimport os\n\nuser_file = \"user.ini\"\n\n# format of the JSON file\n# {\"users\":[{\"name\":name,\"pwd\":pwd},{},{}]}\n\nif not os.path.exists(user_file):\n    os.close(os.open(user_file, os.O_CREAT))\n\n\n# since registration exists, there is no need to predefine any users!!!\n# functions created according to what the front end needs\ndef query_by_name_pwd(name: str, pwd: str):\n    if os.path.getsize(user_file) == 0:\n        return None\n    # read all the users\n    # search by iterating\n    # if a match is found, return the user\n    with open(user_file, \"r\", encoding=\"utf-8\") as json_io:\n        users = json.load(json_io)\n\n    for user in users.get(\"users\"):\n        if user.get(\"name\") == name and user.get(\"pwd\") == pwd:\n            return name, pwd\n    # no user matched\n    return None\n\n\n# look up a user name\ndef query_by_name(name: str):\n    if os.path.getsize(user_file) == 0:\n        return False\n\n    with open(user_file, \"r\", encoding=\"utf-8\") as json_io:\n        users = json.load(json_io)\n\n    for user in users.get(\"users\"):\n        if user.get(\"name\") == name:\n            return True\n\n    return False\n\n\ndef write_json(name: str, pwd: str):\n\n    if os.path.getsize(user_file) == 0:\n        with open(user_file, \"w\", encoding=\"utf-8\") as json_io:\n            dict_str = {\"users\": [{\"name\": name, \"pwd\": pwd}]}\n            json.dump(dict_str, json_io)\n    else:\n        # read everything first, append, then write back out to the file\n        with open(user_file, \"r\", encoding=\"utf-8\") as json_io:\n            users = json.load(json_io)\n\n        # append to users\n        users.get(\"users\").append({\"name\": name, \"pwd\": pwd})\n\n        # write the file back\n        with open(user_file, \"w\", encoding=\"utf-8\") as json_io:\n            json.dump(users, json_io)\n","sub_path":"basics/day12_17/login.py","file_name":"login.py","file_ext":"py","file_size_in_byte":1753,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"23516248","text":"# Python bytecode 2.7 (decompiled from Python 2.7)\n# Embedded file name: scripts/client/gui/shared/tooltips/marathon.py\nfrom CurrentVehicle import g_currentVehicle\nfrom gui import makeHtmlString\nfrom gui.marathon.marathon_constants import MARATHON_STATE, MARATHON_WARNING\nfrom gui.shared.formatters import text_styles, icons\nfrom gui.shared.tooltips import TOOLTIP_TYPE, formatters\nfrom gui.shared.tooltips.common import BlocksTooltipData\nfrom helpers import dependency\nfrom helpers.i18n import makeString as _ms\nfrom skeletons.gui.game_control import IMarathonEventsController\n\nclass MarathonEventTooltipData(BlocksTooltipData):\n _marathonsCtrl = dependency.descriptor(IMarathonEventsController)\n\n def __init__(self, context):\n super(MarathonEventTooltipData, self).__init__(context, TOOLTIP_TYPE.QUESTS)\n self._setContentMargin(top=2, bottom=3, left=1, right=1)\n self._setMargins(afterBlock=0)\n self._setWidth(303)\n\n def _packBlocks(self, questType, prefix, *args, **kwargs):\n self._marathonEvent = self._marathonsCtrl.getMarathon(prefix)\n self.__tooltipData = self._marathonEvent.getTooltipData()\n self.__iconsData = self._marathonEvent.getIconsData()\n items = super(MarathonEventTooltipData, self)._packBlocks()\n state = self._marathonEvent.getState()\n items.append(self._getHeader(state))\n items.append(self._getBody(state))\n if state != MARATHON_STATE.NOT_STARTED and self._marathonEvent.data.showFlagTooltipBottom:\n items.append(self._getBottom(state))\n return items\n\n def _getHeader(self, _):\n icon, text = self._marathonEvent.getTooltipHeader()\n if icon:\n formattedText = '{} {}'.format(icons.makeImageTag(icon, width=16, height=16), text_styles.main(text))\n else:\n formattedText = '{}'.format(text_styles.main(text))\n return formatters.packImageTextBlockData(title=text_styles.highTitle(_ms(self.__tooltipData.header)), img=self.__iconsData.tooltipHeader, txtPadding=formatters.packPadding(top=25), txtOffset=20, txtGap=-8, desc=formattedText)\n\n def _getBody(self, state):\n if state == MARATHON_STATE.FINISHED:\n text = text_styles.main(_ms(self.__tooltipData.bodyExtra, day=self._marathonEvent.getExtraDaysToBuy()))\n else:\n text = text_styles.main(self.__tooltipData.body)\n return formatters.packTextBlockData(text=text, padding=formatters.packPadding(left=20, top=10, bottom=20, right=10))\n\n def _getBottom(self, state):\n vehicle = g_currentVehicle.item\n isObtained = self._marathonEvent.isVehicleObtained()\n if isObtained:\n statusLabel = text_styles.bonusAppliedText(icons.makeImageTag(self.__iconsData.libraryOkIcon, vSpace=-2) + ' ' + _ms(self.__tooltipData.extraStateCompleted))\n return formatters.packTextBlockData(text=makeHtmlString('html_templates:lobby/textStyle', 'alignText', {'align': 'center',\n 'message': statusLabel}), padding=formatters.packPadding(bottom=20))\n if state == MARATHON_STATE.IN_PROGRESS:\n warning = self._marathonEvent.checkForWarnings(vehicle)\n if warning == MARATHON_WARNING.WRONG_BATTLE_TYPE:\n return formatters.packTextBlockData(text=makeHtmlString('html_templates:lobby/textStyle', 'alignText', {'align': 'center',\n 'message': text_styles.critical(_ms(self.__tooltipData.errorBattleType))}), padding=formatters.packPadding(bottom=20))\n if warning == MARATHON_WARNING.WRONG_VEH_TYPE:\n return formatters.packTextBlockData(text=makeHtmlString('html_templates:lobby/textStyle', 'alignText', {'align': 'center',\n 'message': text_styles.critical(_ms(self.__tooltipData.errorVehType))}), 
padding=formatters.packPadding(bottom=20))\n currentStep, allStep = self._marathonEvent.getMarathonProgress()\n if allStep:\n return formatters.packTextBlockData(text=makeHtmlString('html_templates:lobby/textStyle', 'alignText', {'align': 'center',\n 'message': text_styles.middleTitle(_ms(self.__tooltipData.extraStateSteps, currentStep=currentStep, allStep=text_styles.main(allStep)))}), padding=formatters.packPadding(bottom=20))\n else:\n discount = self._marathonEvent.getMarathonDiscount()\n return formatters.packTextBlockData(text=makeHtmlString('html_templates:lobby/textStyle', 'alignText', {'align': 'center',\n 'message': text_styles.bonusPreviewText(_ms(self.__tooltipData.extraStateDiscount, discount=discount))}), padding=formatters.packPadding(bottom=20))\n","sub_path":"source/res/scripts/client/gui/shared/tooltips/marathon.py","file_name":"marathon.py","file_ext":"py","file_size_in_byte":4655,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"221793269","text":"#Goal: An input like 10:00 should output \"It's ten o clock\"\r\n#A bit of a mess, could be optimized but I made it at 4:30 am\r\ntime_number = input(\"Please input a time in the form xx:xx \")\r\n\r\noutput = \"It's\" #Begining of the string output\r\n\r\ntime_number = time_number.split(\":\") #Splits the input into the two number parts (\"11:30\" -> [\"11\",\"30\"])\r\nprint(time_number)\r\n\r\nhour = [\"twelve\",\"one\",\"two\",\"three\",\"four\",\"five\",\"six\",\"seven\",\"eight\",\"nine\",\"ten\",\"eleven\"]\r\nminute_tens = [\"o'\",\"ten\",\"twenty\",\"thirty\",\"forty\",\"fifty\"]\r\nminute_spec = [\"eleven\",\"twelve\",\"thirteen\",\"fourteen\",\"fifteen\",\"sixteen\",\"seventeen\",\"eighteen\",\"nineteen\",]\r\nminute = hour[1:-1] #steals the regular numbers from the List hour\r\nminute.insert(0,\"clock\")\r\noutput = output+\" \"+hour[int(time_number[0])-12] #tacks on the hour part of the time\r\nprint(int(time_number[1]),type(int(time_number[1])))\r\nif int(time_number[1]) > 10 and int(time_number[1]) < 20:\r\n output = output+\" \"+minute_spec[int(time_number[1][1])-1]\r\nelif time_number[1] == \"10\":\r\n output = output + \" \" + \"ten\"\r\nelse:\r\n output = output+\" \"+minute_tens[int(time_number[1][0])]+\" \"+minute[int(time_number[1][1])]\r\n\r\nif int(time_number[0]) >= 0 and int(time_number[0]) <= 11:\r\n output = output+\" am\"\r\nelse:\r\n output = output+\" pm\"\r\n\r\nprint(output)\r\n","sub_path":"Talking Clock.py","file_name":"Talking Clock.py","file_ext":"py","file_size_in_byte":1303,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"280416751","text":"#!/usr/bin/env python3\n\nimport base64\nimport hmac\nimport json\nimport math\nimport os\nimport random\nimport sys\n\nPREFIX = \"ugra_it_is_too_powerful_rsa_right_\"\nSECRET = b\"indication-strength-nominate-knowledge-security\"\nSALT_SIZE = 11\n\ndef get_flag(user_id):\n return PREFIX + hmac.new(SECRET, user_id.encode(), \"sha256\").hexdigest()[:SALT_SIZE]\n\n\ndef get_phi(n):\n phi = int(n > 1 and n)\n for p in range(2, int(n ** .5) + 1):\n if not n % p:\n phi -= phi // p\n while not n % p:\n n //= p\n if n > 1:\n phi -= phi // n\n return phi\n\n\ndef solve(a, b):\n # find solution for ax + by == 1 that 0 < x < n\n\n if a == 0:\n return (0, 1)\n\n x, y = solve(b % a, a)\n x, y = y - (b // a) * x, x\n\n while x < 0:\n x += b\n y -= a\n\n return (x, y)\n\n\ndef inverse(a, n):\n x, _ = solve(a, n)\n return x\n\n\ndef is_prime(n):\n # Base 
check\n\n    prime_list = [3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53,\n                  59, 61, 67, 71, 73, 79, 83, 89, 97]\n\n    for prime in prime_list:\n        if n % prime == 0:\n            return False\n\n    # Miller-Rabin primality test\n\n    s = 0\n    r = n - 1\n    while r & 1 == 0:\n        s += 1\n        r //= 2\n\n    for _ in range(128):\n        a = random.randrange(2, n - 1)\n        x = pow(a, r, n)\n        if x != 1 and x != n - 1:\n            j = 1\n            while j < s and x != n - 1:\n                x = pow(x, 2, n)\n                if x == 1:\n                    return False\n                j += 1\n            if x != n - 1:\n                return False\n\n    return True\n\n\ndef generate_prime_number(length):\n    found_prime = False\n\n    while not found_prime:\n        p = random.getrandbits(length)\n        p |= (1 << length - 1) | 1\n\n        found_prime = is_prime(p)\n\n    return p\n\n\ndef encrypt(x):\n    three = random.randint(0, 1)\n    while True:\n        p = generate_prime_number(13 if three == 1 else 9)\n        q = generate_prime_number(13 if three == 1 else 9)\n        r = 1 if three == 1 else generate_prime_number(8)\n\n        if p == q or q == r or r == p:\n            continue\n\n        n = p * q * r\n        phi = (p - 1) * (q - 1) * max(r - 1, 1)\n        break\n\n    while True:\n        c = random.randint(phi // 4, phi - 1)\n        d = random.randint(phi // 4, phi - 1)\n        e = pow(c, d, phi)\n        if math.gcd(e, phi) != 1:\n            continue\n\n        ie = inverse(e, phi)\n        ab = pow(x, ie, n)\n        break\n\n    while True:\n        ib = random.randint(3, phi - 1)\n        if math.gcd(ib, phi) != 1:\n            continue\n\n        b = inverse(ib, phi)\n        a = pow(ab, ib, n)\n        break\n\n    return a, (b, n), (c, d)\n\n\ndef generate():\n    if len(sys.argv) < 3:\n        print(\"Usage: generate.py user_id target_dir <...>\", file=sys.stderr)\n        sys.exit(1)\n\n    user_id = sys.argv[1]\n    target_dir = sys.argv[2]\n\n    random.seed(hmac.new(SECRET, user_id.encode(), \"sha256\").digest())\n    flag = get_flag(user_id)\n\n    dump = {\n        \"common_key\": [(1, 50041451)],\n        \"private_key\": [(1, 1)]\n    }\n    encrypted_flag = [7694194]\n\n    for c1, c2, c3 in zip(flag[3::3], flag[4::3], flag[5::3]):\n        x = 65536 * ord(c1) + 256 * ord(c2) + ord(c3)\n\n        e, common, private = encrypt(x)\n\n        encrypted_flag.append(e)\n        dump[\"common_key\"].append(common)\n        dump[\"private_key\"].append(private)\n\n    with open(os.path.join(target_dir, \"powerful.key\"), \"wb\") as f:\n        f.write(base64.b64encode(json.dumps(dump).encode()))\n\n    json.dump({\n        \"flags\": [flag],\n        \"substitutions\": {},\n        \"urls\": [],\n        \"bullets\": [f\"Флаг: {' '.join(map(str, encrypted_flag))}\"]\n    }, sys.stdout)\n\n\nif __name__ == \"__main__\":\n    generate()\n","sub_path":"tasks/powerful/generate.py","file_name":"generate.py","file_ext":"py","file_size_in_byte":3755,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"382098801","text":"\"\"\"All classes/methods/tools that help adapt to v2 `preview`\"\"\"\n\nimport uuid\n\n\ndef adapt_preview_data(uid,\n                       connection_id,\n                       table,\n                       ds_id,\n                       preview_data):\n    \"\"\"\n    Adapts vnext data table preview data to v2.\n\n    Args:\n        uid (str): UID.\n        connection_id (str): Connection id.\n        table (dict): Table object.\n        ds_id (int): Datasource ID.\n        preview_data (dict): Preview data payload.\n\n    Returns:\n        dict: Adapted data.\n    \"\"\"\n\n    database = table['database']\n    table_id = table['id']\n    table_name = table['name']\n\n    # 'columns' are table columns defined by the user when\n    # a table is saved.\n    #\n    # When this function is called at preview time (before\n    # the table is saved), 'columns' is None; otherwise it\n    # contains definitions provided by the user.\n    #\n    # The adaptation code deals with both cases: when 'columns'\n    # are available, type and formatting options are got from it,\n    # 
otherwise it's got from fields of the preview_data (which\n # determines the default type of columns).\n\n columns = table.get('columns')\n\n rows = preview_data['data']\n rows.insert(0, [fld['id'] for fld in preview_data['fields']])\n data = {\n 'data': preview_data['data'],\n 'table': _adapt_table_structure(table_id, table_name, columns, preview_data['fields']),\n 'tableId': table_name,\n 'timezone': None,\n 'colSum': len(preview_data['data']),\n 'connectionId': connection_id,\n 'createTime': None,\n 'creatorId': None,\n 'dataBaseName': database,\n 'dsId': ds_id,\n 'lastModifiedDate': None,\n 'lastUpdateTime': None,\n 'name': table_name,\n 'operateType': '',\n 'remotePath': '',\n 'remoteStatus': '1',\n 'rowCount': None,\n 'sourceId': '',\n 'uid': uid,\n 'updateFrequency': None,\n 'updateHour': None,\n 'updateStatus': None,\n 'updateTime': None\n }\n return data\n\n\ndef _adapt_table_structure(table_id, table_name, table_columns, data_columns):\n\n if not table_columns:\n # populate dummy table columns for easier processing\n table_columns = [{}] * len(data_columns)\n\n assert len(table_columns) == len(data_columns)\n\n def _get_custom_name(table_column, data_column): # pylint: disable=unused-argument\n return table_column.get('customName')\n\n def _get_column_type(table_column, data_column):\n if table_column:\n return table_column.get('dataType').upper()\n return data_column['type'].upper()\n\n def _get_data_format(table_column, data_column):\n if table_column:\n return table_column.get('formatOptions', {}).get('dateFormat', '')\n return 'yyyy-MM-dd' if data_column['type'] == 'date' else ''\n\n def _get_date_front(table_column, data_column):\n if table_column:\n return table_column.get('formatOptions', {}).get('dateStartsWith', None)\n return 'year' if data_column['type'] == 'date' else None\n\n def _get_data_format_type(table_column, data_column): # pylint: disable=unused-argument\n if table_column:\n currency_symbol = table_column.get('formatOptions', {}).get('currencySymbol')\n if currency_symbol:\n return f'{currency_symbol}###'\n return ''\n\n return {\n 'code': table_name,\n 'colSum': str(len(data_columns)),\n 'columnCount': len(data_columns),\n 'columns': [\n {\n 'code': data_column['name'],\n 'columnType': _get_column_type(table_column, data_column),\n 'customDateColumn': False,\n 'customName': _get_custom_name(table_column, data_column),\n 'dataType': None,\n\n 'dataFormat': _get_data_format(table_column, data_column),\n 'dataFormatType': _get_data_format_type(table_column, data_column),\n 'dateFront': _get_date_front(table_column, data_column),\n\n 'display': table_column.get('include', True),\n 'id': str(uuid.uuid1()),\n 'index': index,\n 'isCustom': None,\n 'name': data_column['name'],\n 'ordinalPosition': 0,\n 'primaryKey': False,\n 'remarks': None,\n 'separator': None,\n\n # TODO: check if this is needed\n 'type': 'metrics' if data_column['type'] == 'number' else 'dimension'\n }\n for index, (table_column, data_column) in enumerate(zip(table_columns, data_columns))],\n 'headIndex': 0,\n 'headMode': 'assign',\n 'headType': 'row',\n 'id': table_id,\n 'ignoreCol': [],\n 'ignoreColEnd': None,\n 'ignoreColStart': None,\n 'ignoreRow': [],\n 'ignoreRowEnd': 0,\n 'ignoreRowStart': 1,\n 'rowSum': None,\n 'tableName': table_name,\n 'tableType': None\n 
    }\n","sub_path":"backend-master/src/api-gateway/main/services/adaptors/preview.py","file_name":"preview.py","file_ext":"py","file_size_in_byte":5038,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"357013626","text":"from flask import Flask\nfrom web.public import course\nfrom api.item import i_user\nfrom ext import db\n\napp = Flask(__name__)\napp.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///app.db'\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = True\n\ndb.init_app(app)\n\n@app.before_first_request\ndef create_tables():\n    db.create_all()\n\n@app.route('/')\ndef hello():\n    return \"Hello\"\n\napp.register_blueprint(course, url_prefix = \"/api/v1/course\")\n\napp.register_blueprint(i_user, url_prefix = \"/api/v1/i_user\")\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":496,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"321764438","text":"# Python turtle-graphics example that draws the Korean flag (Taegukgi)\r\n# 고급예제6.py\r\n# ver 1.0 by Won, Jongho\r\n# ver 2.0 by Oh, Sangmoon\r\n\r\nimport turtle as t\r\nimport math as m\r\n\r\nwidth = 1200 # flag width (3/2 of the height)\r\nheight = 800 # flag height (2/3 of the width)\r\ndiameter = height / 2 # taegeuk circle diameter = height / 2\r\nradius = height / 4 # taegeuk circle radius = height / 4\r\n\r\n# compute the diagonal length (= 1432)\r\ndiagonal_length = m.sqrt(width*width + height*height)\r\n# compute the diagonal angle (= 33.69 degrees)\r\ndiagonal_degree = m.atan2(height,width) * 180/m.pi\r\n\r\nt.title(\"Korean Flag\") # set the window title\r\nt.setup(width, height) # set the window size\r\nt.bgcolor(\"white\") # set the background color\r\n\r\n# draw one bar of a trigram\r\ndef draw_k():\r\n    n = [radius/2,radius/6,radius,radius/6,radius/2]\r\n    t.pendown()\r\n    t.color(\"black\", \"black\")\r\n    t.begin_fill()\r\n    for size in n:\r\n        t.left(90)\r\n        t.forward(size)\r\n    t.end_fill()\r\n    t.penup()\r\n\r\n# paint the middle of one bar white (to make a broken bar)\r\ndef draw_k_white():\r\n    n = [radius/24+1,radius/6+2,radius/12+2,radius/6+2,radius/24+1]\r\n    t.color(\"white\", \"white\")\r\n    t.begin_fill()\r\n    t.right(90)\r\n    t.forward(1)\r\n    for size in n:\r\n        t.left(90)\r\n        t.forward(size)\r\n    t.end_fill()\r\n\r\n# draw the Geon (heaven) trigram\r\ndef draw_gun():\r\n    for r in range(20, 27, 3):\r\n        t.home()\r\n        t.right(diagonal_degree+180)\r\n        t.forward(radius*r/12)\r\n        draw_k()\r\n\r\n# draw the Ri (fire) trigram\r\ndef draw_ri():\r\n    for r in range(20, 27, 3):\r\n        t.home()\r\n        t.right(-diagonal_degree+180)\r\n        t.forward(radius*r/12)\r\n        draw_k()\r\n        if( r == 23 ):\r\n            draw_k_white()\r\n\r\n# draw the Gam (water) trigram\r\ndef draw_kam():\r\n    for r in range(20, 27, 3):\r\n        t.home()\r\n        t.left(diagonal_degree)\r\n        t.forward(radius*r/12)\r\n        draw_k()\r\n        if( r != 23 ):\r\n            draw_k_white()\r\n\r\n# draw the Gon (earth) trigram\r\ndef draw_kon():\r\n    for r in range(20, 27, 3):\r\n        t.home()\r\n        t.left(-diagonal_degree)\r\n        t.forward(radius*r/12)\r\n        draw_k()\r\n        draw_k_white()\r\n\r\n# draw all four trigrams\r\ndef draw_4k():\r\n    t.penup()\r\n    draw_gun() # Geon\r\n    draw_ri() # Ri\r\n    draw_kam() # Gam\r\n    draw_kon() # Gon\r\n\r\n# draw the taegeuk circle\r\ndef draw_taegeuk():\r\n    t.home()\r\n    t.right(diagonal_degree)\r\n    t.forward(radius)\r\n    t.left(90)\r\n    t.color(\"#C60C30\", \"#C60C30\") # red\r\n    t.begin_fill()\r\n    t.circle(radius, 180)\r\n    t.end_fill()\r\n    t.color(\"#003478\", \"#003478\") # blue\r\n    t.begin_fill()\r\n    t.circle(radius, 180)\r\n    t.end_fill()\r\n    t.home()\r\n    t.left(90 - diagonal_degree)\r\n    t.begin_fill()\r\n    t.color(\"#C60C30\", \"#C60C30\") # red\r\n    t.circle(radius / 2)\r\n    t.end_fill()\r\n    t.home()\r\n    t.right(90 + diagonal_degree)\r\n    t.begin_fill()\r\n    t.color(\"#003478\", \"#003478\") # blue\r\n    t.circle(radius / 2)\r\n    t.end_fill()\r\n\r\n#---------------------------------------------\r\n# main\r\n#---------------------------------------------\r\n\r\n# draw the taegeuk\r\ndraw_taegeuk()\r\n\r\n# draw the four trigrams\r\ndraw_4k()\r\n\r\n# hide the turtle cursor\r\nt.hideturtle()\r\n","sub_path":"고급예제6.py","file_name":"고급예제6.py","file_ext":"py","file_size_in_byte":3276,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"296655825","text":"#!/usr/bin/env python3\n\nimport claims\nimport tabulate\n\nreports = claims.Report()\n\nstat_all = len(reports)\nreports_fails = [i for i in reports if i['status'] in claims.Case.FAIL_STATUSES]\nstat_failed = len(reports_fails)\nreports_claimed = [i for i in reports_fails if i['testActions'][0].get('reason')]\nstat_claimed = len(reports_claimed)\n\nprint(\"\\nOverall stats\")\nprint(tabulate.tabulate(\n    [[stat_all, stat_failed, stat_claimed]],\n    headers=['all reports', 'failures', 'claimed failures']))\n\nrules = claims.Ruleset()\nrules_reasons = [r['reason'] for r in rules]\nreports_per_reason = {'UNKNOWN': stat_failed-stat_claimed}\nreports_per_reason.update({r:0 for r in rules_reasons})\nfor report in reports_claimed:\n    reason = report['testActions'][0]['reason']\n    if reason not in reports_per_reason:\n        reports_per_reason[reason] = 0\n    reports_per_reason[reason] += 1\n\nprint(\"\\nHow various reasons for claims are used\")\nreports_per_reason = sorted(reports_per_reason.items(), key=lambda x: x[1], reverse=True)\nreports_per_reason = [(r, c, r in rules_reasons) for r, c in reports_per_reason]\nprint(tabulate.tabulate(\n    reports_per_reason,\n    headers=['claim reason', 'number of times', 'is it in current knowledgebase?']))\n\nreports_per_class = {}\nfor report in reports:\n    class_name = report['className']\n    if class_name not in reports_per_class:\n        reports_per_class[class_name] = {'all': 0, 'failed': 0}\n    reports_per_class[class_name]['all'] += 1\n    if report in reports_fails:\n        reports_per_class[class_name]['failed'] += 1\n\nprint(\"\\nHow many failures are there per class\")\nprint(tabulate.tabulate(\n    sorted([(c, r['all'], r['failed'], float(r['failed'])/r['all']) for c,r in reports_per_class.items()],\n           key=lambda x: x[3], reverse=True),\n    headers=['class name', 'number of reports', 'number of failures', 'failures ratio'],\n    floatfmt=\".3f\"))\n\nreports_per_method = {}\nfor report in reports:\n    method = report['className'].split('.')[2]\n    if method not in reports_per_method:\n        reports_per_method[method] = {'all': 0, 'failed': 0}\n    reports_per_method[method]['all'] += 1\n    if report in reports_fails:\n        reports_per_method[method]['failed'] += 1\n\nprint(\"\\nHow many failures are there per method (CLI vs. API vs. 
UI)\")\nprint(tabulate.tabulate(\n sorted([(c, r['all'], r['failed'], float(r['failed'])/r['all']) for c,r in reports_per_method.items()],\n key=lambda x: x[3], reverse=True),\n headers=['method', 'number of reports', 'number of failures', 'failures ratio'],\n floatfmt=\".3f\"))\n","sub_path":"claimstats.py","file_name":"claimstats.py","file_ext":"py","file_size_in_byte":2569,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"455706815","text":"import time\nfrom os import environ\nimport resources\nfrom sys import stdout, exit\nfrom typing import Any\nimport boto3\nimport logging\nimport pytest\nimport subprocess\n\n# mypy later maybe\nS3ServiceResource = Any\nSqsServiceResource = Any\n\nBUCKET_PREFIX = environ[\"BUCKET_PREFIX\"]\nassert BUCKET_PREFIX == \"local-grapl\"\n\n\ndef _upload_analyzers(s3_client: S3ServiceResource) -> None:\n \"\"\"\n Basically reimplementing upload_local_analyzers.sh\n Janky, since Jesse will have an analyzer-uploader service pretty soon.\n \"\"\"\n to_upload = [\n (\n \"/home/grapl/etc/local_grapl/suspicious_svchost/main.py\",\n \"analyzers/suspicious_svchost/main.py\",\n ),\n (\n \"/home/grapl/etc/local_grapl/unique_cmd_parent/main.py\",\n \"analyzers/unique_cmd_parent/main.py\",\n ),\n ]\n bucket = f\"{BUCKET_PREFIX}-analyzers-bucket\"\n for (local_path, s3_key) in to_upload:\n logging.info(f\"S3 uploading {local_path}\")\n with open(local_path, \"r\") as f:\n s3_client.put_object(Body=f.read(), Bucket=bucket, Key=s3_key)\n\n\ndef _upload_test_data(s3_client: S3ServiceResource) -> None:\n logging.info(f\"Running upload-sysmon-logs\")\n\n # i hate this lol\n # but it's probably better than mucking with path and importing that module...\n subprocess.run(\n [\n \"python3\",\n \"/home/grapl/etc/local_grapl/bin/upload-sysmon-logs.py\",\n \"--bucket_prefix\",\n BUCKET_PREFIX,\n \"--logfile\",\n \"/home/grapl/etc/sample_data/eventlog.xml\",\n \"--use-links\",\n \"True\",\n ]\n )\n\n\ndef _create_s3_client() -> S3ServiceResource:\n return boto3.client(\n \"s3\",\n endpoint_url=\"http://s3:9000\",\n aws_access_key_id=\"minioadmin\",\n aws_secret_access_key=\"minioadmin\",\n )\n\n\ndef _create_sqs_client() -> SqsServiceResource:\n # mostly cribbed from upload-sysmon-logs\n return boto3.client(\n \"sqs\",\n endpoint_url=\"http://sqs:9324\",\n region_name=\"us-east-1\",\n aws_access_key_id=\"minioadmin\",\n aws_secret_access_key=\"minioadmin\",\n )\n\n\ndef main() -> int:\n logging.basicConfig(stream=stdout, level=logging.INFO)\n\n s3_client = _create_s3_client()\n sqs_client = _create_sqs_client()\n\n resources.wait_for(\n [\n # for uploading analyzers\n resources.WaitForS3Bucket(s3_client, f\"{BUCKET_PREFIX}-analyzers-bucket\"),\n # for upload-sysmon-logs.py\n resources.WaitForS3Bucket(s3_client, f\"{BUCKET_PREFIX}-sysmon-log-bucket\"),\n resources.WaitForSqsQueue(sqs_client, \"grapl-sysmon-graph-generator-queue\"),\n ]\n )\n\n _upload_analyzers(s3_client)\n _upload_test_data(s3_client)\n\n logging.info(\"Waiting 60s for the pipeline to do its thing...\")\n time.sleep(60)\n\n return pytest.main(\n [\n \"-s\", # disable stdout capture\n ]\n )\n\n\nif __name__ == \"__main__\":\n exit(main())\n","sub_path":"src/python/grapl_e2e_tests/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2988,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"367500427","text":"from django.shortcuts import render, get_object_or_404, get_list_or_404\nfrom django.views 
import View\nfrom django.utils import timezone\n\nfrom .models import Organizer, Conference, Event\n\n\nclass IndexView(View):\n template_name = 'CFP/index.html'\n def get(self, request):\n upcomming_deadline_list = Event.objects.order_by('submission_due').filter(submission_due__gte = timezone.now())[:5]\n upcomming_conference_list = Event.objects.order_by('start_date').filter(start_date__gte = timezone.now())[:4]\n context = {\n 'upcomming_deadline_list': upcomming_deadline_list,\n 'upcomming_conference_list': upcomming_conference_list\n }\n return render(request, self.template_name, context)\n\n\nclass OrganizerView(View):\n template_name = 'CFP/organizer.html'\n def get(self, request, *args, **kwargs):\n organizer = get_object_or_404(Organizer, abbreviation__iexact = self.kwargs['organizer'])\n conference_list = get_list_or_404(Conference, organizer = organizer)\n for conference in conference_list:\n event_list = get_list_or_404(Event, conference = conference)\n conference.event_list = event_list\n context = {\n 'organizer': organizer,\n 'conference_list': conference_list,\n }\n return render(request, self.template_name, context)\n\n\nclass ConferenceView(View):\n template_name = 'CFP/conference.html'\n def get(self, request, *args, **kwargs):\n organizer = get_object_or_404(Organizer, abbreviation__iexact = self.kwargs['organizer'])\n conference = get_object_or_404(Conference, abbreviation__iexact = self.kwargs['conference'])\n event_list = get_list_or_404(Event, conference = conference)\n context = {\n 'organizer': organizer,\n 'conference': conference,\n 'event_list': event_list\n }\n return render(request, self.template_name, context)\n\n\nclass EventView(View):\n template_name = 'CFP/event.html'\n def get(self, request, *args, **kwargs):\n organizer = get_object_or_404(Organizer, abbreviation__iexact = self.kwargs['organizer'])\n conference = get_object_or_404(Conference, abbreviation__iexact = self.kwargs['conference'])\n event = get_object_or_404(Event, conference = conference, start_date__year = self.kwargs['year'])\n context = {\n 'organizer': organizer,\n 'conference': conference,\n 'event': event\n }\n return render(request, self.template_name, context)\n","sub_path":"CFP/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2555,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"483641308","text":"__author__ = 'gaa8664'\nimport openpyxl\n\n\ndef get_new_code(sheet, old_code):\n new_code = None\n for row in range(sheet.max_row):\n if row <= 1:\n continue\n code = sheet.cell(row=row, column=2)\n if old_code == code.value:\n try:\n new_code = sheet.cell(row=row, column=3)\n new_code = new_code.value\n\n break\n except Exception as ex:\n new_code = ''\n return new_code\n\n\ndef update_lic():\n code_file = openpyxl.load_workbook(\"D:\\Project\\excel\\Employee Wise Templates Allocation.xlsx\")\n lic_file = openpyxl.load_workbook(\"D:\\Project\\excel\\LIC Detail.xlsx\",read_only=False)\n code_sheet = code_file.get_sheet_by_name(\"Template Allocation\")\n lic_sheet = lic_file.get_sheet_by_name(\"LIC Detail\")\n\n for row in range(lic_sheet.max_row):\n if row <= 1:\n continue\n old_code = lic_sheet.cell(row=row, column=1)\n new_code = lic_sheet.cell(row=row, column=2)\n val = get_new_code(code_sheet, old_code.value)\n new_code.value = val\n lic_file.save(\"D:\\Project\\excel\\LIC Detail1.xlsx\")\n\nif __name__ == '__main__':\n 
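# rewrite the LIC workbook, filling in the new code for each old code\n    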
update_lic()\n\n\n\n\n\n\n\n","sub_path":"LearningPython/excel/LICNewCodeMapping.py","file_name":"LICNewCodeMapping.py","file_ext":"py","file_size_in_byte":1195,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"571796752","text":"import math\n\ndef median(n):\n s = len(n)\n m = math.floor(s/2)\n if (s % 2 == 0):\n return ((n[m]+n[m-1])/2)\n else:\n return n[m]\n\ndef q1(n):\n s = len(n)\n m = math.floor(s/2)\n return median(n[0:m])\n\ndef q2(n):\n return median(n)\n\ndef q3(n):\n s = len(n)\n m = math.floor(s/2)\n if (s % 2 == 0):\n return median(n[m:s])\n else:\n return median(n[m+1:s])\n\n# main \nn = input()\nx = [int(a) for a in input().split()]\nf = [int(a) for a in input().split()]\n\nar = []\n\nfor i, e in enumerate(x):\n ar.extend([x[i]] * f[i])\n\nar = sorted(ar)\n\nprint(\"{:.1f}\".format(q3(ar) - q1(ar)))","sub_path":"10_days_of_statistics/01_interquartile_range.py","file_name":"01_interquartile_range.py","file_ext":"py","file_size_in_byte":627,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"2933866","text":"import numpy as np\nfrom mode_transmission_network.autoregression import extract_autoregression_feature\n\ndef get_all_fluctuation_modes(time_series_dict, window_length):\n timestamp_list = time_series_dict['timestamp_list']\n idx_to_timestamp_dict = time_series_dict['idx_to_timestamp_dict']\n data_list = time_series_dict['data_list']\n time_series_length = len(timestamp_list)\n fluctuation_mode_list = []\n # 0,1,2,...,\n for i in range(time_series_length - window_length):\n this_data = data_list[i: i + window_length]\n this_timestamp = timestamp_list[i: i + window_length]\n this_data_np_array = np.asarray(this_data)\n print(extract_autoregression_feature(this_data_np_array, this_timestamp))\n\n\n\n","sub_path":"mode_transmission_network/extract_fluctuation_modes.py","file_name":"extract_fluctuation_modes.py","file_ext":"py","file_size_in_byte":739,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"519165022","text":"from dataclasses import dataclass, field\nfrom typing import List, Optional\nfrom .annotation import (\n AdminData,\n Annotation,\n DocumentationBlock,\n VariationPoint,\n)\nfrom .boolean import Boolean\nfrom .can_nm_node import CanNmNode\nfrom .category_string import CategoryString\nfrom .communication_cluster_subtypes_enum import CommunicationClusterSubtypesEnum\nfrom .flexray_nm_node import FlexrayNmNode\nfrom .identifier import Identifier\nfrom .integer import Integer\nfrom .j_1939_nm_node import J1939NmNode\nfrom .multi_language_overview_paragraph import MultiLanguageOverviewParagraph\nfrom .multilanguage_long_name import MultilanguageLongName\nfrom .positive_integer import PositiveInteger\nfrom .ref import Ref\nfrom .short_name_fragment import ShortNameFragment\nfrom .time_value import TimeValue\nfrom .udp_nm_node import UdpNmNode\n\n__NAMESPACE__ = \"http://autosar.org/schema/r4.0\"\n\n\n@dataclass\nclass CanNmCluster:\n \"\"\"\n Can specific NmCluster attributes.\n\n :ivar short_name: This specifies an identifying shortName for the\n object. It needs to be unique within its context and is intended\n for humans but even more for technical reference.\n :ivar short_name_fragments: This specifies how the\n Referrable.shortName is composed of several shortNameFragments.\n :ivar long_name: This specifies the long name of the object. 
Long\n name is targeted to human readers and acts like a headline.\n :ivar desc: This represents a general but brief (one paragraph)\n description what the object in question is about. It is only one\n paragraph! Desc is intended to be collected into overview\n tables. This property helps a human reader to identify the\n object in question. More elaborate documentation, (in particular\n how the object is built or used) should go to \"introduction\".\n :ivar category: The category is a keyword that specializes the\n semantics of the Identifiable. It affects the expected existence\n of attributes and the applicability of constraints.\n :ivar admin_data: This represents the administrative data for the\n identifiable object.\n :ivar introduction: This represents more information about how the\n object in question is built or is used. Therefore it is a\n DocumentationBlock.\n :ivar annotations: Possibility to provide additional notes while\n defining a model element (e.g. the ECU Configuration Parameter\n Values). These are not intended as documentation but are mere\n design notes.\n :ivar communication_cluster_ref: Association to a\n CommunicationCluster in the topology description.\n :ivar nm_channel_id: This attribute has the status \"removed\" and\n shall not be used any longer. Old description: Channel\n identification number of the corresponding channel. Must be\n unique over all NmClusters.\n :ivar nm_channel_sleep_master: This parameter shall be set to\n indicate if the sleep of this network can be absolutely decided\n by the local node only and that no other nodes can oppose that\n decision.\n :ivar nm_nodes: The upper multiplicity of this role has been\n increased to * due to resolving an atpVariation stereotype. The\n previous value was -1.\n :ivar nm_node_detection_enabled: Enables the Request Repeat Message\n Request support. Only valid if nmNodeIdEnabled is set to true.\n :ivar nm_node_id_enabled: Enables the source node identifier.\n :ivar nm_pnc_participation: Defines whether this NmCluster\n contributes to the partial network mechanism.\n :ivar nm_repeat_msg_ind_enabled: Switch for enabling the Repeat\n Message Bit Indication.\n :ivar nm_synchronizing_network: If this parameter is true, then this\n network is a synchronizing network for the NM coordination\n cluster which it belongs to. The network is expected to call\n Nm_SynchronizationPoint() at regular intervals.\n :ivar variation_point: This element was generated/modified due to an\n atpVariation stereotype.\n :ivar nm_busload_reduction_active: It determines if bus load\n reduction for the respective CanNm channel is active or not.\n :ivar nm_car_wake_up_bit_position: Specifies the bit position of the\n CarWakeUp within the NmPdu.\n :ivar nm_car_wake_up_filter_enabled: Please note that this parameter\n is deprecated and will be removed in future. Old description: If\n this attribute is set to true the CareWakeUp filtering is\n supported. In this case only the CarWakeUp bit within the NmPdu\n with source node identifier nmCarWakeUpFilterNodeId is\n considered as CarWakeUp request.\n :ivar nm_car_wake_up_filter_node_id: Source node identifier for\n CarWakeUp filtering.\n :ivar nm_car_wake_up_rx_enabled: Please note that this parameter is\n deprecated and will be removed in future. Old description: If\n set to true this attribute enables the support of CarWakeUp bit\n evaluation in received NmPdus.\n :ivar nm_cbv_position: Defines the position of the control bit\n vector within the NmPdu (Byte position). 
If this attribute is\n not configured, the Control Bit Vector is not used.\n :ivar nm_channel_active: Please note that this attribute is\n deprecated and will be removed in future. This switch determines\n if the respective CanNm channel is active or not. Indicates\n whether a particular CanNm channel shall be initialized (TRUE)\n or not (FALSE). If this parameter is set to FALSE the respective\n NM instance shall not be used during runtime.\n :ivar nm_immediate_nm_cycle_time: Defines the immediate NmPdu cycle\n time in seconds which is used for nmImmediateNmTransmissions\n NmPdu transmissions. This parameter is only valid if\n CanNmImmediateNmTransmissions is greater one.\n :ivar nm_immediate_nm_transmissions: Defines the number of immediate\n NmPdus which shall be transmitted. If the value is zero no\n immediate NmPdus are transmitted. The cycle time of immeditate\n NmPdus is defined by nmImmediateNmCycleTime.\n :ivar nm_message_timeout_time: Timeout of an NmPdu in seconds. It\n determines how long the NM shall wait with notification of\n transmission failure while communication errors occur on the\n bus.\n :ivar nm_msg_cycle_time: Period of a NmPdu in seconds. It determines\n the periodic rate in the periodic transmission mode with bus\n load reduction and is the basis for transmit scheduling in the\n periodic transmission mode without bus load reduction.\n :ivar nm_network_timeout: Network Timeout for NmPdus in seconds It\n denotes the time how long the CanNm shall stay in the Network\n Mode before transition into Prepare Bus-Sleep Mode shall take\n place.\n :ivar nm_nid_position: Defines the byte position of the source node\n identifier within the NmPdu. If this attribute is not\n configured, the Node Identification is not used.\n :ivar nm_remote_sleep_indication_time: Timeout for Remote Sleep\n Indication in seconds. It defines the time how long it shall\n take to recognize that all other nodes are ready to sleep.\n :ivar nm_repeat_message_time: Timeout for Repeat Message State in\n seconds. Defines the time how long the NM shall stay in the\n Repeat Message State.\n :ivar nm_user_data_length: Defines the length of the user data\n contained in the NmPdu. Please note that this attribute is\n deprecated and will be removed in future.\n :ivar nm_wait_bus_sleep_time: Timeout for bus calm down phase in\n seconds. It denotes the time how long the CanNm shall stay in\n the Prepare Bus-Sleep Mode before transition into Bus-Sleep Mode\n shall take place.\n :ivar s: Checksum calculated by the user's tool environment for an\n ArObject. May be used in an own tool environment to determine if\n an ArObject has changed. The checksum has no semantic meaning\n for an AUTOSAR model and there is no requirement for AUTOSAR\n tools to manage the checksum.\n :ivar t: Timestamp calculated by the user's tool environment for an\n ArObject. May be used in an own tool environment to determine\n the last change of an ArObject. The timestamp has no semantic\n meaning for an AUTOSAR model and there is no requirement for\n AUTOSAR tools to manage the timestamp.\n :ivar uuid: The purpose of this attribute is to provide a globally\n unique identifier for an instance of a meta-class. The values of\n this attribute should be globally unique strings prefixed by the\n type of identifier. For example, to include a DCE UUID as\n defined by The Open Group, the UUID would be preceded by \"DCE:\".\n The values of this attribute may be used to support merging of\n different AUTOSAR models. 
The form of the UUID (Universally\n Unique Identifier) is taken from a standard defined by the Open\n Group (was Open Software Foundation). This standard is widely\n used, including by Microsoft for COM (GUIDs) and by many\n companies for DCE, which is based on CORBA. The method for\n generating these 128-bit IDs is published in the standard and\n the effectiveness and uniqueness of the IDs is not in practice\n disputed. If the id namespace is omitted, DCE is assumed. An\n example is \"DCE:2fac1234-31f8-11b4-a222-08002b34c003\". The uuid\n attribute has no semantic meaning for an AUTOSAR model and there\n is no requirement for AUTOSAR tools to manage the timestamp.\n \"\"\"\n class Meta:\n name = \"CAN-NM-CLUSTER\"\n\n short_name: Optional[Identifier] = field(\n default=None,\n metadata={\n \"name\": \"SHORT-NAME\",\n \"type\": \"Element\",\n \"namespace\": \"http://autosar.org/schema/r4.0\",\n \"required\": True,\n }\n )\n short_name_fragments: Optional[\"CanNmCluster.ShortNameFragments\"] = field(\n default=None,\n metadata={\n \"name\": \"SHORT-NAME-FRAGMENTS\",\n \"type\": \"Element\",\n \"namespace\": \"http://autosar.org/schema/r4.0\",\n }\n )\n long_name: Optional[MultilanguageLongName] = field(\n default=None,\n metadata={\n \"name\": \"LONG-NAME\",\n \"type\": \"Element\",\n \"namespace\": \"http://autosar.org/schema/r4.0\",\n }\n )\n desc: Optional[MultiLanguageOverviewParagraph] = field(\n default=None,\n metadata={\n \"name\": \"DESC\",\n \"type\": \"Element\",\n \"namespace\": \"http://autosar.org/schema/r4.0\",\n }\n )\n category: Optional[CategoryString] = field(\n default=None,\n metadata={\n \"name\": \"CATEGORY\",\n \"type\": \"Element\",\n \"namespace\": \"http://autosar.org/schema/r4.0\",\n }\n )\n admin_data: Optional[AdminData] = field(\n default=None,\n metadata={\n \"name\": \"ADMIN-DATA\",\n \"type\": \"Element\",\n \"namespace\": \"http://autosar.org/schema/r4.0\",\n }\n )\n introduction: Optional[DocumentationBlock] = field(\n default=None,\n metadata={\n \"name\": \"INTRODUCTION\",\n \"type\": \"Element\",\n \"namespace\": \"http://autosar.org/schema/r4.0\",\n }\n )\n annotations: Optional[\"CanNmCluster.Annotations\"] = field(\n default=None,\n metadata={\n \"name\": \"ANNOTATIONS\",\n \"type\": \"Element\",\n \"namespace\": \"http://autosar.org/schema/r4.0\",\n }\n )\n communication_cluster_ref: Optional[\"CanNmCluster.CommunicationClusterRef\"] = field(\n default=None,\n metadata={\n \"name\": \"COMMUNICATION-CLUSTER-REF\",\n \"type\": \"Element\",\n \"namespace\": \"http://autosar.org/schema/r4.0\",\n }\n )\n nm_channel_id: Optional[PositiveInteger] = field(\n default=None,\n metadata={\n \"name\": \"NM-CHANNEL-ID\",\n \"type\": \"Element\",\n \"namespace\": \"http://autosar.org/schema/r4.0\",\n }\n )\n nm_channel_sleep_master: Optional[Boolean] = field(\n default=None,\n metadata={\n \"name\": \"NM-CHANNEL-SLEEP-MASTER\",\n \"type\": \"Element\",\n \"namespace\": \"http://autosar.org/schema/r4.0\",\n }\n )\n nm_nodes: Optional[\"CanNmCluster.NmNodes\"] = field(\n default=None,\n metadata={\n \"name\": \"NM-NODES\",\n \"type\": \"Element\",\n \"namespace\": \"http://autosar.org/schema/r4.0\",\n }\n )\n nm_node_detection_enabled: Optional[Boolean] = field(\n default=None,\n metadata={\n \"name\": \"NM-NODE-DETECTION-ENABLED\",\n \"type\": \"Element\",\n \"namespace\": \"http://autosar.org/schema/r4.0\",\n }\n )\n nm_node_id_enabled: Optional[Boolean] = field(\n default=None,\n metadata={\n \"name\": \"NM-NODE-ID-ENABLED\",\n \"type\": \"Element\",\n 
\"namespace\": \"http://autosar.org/schema/r4.0\",\n }\n )\n nm_pnc_participation: Optional[Boolean] = field(\n default=None,\n metadata={\n \"name\": \"NM-PNC-PARTICIPATION\",\n \"type\": \"Element\",\n \"namespace\": \"http://autosar.org/schema/r4.0\",\n }\n )\n nm_repeat_msg_ind_enabled: Optional[Boolean] = field(\n default=None,\n metadata={\n \"name\": \"NM-REPEAT-MSG-IND-ENABLED\",\n \"type\": \"Element\",\n \"namespace\": \"http://autosar.org/schema/r4.0\",\n }\n )\n nm_synchronizing_network: Optional[Boolean] = field(\n default=None,\n metadata={\n \"name\": \"NM-SYNCHRONIZING-NETWORK\",\n \"type\": \"Element\",\n \"namespace\": \"http://autosar.org/schema/r4.0\",\n }\n )\n variation_point: Optional[VariationPoint] = field(\n default=None,\n metadata={\n \"name\": \"VARIATION-POINT\",\n \"type\": \"Element\",\n \"namespace\": \"http://autosar.org/schema/r4.0\",\n }\n )\n nm_busload_reduction_active: Optional[Boolean] = field(\n default=None,\n metadata={\n \"name\": \"NM-BUSLOAD-REDUCTION-ACTIVE\",\n \"type\": \"Element\",\n \"namespace\": \"http://autosar.org/schema/r4.0\",\n }\n )\n nm_car_wake_up_bit_position: Optional[PositiveInteger] = field(\n default=None,\n metadata={\n \"name\": \"NM-CAR-WAKE-UP-BIT-POSITION\",\n \"type\": \"Element\",\n \"namespace\": \"http://autosar.org/schema/r4.0\",\n }\n )\n nm_car_wake_up_filter_enabled: Optional[Boolean] = field(\n default=None,\n metadata={\n \"name\": \"NM-CAR-WAKE-UP-FILTER-ENABLED\",\n \"type\": \"Element\",\n \"namespace\": \"http://autosar.org/schema/r4.0\",\n }\n )\n nm_car_wake_up_filter_node_id: Optional[PositiveInteger] = field(\n default=None,\n metadata={\n \"name\": \"NM-CAR-WAKE-UP-FILTER-NODE-ID\",\n \"type\": \"Element\",\n \"namespace\": \"http://autosar.org/schema/r4.0\",\n }\n )\n nm_car_wake_up_rx_enabled: Optional[Boolean] = field(\n default=None,\n metadata={\n \"name\": \"NM-CAR-WAKE-UP-RX-ENABLED\",\n \"type\": \"Element\",\n \"namespace\": \"http://autosar.org/schema/r4.0\",\n }\n )\n nm_cbv_position: Optional[Integer] = field(\n default=None,\n metadata={\n \"name\": \"NM-CBV-POSITION\",\n \"type\": \"Element\",\n \"namespace\": \"http://autosar.org/schema/r4.0\",\n }\n )\n nm_channel_active: Optional[Boolean] = field(\n default=None,\n metadata={\n \"name\": \"NM-CHANNEL-ACTIVE\",\n \"type\": \"Element\",\n \"namespace\": \"http://autosar.org/schema/r4.0\",\n }\n )\n nm_immediate_nm_cycle_time: Optional[TimeValue] = field(\n default=None,\n metadata={\n \"name\": \"NM-IMMEDIATE-NM-CYCLE-TIME\",\n \"type\": \"Element\",\n \"namespace\": \"http://autosar.org/schema/r4.0\",\n }\n )\n nm_immediate_nm_transmissions: Optional[PositiveInteger] = field(\n default=None,\n metadata={\n \"name\": \"NM-IMMEDIATE-NM-TRANSMISSIONS\",\n \"type\": \"Element\",\n \"namespace\": \"http://autosar.org/schema/r4.0\",\n }\n )\n nm_message_timeout_time: Optional[TimeValue] = field(\n default=None,\n metadata={\n \"name\": \"NM-MESSAGE-TIMEOUT-TIME\",\n \"type\": \"Element\",\n \"namespace\": \"http://autosar.org/schema/r4.0\",\n }\n )\n nm_msg_cycle_time: Optional[TimeValue] = field(\n default=None,\n metadata={\n \"name\": \"NM-MSG-CYCLE-TIME\",\n \"type\": \"Element\",\n \"namespace\": \"http://autosar.org/schema/r4.0\",\n }\n )\n nm_network_timeout: Optional[TimeValue] = field(\n default=None,\n metadata={\n \"name\": \"NM-NETWORK-TIMEOUT\",\n \"type\": \"Element\",\n \"namespace\": \"http://autosar.org/schema/r4.0\",\n }\n )\n nm_nid_position: Optional[Integer] = field(\n default=None,\n metadata={\n \"name\": 
\"NM-NID-POSITION\",\n \"type\": \"Element\",\n \"namespace\": \"http://autosar.org/schema/r4.0\",\n }\n )\n nm_remote_sleep_indication_time: Optional[TimeValue] = field(\n default=None,\n metadata={\n \"name\": \"NM-REMOTE-SLEEP-INDICATION-TIME\",\n \"type\": \"Element\",\n \"namespace\": \"http://autosar.org/schema/r4.0\",\n }\n )\n nm_repeat_message_time: Optional[TimeValue] = field(\n default=None,\n metadata={\n \"name\": \"NM-REPEAT-MESSAGE-TIME\",\n \"type\": \"Element\",\n \"namespace\": \"http://autosar.org/schema/r4.0\",\n }\n )\n nm_user_data_length: Optional[Integer] = field(\n default=None,\n metadata={\n \"name\": \"NM-USER-DATA-LENGTH\",\n \"type\": \"Element\",\n \"namespace\": \"http://autosar.org/schema/r4.0\",\n }\n )\n nm_wait_bus_sleep_time: Optional[TimeValue] = field(\n default=None,\n metadata={\n \"name\": \"NM-WAIT-BUS-SLEEP-TIME\",\n \"type\": \"Element\",\n \"namespace\": \"http://autosar.org/schema/r4.0\",\n }\n )\n s: Optional[str] = field(\n default=None,\n metadata={\n \"name\": \"S\",\n \"type\": \"Attribute\",\n }\n )\n t: Optional[str] = field(\n default=None,\n metadata={\n \"name\": \"T\",\n \"type\": \"Attribute\",\n \"pattern\": r\"([0-9]{4}-[0-9]{2}-[0-9]{2})(T[0-9]{2}:[0-9]{2}:[0-9]{2}(Z|([+\\-][0-9]{2}:[0-9]{2})))?\",\n }\n )\n uuid: Optional[str] = field(\n default=None,\n metadata={\n \"name\": \"UUID\",\n \"type\": \"Attribute\",\n }\n )\n\n @dataclass\n class ShortNameFragments:\n short_name_fragment: List[ShortNameFragment] = field(\n default_factory=list,\n metadata={\n \"name\": \"SHORT-NAME-FRAGMENT\",\n \"type\": \"Element\",\n \"namespace\": \"http://autosar.org/schema/r4.0\",\n }\n )\n\n @dataclass\n class Annotations:\n annotation: List[Annotation] = field(\n default_factory=list,\n metadata={\n \"name\": \"ANNOTATION\",\n \"type\": \"Element\",\n \"namespace\": \"http://autosar.org/schema/r4.0\",\n }\n )\n\n @dataclass\n class CommunicationClusterRef(Ref):\n dest: Optional[CommunicationClusterSubtypesEnum] = field(\n default=None,\n metadata={\n \"name\": \"DEST\",\n \"type\": \"Attribute\",\n \"required\": True,\n }\n )\n\n @dataclass\n class NmNodes:\n can_nm_node: List[CanNmNode] = field(\n default_factory=list,\n metadata={\n \"name\": \"CAN-NM-NODE\",\n \"type\": \"Element\",\n \"namespace\": \"http://autosar.org/schema/r4.0\",\n }\n )\n flexray_nm_node: List[FlexrayNmNode] = field(\n default_factory=list,\n metadata={\n \"name\": \"FLEXRAY-NM-NODE\",\n \"type\": \"Element\",\n \"namespace\": \"http://autosar.org/schema/r4.0\",\n }\n )\n j_1939_nm_node: List[J1939NmNode] = field(\n default_factory=list,\n metadata={\n \"name\": \"J-1939-NM-NODE\",\n \"type\": \"Element\",\n \"namespace\": \"http://autosar.org/schema/r4.0\",\n }\n )\n udp_nm_node: List[UdpNmNode] = field(\n default_factory=list,\n metadata={\n \"name\": \"UDP-NM-NODE\",\n \"type\": \"Element\",\n \"namespace\": \"http://autosar.org/schema/r4.0\",\n }\n )\n","sub_path":"autosar/models/can_nm_cluster.py","file_name":"can_nm_cluster.py","file_ext":"py","file_size_in_byte":21112,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"460727962","text":"#\n# Copyright (C) 2016 Yi-Wei Ci\n#\n# Distributed under the terms of the MIT license.\n#\n\nimport os\nfrom base64 import b64encode\nfrom vdtools.dev.driver import Driver\nfrom vdtools.lib.util import readlink\nfrom vdtools.lib.modes import MODE_OVP, MODE_SWITCH\n\nHOME = '~/vdev/dev/fileloader'\n\nclass FileLoader(Driver):\n def __init__(self, name=None):\n 
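# delegate to the base Driver, combining the OVP and SWITCH mode flags (freq=1)\n        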
Driver.__init__(self, name=name, mode=MODE_OVP | MODE_SWITCH, freq=1)\n\n    def setup(self):\n        path = self._get_path()\n        os.system('mkdir -p %s' % path)\n        self._files = None\n        self._active = False\n\n    def _get_path(self):\n        path = os.path.join(HOME, self.get_name())\n        return readlink(path)\n\n    def _load(self):\n        path = self._get_path()\n        for name in os.listdir(path):\n            file_path = os.path.join(path, name)\n            with open(file_path) as f:\n                buf = f.read()\n                if buf:\n                    yield {'name':self.get_name(), 'content':b64encode(buf)}\n\n    def get(self):\n        if not self._active:\n            return\n        try:\n            return self._files.next()\n        except StopIteration:\n            self._active = False\n\n    def open(self):\n        self._files = self._load()\n        self._active = True\n","sub_path":"vdtools/drivers/fileloader/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1254,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"598249743","text":"import numpy as np\nimport matplotlib.pyplot as plt\ny = np.asarray([0.4, 3.9, 1.3])\npuv = np.asarray([1.8, 2.7, 3.1])\nci = np.asarray([0.3, 1.7, 4.3])\npiuv = puv - ci\n\nd1 = (np.linalg.norm(puv-ci)) * (np.linalg.norm(puv-ci)) * (np.linalg.norm(puv-y)) * (np.linalg.norm(puv-y))\nd2 = np.dot((puv-ci), (puv-y)) * np.dot((puv-ci), (puv-y))\n\nd1v = (np.linalg.norm(puv-ci)) * (np.linalg.norm(puv-ci)) * (np.dot(puv, puv)-2*np.dot(puv, y)+np.dot(y, y))\n\nP = np.matmul(np.asmatrix(piuv).transpose(), np.asmatrix(piuv))\nd2v = np.matmul(np.asmatrix(puv), np.matmul(P, np.asmatrix(puv).transpose())) - 2*np.matmul(np.asmatrix(puv), np.matmul(P, np.asmatrix(y).transpose())) + np.matmul(np.asmatrix(y), np.matmul(P, np.asmatrix(y).transpose()))\nl = d1*d1 - d1v\nprint('d')\n\nsigma = 0.06\ncurve = []\nfor i in range(-1000, 1000):\n    ii = i/100.0\n    curve.append(np.exp(-ii*ii/(2*sigma**2)) * (ii/(sigma)))\n    #curve.append(np.exp(-ii * ii / (2 * sigma ** 2)) * np.sin(ii/sigma))\n    #curve.append(np.exp(-ii*ii/(sigma**2)) - 1*np.abs(ii*ii/(sigma**2))*np.exp(-ii*ii/(sigma**2)))\nplt.plot(curve)\nplt.show()","sub_path":"test_vector.py","file_name":"test_vector.py","file_ext":"py","file_size_in_byte":1091,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"218625485","text":"# -*- coding: utf-8 -*-\n\nfrom pymongo import UpdateOne, DESCENDING\nfrom factor.base_factor import BaseFactor\nfrom data.finance_report_crawler import FinanceReportCrawler\nfrom data.data_module import DataModule\nfrom util.stock_util import get_all_codes\nfrom util.database import DB_CONN\n\n\"\"\"\nCompute and save the price-to-earnings (PE) factor\n\"\"\"\n\n\nclass PEFactor(BaseFactor):\n    def __init__(self):\n        BaseFactor.__init__(self, name='pe')\n\n    def compute(self, begin_date, end_date):\n        \"\"\"\n        Compute this factor for every stock over the given period and save the values to the database\n        :param begin_date: start date\n        :param end_date: end date\n        \"\"\"\n        dm = DataModule()\n\n        # fetch all stock codes\n        codes = get_all_codes()\n\n        for code in codes:\n            print('computing PE, %s' % code)\n            df_daily = dm.get_k_data(code, autype=None, begin_date=begin_date, end_date=end_date)\n\n            if df_daily.index.size > 0:\n                df_daily.set_index(['date'], 1, inplace=True)\n\n                update_requests = []\n                for date in df_daily.index:\n                    finance_report = DB_CONN['finance_report'].find_one(\n                        {'code': code, 'report_date': {'$regex': '\\\\d{4}-12-31'}, 'announced_date': {'$lte': date}},\n                        sort=[('announced_date', DESCENDING)]\n                    )\n\n                    if finance_report is None:\n                        continue\n\n                    # compute the trailing PE and save it into daily_k\n                    eps = 0\n                    if finance_report['eps'] != '-':\n                        eps = finance_report['eps']\n\n                    # compute the PE\n                    if eps != 0:\n                        pe = round(df_daily.loc[date]['close'] / eps, 3)\n\n                        print('%s, %s, %s, eps: %5.2f, pe: %6.2f' %\n                              (code, date, finance_report['announced_date'], finance_report['eps'], pe),\n                              flush=True)\n\n                        update_requests.append(\n                            UpdateOne(\n                                {'code': code, 'date': date},\n                                {'$set': {'code': code, 'date': date, 'pe': pe}}, upsert=True))\n\n                if len(update_requests) > 0:\n                    save_result = self.collection.bulk_write(update_requests, ordered=False)\n                    print('code: %s, factor: %s, inserted: %4d, updated: %4d' %\n                          (code, self.name, save_result.upserted_count, save_result.modified_count), flush=True)\n\n\nif __name__ == '__main__':\n    # run the factor extraction job\n    PEFactor().compute('2016-01-01', '2017-12-31')\n","sub_path":"xiaoxiang/08.第八课:技术型和基本面因子的编写_财务因子/第8课代码/factor/pe_factor.py","file_name":"pe_factor.py","file_ext":"py","file_size_in_byte":2727,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"486244023","text":"import time\r\nfrom watchdog.observers import Observer\r\nfrom watchdog.events import FileSystemEventHandler\r\nimport PyPDF2\r\nimport os, os.path\r\nimport glob\r\nimport threading\r\nimport sched\r\nimport requests\r\nimport re\r\nimport win32print\r\nimport shutil\r\nimport win32api\r\n\r\nclass Watcher:\r\n    DIRECTORY_TO_WATCH = \"printer-lis/\"\r\n\r\n    def __init__(self):\r\n        self.observer = Observer()\r\n\r\n    def run(self):\r\n        event_handler = Handler()\r\n        self.observer.schedule(event_handler, self.DIRECTORY_TO_WATCH, recursive=True)\r\n        self.observer.start()\r\n        try:\r\n            while True:\r\n                time.sleep(3)\r\n        except:\r\n            self.observer.stop()\r\n            print(\"Error\")\r\n\r\n        self.observer.join()\r\n\r\n\r\nclass Handler(FileSystemEventHandler):\r\n\r\n    @staticmethod\r\n    def on_any_event(event):\r\n        if event.is_directory:\r\n            return None\r\n\r\n        elif event.event_type == 'created':\r\n            # Take any action here when a file is first created.\r\n            #print(\"Received created event - %s.\" % event.src_path)\r\n            time.sleep(3)\r\n            list = os.listdir('printer-lis')\r\n            print(list)\r\n            pdf_dir=\"printer-lis\"\r\n            number_files = len(list)\r\n            if number_files > 0:\r\n                for k in range(0,number_files):\r\n                    string1 = 'printer-lis'\r\n                    string = 'printer-lis/' + str(list[k])\r\n                    f = open(string, 'rb')\r\n                    fileReader = PyPDF2.PdfFileReader(f)\r\n                    title=fileReader.getDocumentInfo()[\"/Title\"]\r\n                    f.close()\r\n                    if title == \"Barcodes\":\r\n                        win32print.SetDefaultPrinter('zebraa')\r\n                    else:\r\n                        win32print.SetDefaultPrinter('HP LaserJet Professional P1102 (Copy 1)')\r\n\r\n                    print(\"printing file \"+ string +\" on \"+str(win32print.GetDefaultPrinter()))\r\n                    win32api.ShellExecute(0, \"print\", os.path.join(string1,str(list[k])), None, \".\", 0)\r\n                    time.sleep(3)\r\n\r\n                files = glob.glob('printer-lis/*')\r\n                for f in files:\r\n                    os.remove(f)\r\n\r\n        #elif event.event_type == 'modified':\r\n            # Take any action here when a file is modified.\r\n            #print(\"Received modified event - %s.\" % event.src_path)\r\n\r\n\r\n\r\n\r\nif __name__ == '__main__':\r\n    w = Watcher()\r\n    w.run()\r\n","sub_path":"print-script_old.py","file_name":"print-script_old.py","file_ext":"py","file_size_in_byte":2475,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"101535636","text":"# -*- coding: utf-8 -*-\n\"\"\"Fetch metadata from plugins and dump it to a temporary JSON file.\n\nThis fetches metadata from plugins defined using different build systems\n * setuptools/pip: setup.json\n * poetry: pyproject.toml\n * flit: pyproject.toml\n\"\"\"\n# pylint: 
disable=missing-function-docstring\n\nimport urllib\nimport json\nimport os\nimport sys\nimport traceback\nfrom collections import OrderedDict\nimport six\n\nimport requests\nimport requests_cache\nimport tomlkit\nfrom . import PLUGINS_METADATA, PLUGINS_FILE_ABS, status_dict, PLUGINS_METADATA_KEYS\n\nif os.environ.get('CACHE_REQUESTS'):\n # Set environment variable CACHE_REQUESTS to cache requests for 1 day for faster testing\n # e.g.: export CACHE_REQUESTS=1\n requests_cache.install_cache('demo_cache', expire_after=60 * 60 * 24)\n\nGITHUB_ACTIONS = os.environ.get('GITHUB_ACTIONS') == 'true'\nLOG = [] # global log messages\nPLUGIN_LOG = [] # per-plugin log messages\n\n\ndef report(string):\n \"\"\"Write to stdout and log.\n\n Used to display log in actions.\n \"\"\"\n if GITHUB_ACTIONS:\n # Set the step ouput error message which can be used, e.g., for display as part of an issue comment.\n PLUGIN_LOG.append(string)\n print(string)\n\n\ndef get_hosted_on(url):\n try:\n requests.get(url, timeout=30)\n except Exception:\n raise ValueError(\"Unable to open 'code_home' url: '{}'\".format(url))\n\n netloc = urllib.parse.urlparse(url).netloc\n\n # Remove port (if any)\n netloc = netloc.partition(':')[0]\n\n # Remove subdomains (this only works for domain suffixes of length 1!)\n # TODO: fix it for domains like yyy.co.uk # pylint: disable=fixme\n netloc = '.'.join(netloc.split('.')[-2:])\n\n return netloc\n\n\ndef fetch_plugin_info(url):\n \"\"\"Fetches plugin metadata in different formats.\n\n setup.json (for pip/setuptools)\n pyproject.toml (for poetry/flit)\n \"\"\"\n try:\n response = requests.get(url)\n response.raise_for_status(\n ) # raise an exception for all 4xx/5xx errors\n except Exception: # pylint: disable=broad-except\n report(\n ' > WARNING! Unable to retrieve plugin info from: {}'.format(url))\n report(traceback.format_exc())\n return None\n\n if 'pyproject.toml' in url:\n try:\n pyproject = tomlkit.parse(response.content)\n except tomlkit.exceptions.TOMLKitError:\n report(' > WARNING! Unable to parse TOML')\n\n for buildsystem in ('poetry', 'flit'):\n if buildsystem in pyproject['tool']:\n return (buildsystem, pyproject)\n report(' > WARNING! Unknown build system in pyproject.toml')\n else:\n try:\n return ('setuptools', json.loads(response.content))\n except ValueError:\n report(' > WARNING! Unable to parse JSON')\n\n return None\n\n\ndef get_aiida_version_setup_json(setup_json):\n \"\"\"Get AiiDA version that this plugin is compatible with.\n \"\"\"\n import requirements # pylint: disable=import-outside-toplevel\n\n try:\n install_requires = setup_json['install_requires']\n reqs = requirements.parse('\\n'.join(install_requires))\n\n aiida_specs = []\n for req in reqs:\n # note: this also catches aiida-core[extra1]\n if req.name in ['aiida-core', 'aiida_core', 'aiida']:\n aiida_specs += req.specs\n\n if not aiida_specs:\n report(' > WARNING! AiiDA version not specified')\n return None\n\n # precedence of version specs, from high to low\n precedence = ['==', '>=', '>', '<=', '<']\n sort_order = {precedence[i]: i for i in range(len(precedence))}\n aiida_specs = sorted(aiida_specs,\n key=lambda r: sort_order.get(r[0], 10))\n\n # first index: operator (e,g, '>=')\n # second index: version (e.g. '0.12.0rc2')\n # In the future, this can be used to e.g. 
display a banner for 1.0-compatible plugins\n return ','.join([s[0] + s[1] for s in aiida_specs])\n\n except KeyError:\n return None\n\n\ndef get_aiida_version_poetry(pyproject):\n \"\"\"Get AiiDA version that this plugin is compatible with from a pyproject.toml.\n \"\"\"\n from poetry.semver import parse_constraint # pylint: disable=import-outside-toplevel\n\n try:\n deps = pyproject['tool']['poetry']['dependencies']\n except KeyError:\n return None\n\n for name, data in deps.items():\n if name not in ['aiida-core', 'aiida_core', 'aiida']:\n continue\n\n try: # data is either a dict {\"version\": ..., \"extras\": [\"..\", ], }\n version = data['version']\n except TypeError: # or directly the version string\n version = data\n\n break\n else:\n report(' > WARNING! AiiDA version not specified')\n return None\n\n try:\n return str(parse_constraint(version))\n except ValueError:\n report(\n ' > WARNING: Invalid version encountered in Poetry pyproject.toml for aiida-core'\n )\n\n return None\n\n\ndef get_plugin_info(plugin_info):\n \"\"\"Fetch metadata from plugin_info url.\n\n This adds the keys:\n * entry_points\n * metadata\n * aiida_version\n\n \"\"\"\n infos = {\n 'entry_points': {},\n 'metadata': None,\n 'aiida_version': None,\n }\n\n if plugin_info is None:\n return infos\n\n buildsystem, data = plugin_info\n\n if buildsystem not in ['setuptools', 'poetry', 'flit']:\n report(\" > WARNING! build system '{}' is not supported\".format(\n buildsystem))\n return infos\n\n try:\n if buildsystem == 'setuptools':\n infos['entry_points'].update(\n data['entry_points']) # updating it gives us a copy\n elif buildsystem == 'poetry':\n infos['entry_points'].update({\n group: ['{} = {}'.format(k, v) for k, v in entries.items()]\n for group, entries in data['tool']['poetry']\n ['plugins'].items()\n })\n elif buildsystem == 'flit':\n infos['entry_points'].update({\n group: ['{} = {}'.format(k, v) for k, v in entries.items()]\n for group, entries in data['tool']['flit']\n ['entrypoints'].items()\n })\n except KeyError:\n pass\n\n if buildsystem == 'setuptools':\n infos['metadata'] = {\n k: data[k] if k in data else ''\n for k in PLUGINS_METADATA_KEYS\n }\n\n # pylint: disable=unsupported-assignment-operation\n infos['aiida_version'] = get_aiida_version_setup_json(data)\n infos['metadata']['classifiers'] = data[\n 'classifiers'] if 'classifiers' in data else []\n\n if 'Framework :: AiiDA' not in infos['metadata']['classifiers']: # pylint: disable=unsubscriptable-object\n report(\" > WARNING: Missing classifier 'Framework :: AiiDA'\")\n\n elif buildsystem == 'poetry':\n # all the following fields are mandatory in Poetry\n infos['metadata'] = {\n 'version':\n data['tool']['poetry']['version'],\n 'description':\n data['tool']['poetry']['description'],\n # the authors is a list of the strings of the form \"name \"\n 'author':\n ', '.join(\n a.split('<')[0].strip()\n for a in data['tool']['poetry']['authors']),\n }\n infos['aiida_version'] = get_aiida_version_poetry(data)\n elif buildsystem == 'flit':\n # version is not part of the metadata but expected to available in /__init__.py:__version__\n # description is available as a reference in `description-file` (requires another fetch)\n # author is a mandatory field in Flit\n infos['metadata'] = {\n 'author': data['tool']['flit']['metadata']['author'],\n 'version': '',\n 'description': '',\n }\n report(\n ' > WARNING! 
version & description metadata and AiiDA version'\n ' are not (yet) parsed from the Flit buildsystem pyproject.toml')\n\n return infos\n\n\ndef complete_plugin_data(plugin_data):\n \"\"\"Update plugin data dictionary.\n\n * add metadata, aiida_version and entrypoints from plugin_info\n * add package_name if missing\n * add hosted_on\n & more\n used for rendering.\"\"\"\n global LOG, PLUGIN_LOG # pylint:disable=global-statement\n\n if 'package_name' not in list(plugin_data.keys()):\n plugin_data['package_name'] = plugin_data['name'].replace('-', '_')\n\n report(f' - {plugin_data[\"package_name\"]}')\n\n # Get link to setup.json file (set to None if not retrievable)\n try:\n plugin_info_link = plugin_data['plugin_info']\n except KeyError:\n report(' > WARNING: Missing plugin_info key!')\n plugin_data['plugin_info'] = None\n else:\n plugin_data['plugin_info'] = fetch_plugin_info(plugin_info_link)\n\n plugin_data['hosted_on'] = get_hosted_on(plugin_data['code_home'])\n\n plugin_data.update(get_plugin_info(plugin_data['plugin_info']))\n\n # note: for more validation, it might be sensible to switch to voluptuous\n if plugin_data['development_status'] not in list(status_dict.keys()):\n report(\" > WARNING: Invalid state '{}'\".format(\n plugin_data['development_status']))\n\n if 'documentation_url' in plugin_data:\n validate_doc_url(plugin_data['documentation_url'])\n\n validate_plugin_entry_points(plugin_data)\n\n if 'WARNING' in '\\n'.join(PLUGIN_LOG):\n LOG += PLUGIN_LOG\n PLUGIN_LOG = []\n\n return plugin_data\n\n\ndef validate_doc_url(url):\n \"\"\"Validate that documentation URL provides valid HTTP response.\"\"\"\n try:\n response = requests.get(url)\n response.raise_for_status(\n ) # raise an exception for all 4xx/5xx errors\n except Exception: # pylint: disable=broad-except\n report(\n ' > WARNING! 
Unable to reach documentation URL: {}'.format(url))\n report(traceback.print_exc(file=sys.stdout))\n\n\ndef validate_plugin_entry_points(plugin_data):\n \"\"\"Validate that all entry points registered by the plugin start with the registered entry point root.\"\"\"\n\n if 'entry_point_prefix' in plugin_data:\n entry_point_root = plugin_data['entry_point_prefix']\n if not 'aiida_' + plugin_data['entry_point_prefix'].lower(\n ) == plugin_data['package_name'].lower():\n report(\n f\" > WARNING: Prefix \\'{plugin_data['entry_point_prefix']}\\' does not follow naming convention.\"\n )\n else:\n # plugin should not specify any entry points\n entry_point_root = 'MISSING'\n\n for ept_group, ept_list in plugin_data['entry_points'].items():\n # we only restrict aiida's entry point groups\n if not ept_group.startswith('aiida.'):\n continue\n for ept in ept_list:\n ept_string, _path = ept.split('=')\n ept_string = ept_string.strip()\n if not ept_string.startswith(entry_point_root):\n report(\n f\" > WARNING: Entry point '{ept_string}' does not start with prefix '{entry_point_root}.'\"\n )\n\n\ndef fetch_metadata():\n\n with open(PLUGINS_FILE_ABS) as handle:\n plugins_raw_data = json.load(handle)\n\n plugins_metadata = OrderedDict()\n\n for plugin_name, plugin_data in sorted(six.iteritems(plugins_raw_data)):\n plugins_metadata[plugin_name] = complete_plugin_data(plugin_data)\n\n with open(PLUGINS_METADATA, 'w') as handle:\n json.dump(plugins_metadata, handle, indent=2)\n report(' - {} dumped'.format(PLUGINS_METADATA))\n\n if GITHUB_ACTIONS:\n print('::set-output name=error::' + '%0A'.join(LOG))\n","sub_path":"aiida_registry/fetch_metadata.py","file_name":"fetch_metadata.py","file_ext":"py","file_size_in_byte":11730,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"590023312","text":"# coding=utf-8\n# --------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. See License.txt in the project root for\n# license information.\n#\n# Code generated by Microsoft (R) AutoRest Code Generator.\n# Changes may cause incorrect behavior and will be lost if the code is\n# regenerated.\n# --------------------------------------------------------------------------\n\nfrom msrest.serialization import Model\n\n\nclass SourceControlSyncJobStreamById(Model):\n \"\"\"Definition of the source control sync job stream by id.\n\n Variables are only populated by the server, and will be ignored when\n sending a request.\n\n :ivar id: Resource id.\n :vartype id: str\n :param source_control_sync_job_stream_id: The sync job stream id.\n :type source_control_sync_job_stream_id: str\n :param summary: The summary of the sync job stream.\n :type summary: str\n :ivar time: The time of the sync job stream.\n :vartype time: datetime\n :param stream_type: The type of the sync job stream. 
Possible values\n include: 'Error', 'Output'\n :type stream_type: str or ~azure.mgmt.automation.models.StreamType\n :param stream_text: The text of the sync job stream.\n :type stream_text: str\n :param value: The values of the job stream.\n :type value: dict[str, object]\n \"\"\"\n\n _validation = {\n 'id': {'readonly': True},\n 'time': {'readonly': True},\n }\n\n _attribute_map = {\n 'id': {'key': 'id', 'type': 'str'},\n 'source_control_sync_job_stream_id': {'key': 'properties.sourceControlSyncJobStreamId', 'type': 'str'},\n 'summary': {'key': 'properties.summary', 'type': 'str'},\n 'time': {'key': 'properties.time', 'type': 'iso-8601'},\n 'stream_type': {'key': 'properties.streamType', 'type': 'str'},\n 'stream_text': {'key': 'properties.streamText', 'type': 'str'},\n 'value': {'key': 'properties.value', 'type': '{object}'},\n }\n\n def __init__(self, *, source_control_sync_job_stream_id: str=None, summary: str=None, stream_type=None, stream_text: str=None, value=None, **kwargs) -> None:\n super(SourceControlSyncJobStreamById, self).__init__(**kwargs)\n self.id = None\n self.source_control_sync_job_stream_id = source_control_sync_job_stream_id\n self.summary = summary\n self.time = None\n self.stream_type = stream_type\n self.stream_text = stream_text\n self.value = value\n","sub_path":"sdk/automation/azure-mgmt-automation/azure/mgmt/automation/models/source_control_sync_job_stream_by_id_py3.py","file_name":"source_control_sync_job_stream_by_id_py3.py","file_ext":"py","file_size_in_byte":2518,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"396357599","text":"from typing import TypeVar, Sequence, Mapping, Set, Tuple, Optional\nimport numpy as np\nimport random\nfrom tabular_RL import tabularRL\nfrom policy import Policy\nfrom helper import getSingleRV, getReturnsTerminating\n\nclass TD0():\n def __init__(self, \n tabular_RL : tabularRL, \n number_of_episodes : int, \n number_of_steps : int, \n learning_rate : float,\n learning_rate_decay : float):\n self.tabular_RL = tabular_RL\n self.number_of_episodes = number_of_episodes\n self.number_of_steps = number_of_steps\n self.learning_rate = learning_rate\n self.learning_rate_decay = learning_rate_decay\n\n def getValueFunctionPrediction(self, policy : Policy):\n value_function_prediction = {s: 0.0 for s in self.tabular_RL.state_action_relation.keys()}\n act_gen_dict = {s: getSingleRV(policy.get_state_probabilities(s))\n for s in self.tabular_RL.state_action_relation.keys()}\n episodes = 0\n updates = 0\n\n while episodes < self.number_of_episodes:\n state = self.tabular_RL.generateState()\n steps = 0\n terminate = False\n while not terminate:\n action = act_gen_dict[state]()\n next_state, reward = self.tabular_RL.state_action_nextstate_reward_relation[state][action]()\n value_function_prediction[state] += self.learning_rate * \\\n (updates / self.learning_rate_decay + 1) ** (-0.5) * \\\n (reward + self.tabular_RL.gamma * value_function_prediction[next_state] - value_function_prediction[state])\n updates += 1\n steps += 1\n terminate = steps >= self.number_of_steps or state in self.tabular_RL.terminal_states\n state = next_state\n episodes += 1\n return value_function_prediction\n\nif __name__ == '__main__':\n # model parameters are same as those used in DP algorithms\n state_action_relation = {1: [1, 2], 2: [1, 2], 3: [1, 2]}\n terminal_state = [3]\n state_action_nextstate_reward_relation = {\n 1: {\n 1: {\n 1: (0.3, 5.0), 2: (0.3, 5.0), 3: (0.4, 5.0)\n },\n 2: {\n 1: (0.5, 15.0), 2: 
(0.1, 15.0), 3: (0.4, 15.0)\n }\n },\n 2: {\n 1: {\n 1: (0.6, 3.0), 2: (0.1, 3.0), 3: (0.3, 3.0)\n },\n 2: {\n 1: (0.2, -3.0), 2: (0.3, -3.0), 3: (0.5, -3.0)\n }\n },\n 3: {\n 1: {\n 3: (1.0, 0.0)}, 2: {3: (1.0, 0.0)\n }\n }\n }\n policy = {\n 1: {\n 1: 0.5, 2: 0.5\n }, \n 2: {\n 1: 0.2, 2: 0.8\n }, \n 3: {\n 1: 1.0, 2: 0.0\n }\n }\n \n gamma = 0.8\n number_of_episodes = 100\n number_of_steps = 100\n learning_rate = 0.1\n learning_rate_decay = 1e5\n \n tabular_RL = tabularRL(state_action_relation, state_action_nextstate_reward_relation, terminal_state, gamma)\n td = TD0(tabular_RL, number_of_episodes, number_of_steps, learning_rate, learning_rate_decay)\n val_func = td.getValueFunctionPrediction(Policy(policy))\n","sub_path":"Assignments/0_Basic_DP_RL/RL_basics/tabular_1step_TD.py","file_name":"tabular_1step_TD.py","file_ext":"py","file_size_in_byte":3273,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"482482168","text":"#######################################\r\n# writen by FETTAH Taha & SEBBAH hala\r\n#######################################\r\n\r\n# In[1]:\r\n\r\n\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport os\r\nimport cv2\r\nimport random\r\nimport tensorflow as tf\r\nfrom keras import applications\r\nfrom keras.layers import GlobalAveragePooling2D,Dropout , Dense \r\nfrom keras.models import Model\r\n\r\n\r\n# In[2]:\r\n\r\n\r\nCATEGORIES = []\r\nimgSize = 64\r\nData =[]\r\n\r\nDATADIR = \"C:/Users/hala/Downloads/carsData/Dacia/daciaNet\"\r\nlistDir = os.listdir(DATADIR)\r\nfor category in listDir:\r\n CATEGORIES.append(category)\r\n \r\n\r\n\r\n# In[3]:\r\n\r\n\r\ndef create_training_data():\r\n \r\n for category in CATEGORIES:\r\n path = os.path.join(DATADIR,category)\r\n class_num = CATEGORIES.index(category)\r\n for img in os.listdir(path):\r\n try:\r\n image= cv2.imread(os.path.join(path,img),cv2.IMREAD_GRAYSCALE)\r\n if((image is None)==False):\r\n newImage = cv2.resize(image , (imgSize , imgSize))\r\n Data.append([newImage , class_num])\r\n except Exception as e:\r\n pass\r\n\r\n\r\n# In[4]:\r\n\r\n\r\nData = []\r\ntrainingData = []\r\ntestData = []\r\ncreate_training_data()\r\nnumClasse = 0\r\nindex1 = 0\r\nindex2=0\r\nwhile(index2 self.__len__():\n raise StopIteration\n self._current += 1\n return self[self._current - 1]\n\n def __getitem__(self, key):\n result = self.get()\n skip = result.get('_start')\n limit = result.get('_count')\n if skip is not None and limit is not None:\n # this mean linkedin have been paginating the result\n new_skip = 0\n if key < skip or key >= skip + limit:\n new_skip = int(key / limit) * limit\n key -= new_skip\n result = self.skip(new_skip).limit(limit).get()\n return result.get('values', []).__getitem__(key)\n\n def reset(self):\n self._fetched_result = None\n self._current = 0\n\n # {{{ Filtering methods and aliases\n\n @_watch('filters')\n def filter(self, **kwargs):\n return self._filter(**kwargs)\n\n def _filter(self, **kwargs):\n '''Allows filtering over the api result. this correspond to the GET parameters\n that can be pass to the api. 
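Keyword names have their underscores mapped to hyphens, so e.g. filter(first_name='john') sets the GET parameter 'first-name'; only keys listed in self.accepted_keywords are kept. 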
If filters change, any previous cached result is\n erased and another call to the api is made\n '''\n self.filters.update({\n k.replace('_', '-'): self._handle_filtering_values(v)\n for k, v in kwargs.iteritems() if k in self.accepted_keywords\n })\n return self\n\n def limit(self, limit):\n '''Alias for filtering using the 'count' GET parameter\n '''\n if not 'count' in self.accepted_keywords:\n raise UnavailableMethodForEndpointError('limit', self.endpoint)\n return self._filter(count=limit)\n\n def skip(self, skip):\n '''Alias for filtering using the 'skip' GET parameter\n '''\n if not 'start' in self.accepted_keywords:\n raise UnavailableMethodForEndpointError('count', self.endpoint)\n return self._filter(start=skip)\n\n def sort(self, keyword):\n '''Alias for filtering using the 'sort' GET parameter\n '''\n if not 'sort' in self.accepted_keywords:\n raise UnavailableMethodForEndpointError('sort', self.endpoint)\n return self.filter(sort=keyword)\n\n # }}}\n\n def count(self):\n '''alias for __len__\n '''\n return self.__len__()\n\n def first(self):\n try:\n return self[0]\n except:\n return None\n\n @_watch('selectors')\n def select(self, *selectors):\n '''Set the linekdin selectors to use during the api call\n '''\n self.selectors = selectors\n return self\n","sub_path":"pylinkedin/user_api_queryset.py","file_name":"user_api_queryset.py","file_ext":"py","file_size_in_byte":8402,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"61283370","text":"from statistics import mean\r\nimport numpy as np\r\nfrom matplotlib import pyplot as plt\r\n\r\ndef fit(x,y):\r\n\r\n x=np.array(x,dtype=np.float64)\r\n y=np.array(y,dtype=np.float64)\r\n \r\n ms= ((mean(x)*mean(y))-(mean(x*y)))/((mean(x)*mean(x))-mean(x*x))\r\n bs= mean(y)-(ms*mean(x))\r\n return ms,bs\r\n\r\nx=[70,79,64,80,97,59,80,75,80,80,85,60]\r\ny=[177,160,165,174,175,167,175,177,175,165,180,150]\r\n\r\n#rt=fit(x,y) #rt is a tuple, rt[0]=m, rt[1]=b\r\n#m=rt[0]\r\n#b=rt[1]\r\n\r\n(m,b)=fit(x,y)\r\nxp=84 #test x value\r\nyp=m*xp+b #predicted y value\r\nprint(yp)\r\n\r\n\r\nbestFitx=np.arange(60,101,10)\r\nbestFity= m*bestFitx +b\r\n\r\nplt.plot(bestFitx,bestFity,'r')\r\n\r\nplt.scatter(xp,yp)\r\n\r\nprint(y)\r\ny=np.array(y,dtype=np.float64)\r\nymean=mean(y)\r\n\r\nyp=[ymean for i in range(len(x))]\r\n\r\nplt.plot(x,yp)\r\n\r\nplt.scatter(x,y)\r\nplt.xlabel('Weights(kg)')\r\nplt.ylabel('Heights(cm)')\r\n\r\nplt.show()\r\n","sub_path":"linear_regression_without_sklearn.py","file_name":"linear_regression_without_sklearn.py","file_ext":"py","file_size_in_byte":879,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"289128604","text":"\nimport serial\nimport time\nfrom time import gmtime\nimport calendar\nfrom datetime import datetime, date, timedelta\nimport csv\nimport os\n\ncom = 'COM12'\nser = serial.Serial(com,9600) #port on computer\n\n#filepath = 'C:\\\\Users\\\\LArTPC\\\\Desktop\\\\temphumid\\\\data\\\\464_data\\\\'\nfilepath = '\\\\\\MU2E-CART1\\\\Users\\\\Public\\\\temp_humid_data\\\\464_main\\\\'\nfilename = '464_' + datetime.now().strftime(\"%Y-%m-%d_%H%M%S\")+ '.csv'\ndate0 = date.today()\n\n\nf = open(filepath+filename,\"w\")\nprint( '464 temperature and humidity')\ntime.sleep(3)\n\n#send character '5' to arduino\n#when arduino gets it, it takes data and sends\nf.write('date, temp (C), RH (%), epoch time (s)\\n') \n#try:\nwhile True:\n ser.write(b'5') # send arduino the number '5' in ascii\n time.sleep(10)\t# wait 2 
seconds\n data = str(ser.readline() ) # get data from arduino serial\n data = data[2:]\n x = data.split(',')\n temp = x[0]\n humid = x[1]\n humid = humid[:5]\n \n print( \"464 main -- Temp = \" + temp + \" C Humid = \" + humid +\"%\")\n\n # write to file\n f.write(datetime.now().strftime(\"%Y-%m-%d_%H%M%S\") )\n f.write(',')\n f.write(temp)\n f.write(',')\n f.write(humid)\n f.write(',')\t\t\n f.write(str(time.time()))\n f.write('\\n')\n f.flush()\n\n #start new file after midnight\n datecheck = date.today() - date0\n if(datecheck.days) > 0:\n filename = '464_' + datetime.now().strftime(\"%Y-%m-%d_%H%M%S\")+ '.csv'\n date0 = date.today()\n f.close()\n f = open(filepath+filename,\"w\")\n \n\nf.close() # close file\nser.close()\n","sub_path":"temp_humid_sensor/scripts/temphumid_464.py","file_name":"temphumid_464.py","file_ext":"py","file_size_in_byte":1684,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"275600444","text":"import logging\nimport os\n\nfrom contextlib import contextmanager\nfrom zipfile import ZipFile, ZIP_DEFLATED\n\nlogger = logging.getLogger()\n\n\n@contextmanager\ndef cd(newdir):\n prevdir = os.getcwd()\n os.chdir(newdir)\n try:\n yield\n finally:\n os.chdir(prevdir)\n\n\ndef get_file_paths(directory):\n \"\"\"\n Gets file paths with absolute file paths for copying the files and a relative file path for\n where the file should be located in the datapack relative to the directory.\n \"\"\"\n paths = {}\n with cd(directory):\n for dirpath, _, filenames in os.walk(\"./\"):\n for f in filenames:\n paths[os.path.abspath(os.path.join(dirpath, f))] = os.path.join(dirpath, f)\n return paths\n\n\ndef requires_zip(file_format):\n zipped_formats = [\"KML\", \"ESRI Shapefile\"]\n if file_format in zipped_formats:\n return True\n\n\ndef create_zip_file(in_file, out_file):\n \"\"\"\n\n :param in_file: The file to be compressed.\n :param out_file: The result.\n :return: The archive.\n \"\"\"\n logger.debug(\"Creating the zipfile {0} from {1}\".format(out_file, in_file))\n with ZipFile(out_file, \"a\", compression=ZIP_DEFLATED, allowZip64=True) as zipfile:\n if os.path.isdir(in_file):\n # Shapefiles will be all of the layers in a directory.\n # When this gets zipped they will all be in the same zip file. 
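The layer files are written with flat arcnames (their base names), so every sidecar file sits at the root of the archive. 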
Some applications (QGIS) will\n            # read this without a problem whereas ArcGIS will need the files extracted first.\n            file_paths = get_file_paths(in_file)\n            for absolute_file_path, relative_file_path in file_paths.items():\n                if os.path.isfile(absolute_file_path):\n                    zipfile.write(absolute_file_path, arcname=os.path.basename(relative_file_path))\n        else:\n            zipfile.write(in_file)\n    return out_file\n\n\ndef get_zip_name(file_name):\n    basename, ext = os.path.splitext(file_name)\n    if ext == \".kml\":\n        return basename + \".kmz\"\n    return basename + \".zip\"\n","sub_path":"eventkit_cloud/utils/generic.py","file_name":"generic.py","file_ext":"py","file_size_in_byte":2022,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"629378984","text":"#!/usr/bin/python3\nimport os\nimport sys\nfrom parse_json import get_tweet\nimport argparse\nimport json\nimport traceback\n\nparser = argparse.ArgumentParser(\n    description=\n    \"Convert a folder containing tweet json files into a simple single json file for the downloader.\"\n)\nparser.add_argument(\"-o\",\n                    \"--output\",\n                    dest=\"output\",\n                    required=False,\n                    metavar=\"OUTPUT_DIR\",\n                    default=\"download.json\",\n                    type=str,\n                    help=\"Output json filename\")\n\nparser.add_argument(\"folder\",\n                    type=str,\n                    metavar=\"JSON_DIR\",\n                    help=\"Tweet json files source directory\")\n\nparser.add_argument(\"-q\",\n                    \"--quiet\",\n                    dest=\"quiet\",\n                    help=\"do not display info\",\n                    action=\"store_true\")\n\nparser.add_argument(\"-n\",\n                    \"--extract-nomedia\",\n                    dest=\"nomedia\",\n                    help=\"only output nomedia tweets content\",\n                    action=\"store_true\")\n\nif __name__ == \"__main__\":\n    tweets = []\n    args = parser.parse_args()\n\n    for fn in os.listdir(args.folder):\n        if os.path.isfile(args.folder + \"/\" + fn):\n            id = fn.split('.')[0]\n            if id.isdigit() is False or fn.split('.')[1] != 'json':\n                continue\n            try:\n                tweet = get_tweet(id, args.folder, quiet=args.quiet)\n            except KeyboardInterrupt:\n                print(\"User Interrupt, exiting.\")\n                exit()\n            except:\n                print(\"Error with parsing id \" + id)\n                traceback.print_exc()\n                continue\n            if not args.nomedia:\n                if not args.quiet and len(tweet) > 1:\n                    print(f\"[INFO] {id} is a thread, has {len(tweet)} tweets\")\n                tweets.extend(tweet)\n            else:\n                for t in tweet:\n                    if id == t['id_str']:\n                        if t['medias'] == []:\n                            print(\"id: \" + id)\n                            print(t)\n    if args.nomedia:\n        exit()\n    uniq_list = []\n    seen = set()\n    for t in tweets:\n        if t['id'] not in seen:\n            seen.add(t['id'])\n            uniq_list.append(t)\n\n    uniq_list.sort(key=lambda x: int(x[\"id\"]))\n    uniq_list.sort(key=lambda x: x[\"user\"][\"screen_name\"])\n    # filter no media\n    for u in uniq_list:\n        if u['medias'] == []:\n            uniq_list.remove(u)\n\n    with open(args.output, \"w\") as f:\n        json.dump(uniq_list, f, ensure_ascii=False)\n    if not args.quiet:\n        print(f\"[OUTPUT] {args.output} written.\")\n","sub_path":"cock_list.py","file_name":"cock_list.py","file_ext":"py","file_size_in_byte":2727,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"228015214","text":"class Node:  # node class\n    def __init__(self, value=None, next_=None):  # initialise it\n        self.value = value  # with a value\n        self.next = next_  # and a link to the next node\n\n    def __str__(self):\n        return \"Node value = \" + str(self.value)\n\n\nclass LinkedList:  # list class\n    def __init__(self):  # initialise it empty\n        self.first = None\n        self.last = None\n\n    def clear(self):  # empty the list\n        self.__init__()\n\n    def __str__(self):  # printing helper\n        R = ''\n\n        pointer = self.first  # take the first pointer\n        while pointer is not None:  # until the pointer becomes None\n            R += str(pointer.value)  # append the value to the string\n            pointer = pointer.next  # move on along the pointer\n            if pointer is not None:  # if it exists, append a space\n                R += ' '\n        return R\n\n    def pushleft(self, value):\n        if self.first is None:\n            self.first = Node(value)\n            self.last = self.first\n        else:\n            new_node = Node(value, self.first)\n            self.first = new_node\n\n    def pushright(self, value):\n        if self.first is None:\n            self.first = Node(value)\n            self.last = self.first\n        else:\n            new_node = Node(value)\n            self.last.next = new_node\n            self.last = new_node\n\n    def popright(self):\n        if self.first is None:\n            return None\n        elif self.first == self.last:\n            node = self.first  # save it\n            self.__init__()  # clear the list\n            return node  # and return the saved node\n        else:\n            node = self.last  # save the last node\n            pointer = self.first  # create a pointer\n            while pointer.next is not node:\n                pointer = pointer.next\n            pointer.next = None  # drop the link, then\n            self.last = pointer\n            return node  # return the saved node\n\n    def popleft(self):\n        if self.first is None:\n            return None\n        elif self.first == self.last:\n            node = self.first  # save it\n            self.__init__()  # clear the list\n            return node  # and return the saved node\n        else:\n            node = self.first  # save the first node\n            self.first = self.first.next  # repoint first at the next node\n            return node  # return the saved node\n\n\nLL = LinkedList()\n\nLL.pushright(1)\nLL.pushleft(2)\nLL.pushright(3)\nLL.popright()\nLL.pushleft(4)\nLL.pushright(5)\nLL.popleft()\n\nprint(LL)","sub_path":"17.6.3.py","file_name":"17.6.3.py","file_ext":"py","file_size_in_byte":3067,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"6602520","text":"import argparse\nimport sys\nimport os\nimport scipy.io as sio\nimport pandas as pd\nfrom settings import local_dir, sub_dirs, raw_measurements, axis_labels, segment_labels, joint_labels, \\\n    sensor_labels, measure_to_len_map, seg_join_sens_map\n\n\n\"\"\"Section below encompasses all the required arguments for the script to run. Note that the default behaviour of the \nscript is to operate on complete files, rather than 'single-act' files produced by the 'mat_act_div.py' script; hence, \nthe optional '--single_act' argument must be specified if it wants to operate on those files to ensure the correct \nfiles are retrieved.\"\"\"\nparser = argparse.ArgumentParser()\nparser.add_argument(\"dir\", help=\"Specifies which source directory to use so as to process the files contained within \"\n                               \"them accordingly. Must be one of '6minwalk-matfiles', '6MW-matFiles', \"\n                               \"'NSAA', 'NMB', or 'allmatfiles'.\")\nparser.add_argument(\"fn\", help=\"Specifies the short name (e.g. 'D11') of the file that we wish to extract the specified \"\n                               \"raw measurements. Specify 'all' for all the files available in the 'local_dir'.\")\nparser.add_argument(\"measurements\", help=\"Specifies the measurements to extract from the source .mat file. 
Separate \"\n \"each measurement to extract by a comma, or provide 'all' for all measurements.\")\nparser.add_argument(\"--single_act\", type=bool, nargs=\"?\", const=True,\n help=\"Specify if the files to operate on are 'single act' files.\")\nparser.add_argument(\"--single_act_concat\", type=bool, nargs=\"?\", const=True,\n help=\"Specify if the files to operate on are 'single act concat' files.\")\nargs = parser.parse_args()\n\n\n#Sets 'local_dir' to the correct directory name, based on the argument passed in for 'dir' and whether or not the\n#optional arguments '--single_act' or '--single_act_concat' were set or not.\nif args.dir + \"\\\\\" in sub_dirs:\n if args.dir == \"6minwalk-matfiles\":\n local_dir += args.dir + \"\\\\all_data_mat_files\\\\\"\n elif args.dir == \"6MW-matFiles\" or args.dir == \"allmatfiles\" or args.dir == \"left-out\" or args.dir == \"NMB\":\n local_dir += args.dir + \"\\\\\"\n else:\n local_dir += args.dir + \"\\\\matfiles\\\\\"\n if args.single_act:\n local_dir += \"act_files\\\\\"\n elif args.single_act_concat:\n local_dir += \"act_files_concat\\\\\"\nelse:\n print(\"First arg ('dir') must be a name of a subdirectory within source dir and must be one of \"\n \"'6minwalk-matfiles', '6MW-matFiles', 'NSAA', 'NMB', or 'allmatfiles'.\")\n sys.exit()\n\n#Gets the names of all the files within the 'local_dir' directory and filters them to a list of one element if\n#the 'fn' argument corresponds to a file within this, or a list of all '.mat' files within this directory if the 'fn'\n#argument is 'all'\nfile_names = os.listdir(local_dir)\nif any(args.fn in fn for fn in file_names):\n full_file_names = [local_dir + [fn for fn in file_names if args.fn in fn][0]]\nelif args.fn == \"all\":\n full_file_names = [local_dir + file_name for file_name in file_names if file_name.endswith(\".mat\")]\n #Filters out the 'AllTasks' files from 'allmatfiles' for space reasons\n if args.dir == \"allmatfiles\":\n full_file_names = [ffn for ffn in full_file_names if \"AllTasks\" not in ffn and \"ttest\" not in ffn]\nelse:\n print(\"Second arg ('fn') must be the short name of a file (e.g. 'D2' or 'all') within\", local_dir)\n sys.exit()\n\n\n#Sets measures to all possible measurement names if the argument given is 'all', or get all parts of the 'measurements'\n#argument, splits it up by commas, and adds the measurement names to a list. E.g., if the script was run as\n#'python ext_raw_measures.py NSAA all position,acceleration,jointAngle', then measures would now contain:\n#['position', 'acceleration', 'jointAngle']\nif args.dir == \"allmatfiles\" and args.measurements != \"jointAngle\":\n print(\"Third arg ('measurements') must be 'jointAngle when 'dir' argument is \\'allmatfiles\\'\")\n sys.exit()\nmeasures = []\nif args.measurements == \"all\":\n measures = raw_measurements\nelse:\n for measure in args.measurements.split(\",\"):\n if measure in raw_measurements:\n measures.append(measure)\n else:\n print(\"'\" + measure + \"' not a valid 'measurement' name. 
Must be 'all' or one of:\", raw_measurements)\n sys.exit()\n\n#For each of the measurements to extract from the source file(s), create a unique subdirectory within 'local_dir'\n#with a name equal to the measurement name\nfor measure in measures:\n if not os.path.exists(local_dir + measure):\n os.mkdir(local_dir + measure)\n\n#For each of the files that we wish to extract the raw measurements of (given as a short file name in 'fn' or all\n#available filenames if 'fn' is set as 'all'...\nfor full_file_name in full_file_names:\n print(\"\\nExtracting\", measures, \"from '\" + full_file_name + \"'...\")\n #Loads the file given the file name and extracts the table data from within the '.mat' structure\n mat_file = sio.loadmat(full_file_name)\n if not args.dir == \"allmatfiles\":\n tree = mat_file[\"tree\"]\n try:\n frame_data = tree[0][0][6][0][0][10][0][0][3][0]\n except IndexError:\n try:\n frame_data = tree[0][0][6][0][0][10][0][0][2][0]\n except IndexError:\n frame_data = tree[0][0][6][0][0][9][0][0][2][0]\n #Gets the names of each of the columns within\n col_names = frame_data.dtype.names\n # Extract single outer-list wrapping for vectors and double outer-list values for single values\n try:\n frame_data = [[elem[0] if len(elem[0]) != 1 else elem[0][0] for elem in row] for row in frame_data]\n except IndexError:\n # Accounts for missing 'contact' values in certain rows of some '.mat' files by ignoring the 'contact' column\n new_frame_data = []\n for m, row in enumerate(frame_data):\n # Ignore rows that don't have 'normal' as their 'type' cell\n if row[3][0] != \"normal\":\n continue\n row_data = []\n for i in range(len(row)):\n if i == len(row) - 1:\n row_data.append([\"\", \"\"])\n elif len(row[i][0]) != 1:\n row_data.append(row[i][0])\n else:\n row_data.append(row[i][0][0])\n new_frame_data.append(row_data)\n frame_data = new_frame_data\n import numpy as np\n print(np.shape(frame_data))\n sys.exit()\n else:\n frame_data = mat_file[\"jointangle\"]\n col_names = None\n\n #Creates a DataFrame from the data extracted from the source '.mat' file in question, skipping the first 3 rows\n #if it's not a 'single-act' file (as these just correspond to 'setup' rows)\n if args.single_act or args.single_act_concat:\n df = pd.DataFrame(frame_data, columns=col_names).iloc[:]\n else:\n df = pd.DataFrame(frame_data, columns=col_names).iloc[3:]\n\n #For each measurement to extract from the file, gets a list of names of features for that measurement (23 segments\n #labels, #22 joint labels, or 17 sensor labels), create a list of column names for each column of extracted\n #data from the file, gets the necessary columns from the DataFrame corresponding to the measurement in question,\n #creates a new DataFrame from this with the index names being the short-file name (e.g. 
'D11'), and writes this\n #to a .csv file within 'local_dir' with a name corresponding to its source file name and extracted measurement\n for measure in measures:\n measurement_names = seg_join_sens_map[measure_to_len_map[measure]]\n headers = [\"(\" + measurement_name + \") : (\" + axis + \"-axis)\"\n for measurement_name in measurement_names for axis in axis_labels]\n if args.dir == \"allmatfiles\":\n measure_data = frame_data\n else:\n try:\n measure_data = [list(data) for data in df.loc[:, measure].values]\n #Accounts for files where we expect to see more measurements than their exists within the '.mat' file (i.e.\n #in an 'AD' file where we expect all measurements but instead only contains 'jointAngle' and\n #'jointAngleXZY' measurements\n except KeyError:\n print(\"Measurement '\" + measure + \"' that we expect to see within '\" + full_file_name +\n \"' are not present, skipping this measurement...\")\n continue\n if full_file_name.split(\"\\\\\")[-1].startswith(\"All\"):\n short_file_name = full_file_name.split(\"\\\\\")[-1].split(\"-\")[1]\n else:\n short_file_name = full_file_name.split(\"\\\\\")[-1].split(\"-\")[0]\n measure_df = pd.DataFrame(measure_data, index=[short_file_name for i in range(len(measure_data))])\n\n new_file_name = local_dir + measure + \"\\\\\" + full_file_name.split(\"\\\\\")[-1].split(\".mat\")[0] + \\\n \"_\" + measure + \".csv\"\n #If file already exists, remove it in preparation for new file being written\n if os.path.exists(new_file_name):\n os.remove(new_file_name)\n print(\"Writing '\" + new_file_name + \"' to '\" + local_dir + measure + \"\\\\'\")\n measure_df.to_csv(new_file_name, header=headers)\n","sub_path":"report_stuff/report_parts/msc-template/code/extrawmeasures.py","file_name":"extrawmeasures.py","file_ext":"py","file_size_in_byte":9510,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"598021773","text":"import random\n\nclass conway:\n def __init__(self, numLists,numInts,genMethod):\n self.store = []\n self.numLists = numLists\n self.numInts = numInts\n self.genMethod = genMethod\n\n for i in range (0, numLists,1):\n temp = []\n\n for j in range(0,numInts,1):\n if genMethod == \"zeros\":\n temp = temp + [0]\n else:\n temp = temp +[random.randint(0,1)]\n\n self.store = self.store + [temp]\n\n def getDisp(self):\n accum = \"\"\n\n for i in range (0,self.numLists,1):\n for j in range (0,self.numInts,1):\n if self.store[i][j] == 0:\n accum = accum + \" \"\n else:\n accum = accum + \"*\"\n\n accum = accum + \"\\n\"\n\n return accum\n\n def printDisp(self):\n string = self.getDisp()\n print(string)\n return True\n\n def setPos(self,row,col,val):\n if val != 0 and val != 1:\n return False\n else:\n self.store[row][col] = val\n return True\n\n def getNeighbours(self,row,col):\n left = 0\n right = 0\n up = 0\n down = 0\n rows = []\n cols = []\n accum = []\n \n if col == 0:\n left = len(self.store[row])-1\n right = 1\n elif col == len(self.store[row])-1:\n left = col - 1\n right = 0\n else:\n left = col - 1\n right = col + 1\n cols = [left,col,right]\n\n if row == 0:\n up = len(self.store)-1\n down = 1\n elif row == len(self.store)-1:\n up = row - 1\n down = 0\n else:\n up = row - 1\n down = row + 1\n rows = [up,row,down]\n\n counter = 0\n for i in range(0,3,1):\n for j in range(0,3,1):\n if counter != 4:\n accum = accum + [self.store[rows[i]][cols[j]]]\n counter = counter + 1\n \n return 
accum\n","sub_path":"labs/w6/conwaylib.py","file_name":"conwaylib.py","file_ext":"py","file_size_in_byte":1895,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"497643172","text":"#!/usr/bin/python\n#-*- coding: utf-8 -*-\n__author__ = 'hcm'\n\nimport time\nfrom tkinter import *\n\nfrom src.ballgame.Ball import Ball\n\nfrom ethan.MyApp.ballgame.Paddle import Paddle\n\nif __name__ == \"__main__\":\n    tk = Tk()\n    tk.title(\"Ball Game\")\n    tk.resizable(0,0)\n    tk.wm_attributes(\"-topmost\",1)\n\n    canvas = Canvas(tk,width=500,height=500,bd=0)\n    canvas.pack()\n    tk.update()\n\n    # instantiate a paddle\n    paddle = Paddle(canvas,'red')\n    # instantiate a ball\n    ball = Ball(canvas,paddle,'blue')\n\n    while 1:\n        #ball.draw()\n        #paddle.draw()\n\n        # game-over check: has the ball hit the bottom of the screen?\n        if ball.hit_button == False:\n            ball.draw()\n            paddle.draw()\n        tk.update_idletasks()\n        tk.update()\n        time.sleep(0.01)\n\n    canvas.mainloop()","sub_path":"test/ballgame/TestGame.py","file_name":"TestGame.py","file_ext":"py","file_size_in_byte":841,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"13151206","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[ ]:\n\n\nimport copy\nimport dash\nimport datetime\nimport pandas as pd\nimport numpy as np\nfrom dash.dependencies import Input, Output, State\nimport dash_core_components as dcc\nimport dash_html_components as html\nimport plotly.graph_objs as go\nfrom datetime import datetime as dt\nimport matplotlib.pyplot as plt\nimport plotly\nimport datetime\nimport matplotlib.pyplot as plt\nimport re\nimport base64\nfrom io import BytesIO\nimport statsmodels.api as sm\n\n# create the app object\napp = dash.Dash(\n    __name__, meta_tags=[{\"name\": \"viewport\", \"content\": \"width=device-width\"}]\n)\nserver = app.server\n\n\n\n\n\n##############################################################\n# Data\n\ndf1 = pd.read_csv('transactions_1.csv')\ndf2 = pd.read_csv('transactions_2.csv')\ndf = pd.concat ([df1,df2], axis=0, ignore_index=True).drop(['Unnamed: 0'], axis=1)\n\ndf['date']= pd.to_datetime(df['date']).dt.strftime('%Y-%m-%d')\n\ndf['year'] = pd.DatetimeIndex(df['date']).year\ndf['month'] = pd.DatetimeIndex(df['date']).month\n\nsales = df.copy()\nsales['date']= pd.to_datetime(sales['date'])\nsales['date'] = sales['date'].dt.year.astype('str') + '-' + sales['date'].dt.month.astype('str') + '-01'\nsales['date'] = pd.to_datetime(sales['date'])\nsales = sales.groupby(['customer_id', 'date']).size().reset_index()\nsales.columns = ['customer_id', 'date', 'Order_count']\n\nsales_all = sales.groupby(['date'])['Order_count'].sum().reset_index()\n\n\nlist1=list(df['product_id'].unique())\nlist1.insert(0, \"All\")\n\n\nlist2=list(df['customer_id'].unique())\nlist2.insert(0, \"All\")\n\n##############################################################\n# Create app layout\n\napp.layout = html.Div(\n    [\n        dcc.Store(id=\"aggregate_data\"),\n        # empty Div to trigger javascript file for graph resizing\n        html.Div(id=\"output-clientside\"),\n#############################################################  \n# first container (title)\n        \n        html.Div([ \n            # logo\n            html.Div( \n                className=\"one-third column\",),\n            # main title\n            html.Div([html.Div([ html.H3( \"Monthly Sales\",\n                                    style={\"margin-bottom\": \"0px\"},),\n                                    html.H5(\"Overview and Prediction\", style={\"margin-top\": \"0px\"}),\n                                    ])                    ],\n                className=\"one-half column\",style={\"margin-top\": \"0px\", \"margin-right\": \"29.5%\"},\n                   id=\"title\",),\n            \n            \n            ],\n            id=\"header\",\n            
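# header row: logo placeholder column plus the centred page title\n            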
className=\"row flex-display\",\n style={\"margin-bottom\": \"25px\"},\n ),\n\n########################################################## \n# second container (Sales, histogram) \n \n dcc.Tabs([\n dcc.Tab(label='Overview', children=[\n \n \n \n html.Div([\n # map\n html.Div( [ \n html.Label([\"Product ID\", dcc.Dropdown(\n id=\"dropdown-product\",\n options=[ {'label':i, 'value':i} for i in list1],\n value=\"All\",\n searchable=True,\n style={\"margin-left\": \"10px\",\"margin-right\": \"15px\",\"margin-top\": \"10px\"},\n )]), \n html.Label([\"Customer ID\", dcc.Dropdown(\n id=\"dropdown-customer\",\n options=[ {'label':i, 'value':i} for i in list2],\n value=\"All\",\n searchable=True,\n style={\"margin-left\": \"10px\",\"margin-right\": \"15px\",\"margin-top\": \"5px\"},\n )]),\n html.Label([\"Year\", dcc.Dropdown(\n id=\"dropdown-year\",\n options=[ {'label':i, 'value':i} for i in df['year'].unique()],\n value=2018,\n searchable=True,\n style={\"margin-left\": \"10px\",\"margin-right\": \"15px\",\"margin-top\": \"5px\"},\n )]),\n \n \n ],\n className=\"pretty_container three columns\",),\n \n \n \n html.Div(\n [\n \n dcc.Graph(id=\"graph-sales\", \n \n config={'displayModeBar': False}),\n \n \n \n ],\n className=\"pretty_container nine columns\",\n \n ),\n \n \n \n ],\n className=\"row flex-display\",\n ), \n \n \n \n \n \n ]),\n dcc.Tab(label='Modeling', children=[\n \n \n html.Div([\n \n html.Div( [ \n html.Label([\"Customer ID\", dcc.Dropdown(\n id=\"dropdown-customer1\",\n options=[ {'label':i, 'value':i} for i in list2],\n value='All',\n searchable=True,\n style={\"margin-left\": \"10px\",\"margin-right\": \"15px\",\"margin-top\": \"10px\"},\n #multi=True\n )]),\n \n \n \n ],\n className=\"pretty_container three columns\",),\n \n \n \n html.Div(\n [\n \n dcc.Graph(id=\"graph-model\", \n \n config={'displayModeBar': False}),\n \n \n \n ],\n className=\"pretty_container nine columns\",\n \n ),\n \n \n \n ],\n className=\"row flex-display\",\n ), \n \n \n \n ]),\n \n ])\n \n \n \n \n \n ],\n id=\"mainContainer\",\n style={\"display\": \"flex\", \"flex-direction\": \"column\"},\n)\n\n\n##########################################################\n# Visualizing\n\n@app.callback(Output(\"graph-sales\", \"figure\"),\n [ Input(\"dropdown-product\", \"value\"), \n Input(\"dropdown-customer\", \"value\"),\n Input(\"dropdown-year\", \"value\")\n \n ]) \ndef sales_figure( productid,customerid,year):\n \n if productid == \"All\":\n if customerid == \"All\":\n df_filtered=df[(df['year']==year)]\n df_monthly = pd.DataFrame(df_filtered.groupby( 'month').size().rename('order_counts').reset_index())\n else:\n df_filtered=df[(df['year']==year)&(df['customer_id']==customerid)]\n df_monthly = pd.DataFrame(df_filtered.groupby( 'month').size().rename('order_counts').reset_index())\n elif customerid == \"All\":\n df_filtered=df[(df['year']==year)&(df['product_id']==productid)]\n df_monthly = pd.DataFrame(df_filtered.groupby( 'month').size().rename('order_counts').reset_index())\n else:\n df_filtered=df[(df['year']==year) & (df['product_id']==productid)&(df['customer_id']==customerid)] \n df_monthly = pd.DataFrame(df_filtered.groupby( 'month').size().rename('order_counts').reset_index())\n \n \n \n##########################################################\n# Visualization\n\n traces = []\n \n trace = go.Scatter(\n x=df_monthly['month'], \n y=df_monthly['order_counts'],\n mode = 'lines',\n line = dict(shape = 'linear', width= 2),\n connectgaps = True\n )\n traces.append(trace)\n \n \n 
layout=go.Layout(title_text ='Monthly Sales for '+productid+' in year '+str(year),\n xaxis = dict(title = 'Month',tickmode = 'array',\n tickvals = np.arange(1,13),\n ticktext = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sept', 'Oct', 'Nov', 'Dec']),\n yaxis = dict(title = 'Sales')) \n figure = dict(data = traces, layout = layout)\n \n \n return figure\n\n##########################################################\n# Modeling\n\n@app.callback(Output(\"graph-model\", \"figure\"),\n [ \n Input(\"dropdown-customer1\", \"value\"),\n \n \n ]) \ndef model_figure( customerid):\n \n \n if customerid == \"All\": \n customerx = sales_all\n else: \n\n customerx = sales[sales['customer_id'] == customerid]\n customerx= customerx.drop('customer_id', axis=1)\n \n y = customerx.set_index('date')\n mod = sm.tsa.statespace.SARIMAX(y,\n order=(1, 1, 1),\n seasonal_order=(1, 1, 1, 12),\n #enforce_stationarity=False,\n enforce_invertibility=False)\n results = mod.fit()\n\n pred = results.get_prediction(start=pd.to_datetime('2019-01-01'), dynamic=False)\n pred_ci = pred.conf_int()\n pred_df = pd.DataFrame(pred.predicted_mean)\n pred_df[pred_df < 0] = 0\n pred_ci[pred_ci < 0] = 0\n \n \n##########################################################\n traces = []\n \n trace1 = go.Scatter(\n name='Actual Sales',\n x=y.index, \n y=y['Order_count'],\n mode = 'lines',\n line = dict(shape = 'linear', width= 2),\n connectgaps = True\n )\n \n trace2 = go.Scatter(\n name='Predicted Sales',\n x=pred_df.index, \n y=pred_df['predicted_mean'],\n mode='lines',\n line=dict(color='red'),\n )\n \n trace3 = go.Scatter(\n name='Upper Bound',\n x=pred_ci.index,\n y=pred_ci.iloc[:, 1],\n mode='lines',\n marker=dict(color=\"#444\"),\n line=dict(width=0),\n showlegend=False\n )\n trace4 = go.Scatter(\n name='Lower Bound',\n x=pred_ci.index,\n y=pred_ci.iloc[:, 0],\n marker=dict(color=\"#444\"),\n line=dict(width=0),\n mode='lines',\n fillcolor='rgba(68, 68, 68, 0.3)',\n fill='tonexty',\n showlegend=False\n )\n\n traces.append(trace1)\n traces.append(trace2)\n traces.append(trace3)\n traces.append(trace4)\n\n \n layout=go.Layout(title_text ='Monthly Sales for '+customerid,\n xaxis = dict(title = 'Date',tickmode = 'array'),\n yaxis = dict(title = 'Sales'))\n \n figure = dict(data = traces, layout = layout)\n \n return figure\n\n \n\n# Main\nif __name__ == \"__main__\":\n app.run_server(debug=False)\n\n\n# In[ ]:\n\n\n\n\n","sub_path":"Dashboard.py","file_name":"Dashboard.py","file_ext":"py","file_size_in_byte":11634,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"100366116","text":"\"\"\"\nFrom: https://oj.leetcode.com/problems/clone-graph/\nAuthor: Jing Zhou\nDate: Jul 12, 2014\nThought: the traversal of graph. BFS. 
use of a queue.\n\"\"\"\n\n\n\n# Definition for a undirected graph node\n# class UndirectedGraphNode:\n# def __init__(self, x):\n# self.label = x\n# self.neighbors = []\n\nclass Solution:\n # @param node, a undirected graph node\n # @return a undirected graph node\n def cloneGraph(self, node):\n if not node:\n return node\n mapNode = {}\n newGraph = UndirectedGraphNode(node.label)\n mapNode[node] = newGraph\n q = collections.deque()\n q.append(node)\n while q:\n nd = q.popleft()\n for nb in nd.neighbors:\n if nb not in mapNode:\n newnb = UndirectedGraphNode(nb.label)\n mapNode[nb] = newnb\n mapNode[nd].neighbors.append(newnb)\n q.append(nb)\n else:\n mapNode[nd].neighbors.append(mapNode[nb])\n return newGraph\n","sub_path":"week17/Jing/p_clone_graph.py","file_name":"p_clone_graph.py","file_ext":"py","file_size_in_byte":1058,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"291416086","text":"import collections\nimport sys\n# sys.stdin=open('a.txt','r')\nt = int(input())\np=998244353\nwhile t:\n\tt-=1\n\tn = int(input())\n\ttwon=n\n\tm = {}\n\ta = [int(x) for x in input().split()]\n\tfor aa in a:\n\t\tif aa not in m:\n\t\t\tm[aa]=0\n\t\tm[aa]+=1\n\tsum=0\n\tm = collections.OrderedDict(sorted(m.items()))\n\n\ti=1\n\tfor key in m:\n\t\tif key!=i:\n\t\t\tbreak\n\t\telse:\n\t\t\ti+=1\n\t# print('i',i)\n\tprod=1\n\tfor key in m:\n\t\tif key>i:\n\t\t\tbreak\n\t\ttwon-=m[key]\n\t\tsum+=(key*pow(2,twon,p)*prod)%p\n\t\tprod=(prod*(pow(2,m[key],p)-1))%p;\n\t\t# print('sum',sum)\n\tsum+=i*(prod)*pow(2,twon,p)\n\tprint(sum%p//1)\n\n","sub_path":"codechef/Lunchtime/apr-2020/c.py","file_name":"c.py","file_ext":"py","file_size_in_byte":559,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"624069492","text":"import os\nimport shutil\nfrom urllib.parse import urljoin\n\nfrom django.urls import reverse\nfrom celery import shared_task\n\nfrom kantele import settings as config\nfrom jobs.post import update_db, taskfail_update_db\n\n# Updating stuff in tasks happens over the API, assume no DB is touched. 
This\n# avoids setting up auth for DB\n\n\n@shared_task(bind=True)\ndef convert_to_mzml(self, fn, fnpath, outfile, sf_id, servershare, reporturl, failurl):\n \"\"\"This will run on remote in other repo (proteomics-tasks) so there is no\n need to be no code in here, the task is an empty shell with only the\n task name\"\"\"\n return True\n\n\n@shared_task(bind=True)\ndef scp_storage(self, mzmlfile, rawfn_id, dsetdir, servershare, reporturl, failurl):\n \"\"\"This will run on remote in other repo (proteomics-tasks) so there is no\n need to be no code in here, the task is an empty shell with only the\n task name\"\"\"\n return True\n\n\n@shared_task(bind=True, queue=config.QUEUE_STORAGE)\ndef rename_storage_location(self, srcpath, dstpath, storedfn_ids):\n \"\"\"This expects one dataset per dir, as it will rename the whole dir\"\"\"\n print('Renaming dataset storage {} to {}'.format(srcpath, dstpath))\n try:\n shutil.move(os.path.join(config.STORAGESHARE, srcpath), os.path.join(config.STORAGESHARE, dstpath))\n except:\n taskfail_update_db(self.request.id)\n raise\n # Go through dirs in path and delete empty ones caused by move\n splitpath = srcpath.split(os.sep)\n for pathlen in range(0, len(splitpath))[::-1]:\n # no rmdir on the leaf dir (would be pathlen+1) since that's been moved\n checkpath = os.path.join(config.STORAGESHARE, os.sep.join(splitpath[:pathlen]))\n if not os.listdir(checkpath):\n try:\n os.rmdir(checkpath)\n except:\n taskfail_update_db(self.request.id)\n raise\n postdata = {'fn_ids': storedfn_ids, 'dst_path': dstpath,\n 'task': self.request.id, 'client_id': config.APIKEY}\n url = urljoin(config.KANTELEHOST, reverse('jobs:updatestorage'))\n try:\n update_db(url, json=postdata)\n except RuntimeError:\n # FIXME cannot move back shutil.move(dst, src)\n raise\n\n\n@shared_task(bind=True, queue=config.QUEUE_STORAGE)\ndef move_file_storage(self, fn, srcshare, srcpath, dstpath, fn_id, newname=False):\n src = os.path.join(config.SHAREMAP[srcshare], srcpath, fn)\n if newname:\n dst = os.path.join(config.STORAGESHARE, dstpath, newname)\n else:\n dst = os.path.join(config.STORAGESHARE, dstpath, fn)\n print('Moving file {} to {}'.format(src, dst))\n dstdir = os.path.split(dst)[0]\n if not os.path.exists(dstdir):\n try:\n os.makedirs(dstdir)\n except FileExistsError:\n # Race conditions may happen, dir already made by other task\n pass\n except Exception:\n taskfail_update_db(self.request.id)\n raise\n elif not os.path.isdir(dstdir):\n taskfail_update_db(self.request.id)\n raise RuntimeError('Directory {} is already on disk as a file name. 
'\n 'Not moving files.')\n try:\n shutil.move(src, dst)\n except Exception as e:\n taskfail_update_db(self.request.id)\n raise RuntimeError('Could not move file tot storage:', e)\n postdata = {'fn_id': fn_id, 'servershare': config.STORAGESHARENAME,\n 'dst_path': dstpath, 'newname': os.path.basename(dst),\n 'client_id': config.APIKEY, 'task': self.request.id}\n url = urljoin(config.KANTELEHOST, reverse('jobs:updatestorage'))\n try:\n update_db(url, json=postdata)\n except RuntimeError:\n shutil.move(dst, src)\n raise\n print('File {} moved to {}'.format(fn_id, dst))\n\n\n@shared_task(bind=True, queue=config.QUEUE_STORAGE)\ndef move_stored_file_tmp(self, fn, path, fn_id):\n src = os.path.join(config.STORAGESHARE, path, fn)\n dst = os.path.join(config.TMPSHARE, fn)\n print('Moving stored file {} to tmp'.format(fn_id))\n try:\n shutil.move(src, dst)\n except Exception:\n taskfail_update_db(self.request.id)\n raise\n postdata = {'fn_id': fn_id, 'servershare': config.TMPSHARENAME,\n 'dst_path': '', 'client_id': config.APIKEY,\n 'task': self.request.id}\n url = urljoin(config.KANTELEHOST, reverse('jobs:updatestorage'))\n try:\n update_db(url, json=postdata)\n except RuntimeError:\n shutil.move(dst, src)\n raise\n print('File {} moved to tmp and DB updated'.format(fn_id))\n","sub_path":"datasets/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":4581,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"424808951","text":"#!/usr/bin/env python3\r\nimport os\r\n\r\nstuOpenFile = 'StudentFile.txt'\r\n\r\n\r\ndef menu():\r\n print(\"\"\"\r\n |----------学生管理系统---------|\r\n\r\n |===========主功能菜单==========|\r\n | |\r\n | 1,录入学生成绩 |\r\n | 2,查询学生成绩 |\r\n | 3,删除学生成绩 |\r\n | 4,修改学生成绩 |\r\n | 5,显示所有成绩 |\r\n | 6,退出系统 |\r\n | |\r\n |===============================|\r\n \"\"\")\r\n\r\n\r\n\"\"\"\r\n# 添加功能\r\n# * 添加学生姓名,学号,班级,成绩\r\n#! 姓名和学号不能为空\r\n#! 学号不能重复\r\n#! 
以追加写入的方式写入文件\r\n\"\"\"\r\n\r\n\r\ndef addInfo():\r\n while True:\r\n sName = input('输入学生名字:')\r\n if not sName: # *判断姓名有没有输入,如果没有,重新调用当前函数\r\n print('名字不能为空,请重新输入')\r\n addInfo()\r\n else: # *如果输入了,进入函数正常流程\r\n if not os.path.exists(stuOpenFile): # !如果要打开的文件不存在\r\n Id = 0 # !Id初始化为0\r\n Id += 1 # !Id自加1\r\n stuNum = input('请输入学号:')\r\n ifEmpty(stuNum) # *判断有没有输入学号,如果没有,调用当前函数\r\n # !如果要打开的文件存在但是里面没有内容\r\n elif os.path.exists(stuOpenFile) and os.path.getsize(stuOpenFile) == 0:\r\n Id = 0 # !依旧初始化id为0\r\n Id += 1 # !id自加1\r\n stuNum = input('请输入学号:')\r\n ifEmpty(stuNum) # *判断学号有没有输入\r\n else: # *如果要打开的文件存在并且里面有内容\r\n openFile = open(stuOpenFile, 'r+') # !打开文件并赋值给openFile\r\n textLines = openFile.readlines() # !将文件内所有内容全部赋值给textLines变量,此时textLines变量类型为列表\r\n for i in textLines: # !遍历i在textLines变量内\r\n textStrLines = i # !将i循环赋值给textStrLines变量,此刻textStrLines变量为字符串\r\n # !跳出循环后,textStrLines变量的值是列表最后一条数据,此时将其转成字典\r\n textDictLines = eval(textStrLines)\r\n # !转成字典后将其Id取出,这个Id是文件内最后一个Id,赋值给textDictId\r\n textDictId = textDictLines.get('Id')\r\n Id = textDictId + 1 # !新数据的Id为textDictId+1\r\n stuNum = input('请输入学号:')\r\n ifEmpty(stuNum) # *判断学号有没有输入\r\n for j in textLines: # !如果学号有输入,开始判断是不是重复,和获取Id是一样的逻辑\r\n textStrNum = j\r\n textDictNum = eval(textStrNum)\r\n # !唯一不一样的是,这里是循环体内每循环一次就进行一次赋值\r\n textDictNums = textDictNum.get('stuNum')\r\n if stuNum == textDictNums: # !每赋值一次就进行一次重复判断\r\n print('学生学号重复,请重新输入')\r\n openFile.close()\r\n addInfo()\r\n openFile.close() # !关闭文件\r\n bj = input('输入学生班级:')\r\n linux = input('请输入学生Linux成绩:')\r\n php = input('请输入学生PHP成绩:')\r\n python = input('请输入学生Python成绩:')\r\n # !将所有用户输入的信息全部写入一个字典\r\n stu = {'Id': Id, 'sname': sName, 'bj': bj, 'stuNum': stuNum,\r\n 'linux': linux, 'php': php, 'python': python}\r\n stuInfo = str(stu) # !将字典转为字符串\r\n saveInfo(stuInfo) # !调用函数,传入转为字符串的变量,开始写入文件\r\n num = input('y/Y 继续 n/N 退出:')\r\n thisFunction = addInfo\r\n ifTure(num, thisFunction) # *调用函数判断用户退出或继续\r\n\r\n\r\n\"\"\"\r\n# 查询单个学生信息\r\n# * 为了避免同名,以学号查询\r\n#! 输入学号,学号不能为空\r\n#! 将文件内容以列表形式复制\r\n#! 循环这个列表,每次循环赋值出来成为一个字符串\r\n#! 将字符串转为字典,取字典stuNum键值对,对比学号和值是否相等\r\n#! 如果相等,打印这个字典里所有值\r\n#! 
\r\n\r\n\r\ndef search():\r\n    format_title = '{:^6}{:^12}\t{:^10}{:^10}{:^10}{:^10}{:^10}'\r\n    format_data = '{:^6}{:^14}\t{:^10}{:^16}{:^4}{:^18}{:^6}'\r\n    if not os.path.exists(stuOpenFile):  # *If the file to open is missing, say so\r\n        print('No such file')\r\n    elif os.path.exists(stuOpenFile) and os.path.getsize(stuOpenFile) == 0:  # *If the file is empty, say there is no data\r\n        print('No student records')\r\n    else:\r\n        stuName = input('Enter student name: ')\r\n        if not stuName:  # *If the user entered no name\r\n            print('The student name must not be empty, please re-enter')\r\n            search()\r\n        else:  # *The user entered a name\r\n            openFile = open(stuOpenFile, 'r+')  # *Open the file\r\n            stuInfo = openFile.readlines()  # *Read all records into a variable\r\n            stuNum = input('Enter student number: ')\r\n            if not stuNum:  # *If the user entered no student number\r\n                print('The student number must not be empty, please re-enter')\r\n                search()\r\n            else:  # *The user entered a student number\r\n                for i in stuInfo:\r\n                    stuStrInfo = i\r\n                    stuDictInfo = eval(stuStrInfo)\r\n                    if stuName != stuDictInfo.get('sname') or stuNum != stuDictInfo.get('stuNum'):\r\n                        continue  # !Check the name and the number together; if either differs, keep looping\r\n                    # !If both match, start collecting the student's details\r\n                    elif stuName == stuDictInfo.get('sname') and stuNum == stuDictInfo.get('stuNum'):\r\n                        print(format_title.format('ID', 'Name', 'Class', 'Number',\r\n                                                  'Linux', 'PHP', 'Python'))  # !Print the formatted header\r\n                        stuId = stuDictInfo.get('Id')  # *Get the student's Id\r\n                        stuName = stuDictInfo.get('sname')  # *Get the student's name\r\n                        stuBj = stuDictInfo.get('bj')  # *Get the student's class\r\n                        stuNum = stuDictInfo.get('stuNum')  # *Get the student's number\r\n                        stuLinux = stuDictInfo.get('linux')  # *Get the student's Linux score\r\n                        stuPHP = stuDictInfo.get('php')  # *Get the student's PHP score\r\n                        stuPython = stuDictInfo.get('python')  # *Get the student's Python score\r\n                        print(format_data.format(stuId, stuName, stuBj,\r\n                                                 stuNum, stuLinux, stuPHP, stuPython))  # !Print the formatted record\r\n                        num = input('y/Y to continue, n/N to exit: ')\r\n                        thisFunction = search\r\n                        ifTure(num, thisFunction)  # *Call the helper that decides whether to continue or exit\r\n                print('Student record not found, please re-enter')  # !If no record matched both name and number, say so\r\n                search()\r\n\r\n\r\n\"\"\"\r\n# Delete function\r\n# * To make deletion easy and resolve duplicate names correctly, show all records first,\r\n#   then delete by checking both name and student number\r\n#! Read the name and student number from the user; each must be checked for emptiness\r\n#! The main logic is the file handling, which is explained in the code\r\n\"\"\"\r\n\r\n\r\ndef delete():\r\n    show()  # !Show all records first, to make deletion easier\r\n    openFile = open(stuOpenFile, 'r+')\r\n    textLines = openFile.readlines()\r\n    openFile.close()\r\n    stuName = input('Enter student name: ')  # !The name is required\r\n    if not stuName:\r\n        print('The name must not be empty, please re-enter')\r\n        delete()\r\n    else:\r\n        stuNum = input('Enter student number: ')  # !The number is needed to guard against duplicate names\r\n        if not stuNum:\r\n            print('The student number must not be empty, please re-enter')\r\n            delete()\r\n        else:\r\n            for i in textLines:\r\n                stuStrLines = i\r\n                stuDictInfo = eval(stuStrLines)\r\n                # !If either the name or the number does not match, keep looping\r\n                if stuName != stuDictInfo.get('sname') or stuNum != stuDictInfo.get('stuNum'):\r\n                    continue\r\n                # !If both the name and the number match, keep the record in a variable\r\n                elif stuName == stuDictInfo.get('sname') and stuNum == stuDictInfo.get('stuNum'):\r\n                    stuDel = stuDictInfo\r\n                    # !Once the matching record has been extracted, reopen the file for overwriting\r\n                    openFile = open(stuOpenFile, 'w')\r\n                    openFile.write('')  # !Empty the file\r\n                    openFile.close()  # !Close the file\r\n                    for i in textLines:\r\n                        openFile = open(stuOpenFile, 'a+')  # *Open the file in append mode\r\n                        stuList = i  # !On each pass put the old record string into a variable, ready to write back\r\n                        stuDict = eval(stuList)  # !Convert the string to a dict\r\n                        # !If this dict equals the extracted one, skip it (do not write it back) and keep looping\r\n                        if stuDict == stuDel:\r\n                            continue\r\n                        else:  # !If this record is different, go on and write it back
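                            # (Added illustration, not in the original:) e.g. if the record with Id 2\r\n                            # is deleted, the records that had Ids 3 and 4 are written back with\r\n                            # Ids 2 and 3, keeping the sequence of Ids contiguous.\r\n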
                            # !Check whether this record's Id is greater than the deleted record's Id\r\n                            if stuDict.get('Id') > stuDel.get('Id'):\r\n                                # *If so, decrement the Id by one and assign it to a new variable\r\n                                Id = stuDict.get('Id') - 1\r\n                                sname = stuDict.get('sname')\r\n                                bj = stuDict.get('bj')\r\n                                stuNum = stuDict.get('stuNum')\r\n                                linux = stuDict.get('linux')\r\n                                php = stuDict.get('php')\r\n                                python = stuDict.get('python')\r\n                                # *Put the new Id and the old contents back into a dict\r\n                                newStuDict = {'Id': Id, 'sname': sname, 'bj': bj,\r\n                                              'stuNum': stuNum, 'linux': linux, 'php': php, 'python': python}\r\n                                print('Deleted successfully')\r\n                                newStuStr = str(newStuDict)  # *Convert the dict to a string and write it to the file\r\n                                openFile.write(newStuStr + '\\n')\r\n                                openFile.close()\r\n                            else:  # !If the Id is not greater than the deleted one, convert to a string and write it straight back\r\n                                stuStr = str(stuDict)\r\n                                openFile.write(stuStr + '\\n')\r\n            if stuName != stuDictInfo.get('sname') and stuNum != stuDictInfo.get('stuNum'):\r\n                print('Student record not found, please re-enter')\r\n                delete()\r\n            num = input('y/Y to continue, n/N to exit: ')\r\n            thisFunction = delete\r\n            ifTure(num, thisFunction)  # *Call the helper that decides whether to continue or exit\r\n\r\n\r\n\"\"\"\r\n# Modify function\r\n# * Modify by student number to avoid name clashes; show all records first for convenience\r\n#! Check that the student number is not empty\r\n#! Check whether the number exists in the records read from the file: if it does, start modifying;\r\n#! if not, keep looping; if the loop ends without a match, report that the record does not exist\r\n#! After modifying, empty the file, then convert each record back to a string and write them all out again\r\n\"\"\"\r\n\r\n\r\ndef modify():\r\n    show()\r\n    openFile = open(stuOpenFile, 'r+')\r\n    textLines = openFile.readlines()  # Read the whole file into a variable, as a list\r\n    openFile.close()  # !Close the opened file here, which makes the later file handling easier\r\n    stuNum = input('Enter student number: ')  # *Modify by student number to avoid duplicate names\r\n    if not stuNum:\r\n        print('The student number must not be empty, please re-enter')\r\n        modify()\r\n    else:\r\n        for i in textLines:\r\n            stuList = i\r\n            stuDict = eval(stuList)\r\n            if stuNum == stuDict['stuNum']:  # !If the entered number exists in the records, start modifying\r\n                stuId = stuDict['Id']\r\n                stuName = stuDict['sname']\r\n                stuBj = stuDict['bj']\r\n                stuLinux = input(\"Enter the student's Linux score: \")\r\n                if not stuLinux:  # *If the user enters no new score, keep the old one\r\n                    stuLinux = stuDict['linux']\r\n                stuPHP = input(\"Enter the student's PHP score: \")\r\n                if not stuPHP:\r\n                    stuPHP = stuDict['php']\r\n                stuPython = input(\"Enter the student's Python score: \")\r\n                if not stuPython:\r\n                    stuPython = stuDict['python']\r\n                stuDicts = {'Id': stuId, 'sname': stuName, 'bj': stuBj, 'stuNum': stuNum,\r\n                            'linux': stuLinux, 'php': stuPHP, 'python': stuPython}\r\n                openFile = open(stuOpenFile, 'w')  # !Open the file for overwriting\r\n                openFile.write('')  # !Empty the file\r\n                openFile.close()  # !Close the file\r\n                for i in textLines:\r\n                    openFile = open(stuOpenFile, 'a+')  # !Open the file in append mode\r\n                    stuList = i\r\n                    stuDict = eval(stuList)\r\n                    if stuNum != stuDict.get('stuNum'):  # !If stuNum does not match this record\r\n                        stuStr = str(stuDict)  # !Append the record unchanged\r\n                        openFile.write(stuStr + '\\n')\r\n                    else:  # !If it matches\r\n                        stuStr = str(stuDicts)  # !Append the modified record, replacing the original\r\n                        openFile.write(stuStr + '\\n')\r\n            elif stuNum != stuDict['stuNum']:  # !If this record does not match, keep looping\r\n                continue\r\n        else:  # !If no record matched at all, report that the number does not exist and start over\r\n            print('Student number not found, please re-enter')\r\n            modify()\r\n        openFile.close()\r\n        num = input('y/Y to continue, n/N to exit: ')\r\n        thisFunction = modify\r\n        ifTure(num, thisFunction)  # *Call the helper that decides whether to continue or exit\r\n\r\n\r\n\"\"\"\r\n# Show-all function\r\n# * Open the file and read every record into a list\r\n# * Loop over the list, convert each string to a dict, and print the values of its entries\r\n\"\"\"\r\n\r\n\r\ndef show():\r\n    format_title = '{:^6}{:^12}\t{:^10}{:^10}{:^10}{:^10}{:^10}'  # Format strings for the output\r\n    format_data = '{:^6}{:^14}\t{:^10}{:^16}{:^4}{:^18}{:^6}'\r\n    if not os.path.exists(stuOpenFile):  # *If the file does not exist, say so\r\n        print('File does not exist')\r\n        main()\r\n    # *If the file has no content, say there are no student records\r\n    elif os.path.exists(stuOpenFile) and os.path.getsize(stuOpenFile) == 0:\r\n        print('There are no student records yet')\r\n        main()\r\n    else:\r\n        print(format_title.format('ID', 'Name', 'Class',\r\n                                  'Number', 'Linux', 'PHP', 'Python'))\r\n        openFile = open(stuOpenFile, 'r+')  # !Open the file for reading\r\n        stuTextLines = openFile.readlines()  # !Read everything into a variable, as a list\r\n        for i in stuTextLines:  # *Iterate over the list\r\n
            stuStrLines = i  # *Put the string from this pass into a variable\r\n            stuDictLines = eval(stuStrLines)  # !Convert the string to a dict and assign it to a variable\r\n            # !Assign each key's value to a variable named after the corresponding key\r\n            stuId = stuDictLines.get('Id')\r\n            stuNams = stuDictLines.get('sname')\r\n            stuBj = stuDictLines.get('bj')\r\n            stuNum = stuDictLines.get('stuNum')\r\n            stuLinux = stuDictLines.get('linux')\r\n            stuPHP = stuDictLines.get('php')\r\n            stuPython = stuDictLines.get('python')\r\n            # *Print the record, then keep looping\r\n            print(format_data.format(stuId, stuNams, stuBj,\r\n                                     stuNum, stuLinux, stuPHP, stuPython))\r\n        openFile.close()  # The loop is done, close the file\r\n\r\n\r\n\"\"\"\r\n# * Helper called by the add function to check whether the student number is empty\r\n\"\"\"\r\n\r\n\r\ndef ifEmpty(stuNum):\r\n    if not stuNum:\r\n        print('The student number must not be empty, please re-enter')\r\n        addInfo()\r\n\r\n\r\n\"\"\"\r\n# Decide whether the user wants to quit\r\n# * num parameter: the user's choice\r\n# * thisFunction parameter: the function that called ifTure\r\n\"\"\"\r\n\r\n\r\ndef ifTure(num, thisFunction):\r\n    if num == 'Y' or num == 'y':  # !If the choice is Y or y, call the function passed in as thisFunction\r\n        thisFunction()\r\n    elif num == 'N' or num == 'n':  # !If the choice is N or n, call the main function\r\n        main()\r\n    else:  # !If the choice is none of those four options, ask again and call ifTure once more\r\n        num = input('Please enter a valid option, Y/y to continue, N/n to exit: ')\r\n        ifTure(num, thisFunction)\r\n\r\n\r\ndef saveInfo(stuInfoStr):  # !Write a record to the file\r\n    with open(stuOpenFile, 'a+') as f:  # !Open the file in a+ mode\r\n        f.write(stuInfoStr + '\\n')  # !Append the given string to the file, followed by a newline\r\n        f.close()  # !Close the file\r\n\r\n\r\ndef main():\r\n    while True:\r\n        menu()\r\n        numExit = input('Choose a menu option: ')\r\n        if numExit == \"1\":\r\n            addInfo()\r\n        elif numExit == \"2\":\r\n            search()\r\n        elif numExit == \"3\":\r\n            delete()\r\n        elif numExit == \"4\":\r\n            modify()\r\n        elif numExit == \"5\":\r\n            show()\r\n        elif numExit == \"6\":\r\n            exit()\r\n        else:\r\n            print('Please enter a valid option')\r\n\r\n\r\nmain()\r\n","sub_path":"StudentFile.V1.1.0.py","file_name":"StudentFile.V1.1.0.py","file_ext":"py","file_size_in_byte":18620,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"132612022","text":"# Copyright 2014: Mirantis Inc.\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n#    http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom rally.common import logging\nfrom rally.plugins.openstack import credential\n\n\nLOG = logging.getLogger(__name__)\n\n\nclass Credential(credential.OpenStackCredential):\n    \"\"\"Deprecated version of OpenStackCredential class\"\"\"\n    def __init__(self, *args, **kwargs):\n        super(Credential, self).__init__(*args, **kwargs)\n        LOG.warning(\"Class rally.common.objects.Credential is deprecated \"\n                    \"since Rally 0.11.0. 
Use raw dict for OpenStack \"\n                    \"credentials instead.\")\n\n    def to_dict(self, include_permission=False):\n        dct = super(Credential, self).to_dict()\n        if not include_permission:\n            dct.pop(\"permission\")\n        return dct\n","sub_path":"rally/common/objects/credential.py","file_name":"credential.py","file_ext":"py","file_size_in_byte":1330,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"542597437","text":"import os\nimport sys\nimport boto3\nfrom base64 import b64decode\nimport time\n\n# This allows values to be decrypted only once per lambda container\ndecrypted = {}\nonAWS = os.environ.get(\"AWS_LAMBDA_FUNCTION_VERSION\", False)\n\nclass Timer(object):\n    def __init__(self, start=False):\n        self._start = time.time()\n        self._end = self._start\n        self._steps = []\n\n        self._running = False\n        if start:\n            self.start()\n\n\n    def start(self):\n        self._running = True\n        self._steps = []\n\n        self._start = time.time()\n\n\n    def step(self, returnDTFromPrev=False):\n        if not self._running:\n            return -1\n\n        now = time.time()\n        dt = now - self._start\n        self._steps.append(dt)\n        if returnDTFromPrev and len(self._steps) >= 2:\n            return dt - self._steps[-2]\n        return dt\n\n\n    def getAvgStep(self):\n        end = self._end if self._running else time.time()\n\n        numSteps = len(self._steps) or 1\n        return (end - self._start) / numSteps\n\n\n    def getSteps(self):\n        return self._steps\n\n\n    def end(self):\n        if not self._running:\n            return self._end - self._start\n\n        self._end = time.time()\n        self._running = False\n\n        dt = self._end - self._start\n        self._steps.append(dt)\n        return dt\n\n\ndef getEnv(key, default=None, throw=False):\n    if key not in os.environ and throw:\n        raise Exception(f\"Key \\\"{key}\\\" is not in the environment\")\n\n    if key in decrypted:\n        return decrypted[key]\n\n    val = os.environ.get(key, default)\n    # Try to decrypt if on AWS\n    if onAWS and type(val) is str:\n        try:\n            val = boto3.client('kms').decrypt(CiphertextBlob=b64decode(val))['Plaintext']\n        except Exception as e:\n            print(e)\n            print(f\"Couldn't decrypt value for key {key} using env val\")\n\n    decrypted[key] = val\n\n    return val\n\n\ndef addModulePath(base, path):\n    here = os.path.dirname(os.path.relpath(base))\n    sys.path.append(os.path.join(here, path))\n","sub_path":"py/hesburgh/hesutil.py","file_name":"hesutil.py","file_ext":"py","file_size_in_byte":1869,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"170106391","text":"\"\"\"Read in the (integer) coefficients of a quadratic equation -- a, b and c -- and\nprint its real roots. (Phase 2: note that a must not be zero. Also check whether the\nroots exist and whether they are equal.)\"\"\"\n
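# Worked example (added for clarity, not part of the original exercise): for a=1,\n# b=-3, c=2 the discriminant is delta = (-3)**2 - 4*1*2 = 1 > 0, so the two real\n# roots are x1 = (3 + 1)/2 = 2 and x2 = (3 - 1)/2 = 1.\n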
\na, b, c = int(input(\"value of a: \")), int(input(\"value of b: \")), int(input(\"value of c: \"))\nparar = False  # variable that controls the program's loop\n\nwhile not parar:  # while parar is False the program keeps repeating.\n    if a == 0:\n        print(\"a must not be zero!\")\n        a = int(input(\"value of a: \"))\n    delta = b ** 2 - 4 * a * c\n    print(\"Delta =\", delta, sep=' ')\n    if delta < 0:\n        print(\"the equation has no real roots\")\n        a, b, c = int(input(\"value of a: \")), int(input(\"value of b: \")), int(input(\"value of c: \"))\n    if delta == 0:\n        print(\"the equation has a single real root, i.e. two equal roots (the two statements are \"\n              \"equivalent)\")\n        x1 = x2 = -b / (2 * a)\n        print(\"X' = x'' =\", int(x1), sep=' ')\n        parar = True\n    if delta > 0:\n        print(\"the equation has two distinct real roots\")\n        raizdelta = delta ** (1 / 2)\n        x1 = (- b + raizdelta) / (2 * a)\n        x2 = (- b - raizdelta) / (2 * a)\n\n        print(\"X' =\", int(x1), \"x'' =\", int(x2), sep=' ')\n        parar = True\nif a * (x1 ** 2) + b * x1 + c == 0:\n    print(\"hence x =\", int(x1), sep=' ')\nelif a * (x2 ** 2) + b * x2 + c == 0:\n    print(\"hence x =\", int(x2), sep=' ')\n","sub_path":"lista 01/atividade 02.py","file_name":"atividade 02.py","file_ext":"py","file_size_in_byte":1399,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"437716280","text":"def main():\n\t# write code here\n\ttotal=0\n\tcounter = \"done\"\n\titem =\"\"\n\tmyList = []\n\twhile item!=counter:\n\t\titem = input('Item (enter \"done\" when finished): ')\n\t\tif item==counter:\n\t\t\tbreak\n\t\tprice = input('Price: ')\n\t\tquantity = input('Quantity: ')\n\t\ta = {'name':item ,'price':price ,'quantity':quantity}\n\t\tmyList.append(a)\n\t#print(myList)\n\tprint(\"\\n------------------- \\n receipt \\n------------------- \\n \")\n\tfor key in myList:\n\t\tmultiply = float(key['price']) * float(key['quantity'])\n\t\tprint(key['quantity'],key['name'],multiply,\"KD\")\n\t\ttotal += multiply\n\tprint(\"------------------\\n Total price: {}KD\".format(total))\n\nif __name__ == '__main__':\n\tmain()\n","sub_path":"cashier.py","file_name":"cashier.py","file_ext":"py","file_size_in_byte":654,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"127213819","text":"import torch\nimport torch.nn as nn\nfrom torch.nn import init\nimport numpy as np\nimport torch.nn.functional as F\nimport os\nfrom torchsummaryX import summary\n\ndef save_checkpoint(model, save_path):\n    if not os.path.exists(os.path.dirname(save_path)):\n        os.makedirs(os.path.dirname(save_path))\n\n    torch.save(model.cpu().state_dict(), save_path)\n\ndef weights_init_normal(m):\n    classname = m.__class__.__name__\n    if classname.find('Conv') != -1:\n        init.normal_(m.weight.data, 0.0, 0.02)\n    elif classname.find('Linear') != -1:\n        init.normal_(m.weight.data, 0.0, 0.02)\n    elif classname.find('BatchNorm2d') != -1:\n        init.normal_(m.weight.data, 1.0, 0.02)\n        init.constant_(m.bias.data, 0.0)\n\n\ndef weights_init_xavier(m):\n    classname = m.__class__.__name__\n    if classname.find('Conv') != -1:\n        init.xavier_normal_(m.weight.data, gain=0.02)\n    elif classname.find('Linear') != -1:\n        init.xavier_normal_(m.weight.data, gain=0.02)\n    elif classname.find('BatchNorm2d') != -1:\n        init.normal_(m.weight.data, 1.0, 0.02)\n        init.constant_(m.bias.data, 0.0)\n\n\ndef weights_init_kaiming(m):\n    classname = m.__class__.__name__\n    if classname.find('Conv') != -1:\n        init.kaiming_normal_(m.weight.data, 
a=0, mode='fan_in')\n elif classname.find('Linear') != -1:\n init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')\n elif classname.find('BatchNorm2d') != -1:\n init.normal_(m.weight.data, 1.0, 0.02)\n init.constant_(m.bias.data, 0.0)\n\n\ndef init_weights(net, init_type='normal'):\n print('initialization method [%s]' % init_type)\n if init_type == 'normal':\n net.apply(weights_init_normal)\n elif init_type == 'xavier':\n net.apply(weights_init_xavier)\n elif init_type == 'kaiming':\n net.apply(weights_init_kaiming)\n else:\n raise NotImplementedError('initialization method [%s] is not implemented' % init_type)\n\n\nclass Spec_unet(nn.Module):\n def __init__(self, input_nc = 1, nf = 8, fine_width = 320, fine_height = 768, max_nf = 64):\n super(Spec_unet, self).__init__()\n norm_layer = nn.BatchNorm2d\n use_bias = False\n conv_downsample = [\n nn.Conv2d(input_nc, nf, kernel_size=7, padding=3, bias=use_bias),\n norm_layer(nf),\n nn.LeakyReLU(0.1)]\n nc = nf\n nf*=2\n for i in range(6):\n conv_downsample += [\n nn.Conv2d(nc, min(max_nf, nf), kernel_size=3, stride=2, padding=1, bias=use_bias),\n nn.Dropout(0.2),\n norm_layer(nf),\n nn.LeakyReLU(0.1)]\n nc = min(max_nf, nf)\n nf = min(nf*2, max_nf)\n\n conv_downsample += [\n nn.Conv2d(nf, 8, kernel_size=3, padding=1, bias=use_bias),\n norm_layer(8),\n nn.LeakyReLU(0.1)]\n\n\n self.conv_downsample = nn.Sequential(*conv_downsample)\n \n self.linear = nn.Sequential(\n nn.Linear(8*12*5, 32),\n nn.Dropout(0.2),\n nn.BatchNorm1d(num_features=32),\n nn.LeakyReLU(0.1),\n nn.Linear(32, 1),\n nn.Tanh()\n )\n \n\n\n def forward(self, input):\n downsample = self.conv_downsample(input)\n downsample = downsample.view(-1, 12*5*8)\n output = self.linear(downsample)\n return output\n\n\nif __name__ == '__main__':\n model = Spec_unet()\n init_weights(model, 'kaiming')\n print(model)\n arch = summary(model, torch.rand(2,1,320,768))\n print(arch)\n","sub_path":"networks.py","file_name":"networks.py","file_ext":"py","file_size_in_byte":3562,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"238535817","text":"import sys\nprint(\"Obj file parser Bartłomiej Żmuda\\n\")\n\nvertexList = []\nnormalsList = []\nnodesList = []\nelement = []\n\nfile = open('assets/cylinder.obj')\ncontent = file.readlines()\nfor line in content:\n line = line.rstrip().split(' ', 1)\n prefix = line[0]\n data = line[1]\n if (prefix == 'v'):\n #Process vertex\n data = data.split(' ')\n vertexList.append([float(data[0]), float(data[1]), float(data[2])])\n \n elif (prefix == 'vn'):\n #Process normal\n data = data.split(' ')\n normalsList.append([float(data[0]), float(data[1]), float(data[2])])\n \n elif (prefix == 'f'):\n #Process face\n data = data.split(' ')\n for node in data:\n node = node.split('//')\n \n if(not ([int(node[0]), int(node[1])] in nodesList)):\n nodesList.append([int(node[0]), int(node[1])])\n idx = nodesList.index([int(node[0]), int(node[1])])\n element.append(idx)\n\noutput = open('output.txt', 'w+')\n\nfor node in nodesList:\n vertex = vertexList[node[0]-1]\n normal = normalsList[node[1]-1]\n output.write(str(vertex[0])+'f, '+str(vertex[1])+'f, '+str(vertex[2])+'f, ')\n output.write(str(normal[0])+'f, '+str(normal[1])+'f, '+str(normal[2])+'f,\\n')\n \noutput.write('\\n\\n')\n\ni = 0;\nfor index in element:\n output.write(str(index)+', ')\n i = i + 1\n if (i == 3):\n i = 0\n output.write('\\n')\n\noutput.write('\\n\\n')\n\noutput.write('Size: 
'+str(len(element)))","sub_path":"OBJ_Parser/OBJ_Parser.py","file_name":"OBJ_Parser.py","file_ext":"py","file_size_in_byte":1510,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"312461027","text":"#!/usr/bin/env python\n\n## Stripped down from metaphlan2's metaphlan_hclust_heatmap.py (Jun 2018)\n\nimport sys\nimport numpy as np \nimport matplotlib\nmatplotlib.use('Agg')\nimport scipy\nimport pylab\nimport scipy.cluster.hierarchy as sch\nfrom scipy import stats\n\n## Additional investigation\nimport csv\nfrom pandas import DataFrame\n\n## Set Args\n\ntax_units = \"kpcofgs\"\n\nfin = \"/home/fellows/projects1/calculus_microbiome/evolution/04-analysis/screening/metaphlan2/output/mp2_merged_abundance_table_all_20180628.txt\"\n\nxstart = 1\nxstop = 9999\nystart = 1\nystop = 9999\npercentile = 90 \ntop = 40\ntax_lev = 'g'\n\n\n## Load data and put into some object (array? Dataframe? Lists?)\nmat = [l.strip().split('\t') for l in open( fin ) if l.strip()]\n\n## Extract Genus level\ni = tax_units.index(tax_lev) \nmat = [m for i,m in enumerate(mat) if i == 0 or m[0].split('|')[-1][0] == tax_lev or ( len(m[0].split('|')) == i and m[0].split('|')[-1][0].endswith(\"unclassified\"))]\n\nsample_labels=mat[0][xstart:xstop]\n\nm = [(mm[xstart-1],np.array([float(f) for f in mm[xstart:xstop]])) for mm in mat[ystart:ystop]]\n\n\n\n\n\n\n\n## Filter by percentile !!!!\n## sorted = sorts a list of values\n## \t\tkey: \"A function that would serve as a key or a basis of sort comparison\"\n## Lambdas are one-line functions, also known as anonymous functions in some other languages\n## \t\tLambdas argument: manipulate(argument)\n## sort by the negated stats.scoreatpercentile(row[1], 90) of each row\n\n## On each row (taxon), the sort key below is evaluated\n\nm_new = sorted(m,key=lambda x:-stats.scoreatpercentile(x[1],percentile))\n\n## iterating through x == iterating through each entry in m\n## apply scoreatpercentile to each entry and negate it\n## sorting by the output of the previous step means the 'largest' score has the\n## smallest negative value and so comes first\n\n## For example\n\nfor i in range(len(m)):\n\t-stats.scoreatpercentile(m[i][1], percentile)\n\n## then sort these percentile scores of each taxon, independent of sample
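## (Added illustration, not in the original:) for the values 1..10 the 90th\n## percentile score is 9.1; negating it makes sorted() put the highest-scoring\n## taxa first.\n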
\n## Grab the top forty taxa based on the percentile scores\nfeat_labels = [mm[0].split(\"|\")[-1] for mm in m_new[:top]]\n\n## Filter matrix by these forty\nm_final = [mm[1] for mm in m_new[:top]]\n \nD = np.matrix( np.array( m_final ) )\n\n#return D, feat_labels, sample_labels\n\n\n\n## Convert to dataframe with pandas\ndf = DataFrame.from_records(m)\ndf.columns = [sample_labels]\n\n\n## Write files\n","sub_path":"00-documentation.backup/99-MetaPhlAn2_Heatmap_Subsetting_Investigation.py","file_name":"99-MetaPhlAn2_Heatmap_Subsetting_Investigation.py","file_ext":"py","file_size_in_byte":2306,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"135236332","text":"import cv2\nimport numpy as np\nimport argparse\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--img', required=True)\nparser.add_argument('--result', required=True)\nparser.add_argument('--expected', required=True)\n\nargs = parser.parse_args()\n\nimg = cv2.imread(args.img)\n\nres = open(args.result, 'r')\nexp = open(args.expected, 'r')\n\nres_rect = []\nfor line in res:\n    nums = line.split(' ')\n    res_rect.append(nums)\n\nexp_rect = []\nfor line in exp:\n    nums = line.split(' ')\n    exp_rect.append(nums)\n\nfor coords in res_rect:\n    x = int(coords[0])\n    y = int(coords[1])\n    w = int(coords[2])\n    h = int(coords[3])\n    cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0), 1)\n\nfor coords in exp_rect:\n    y = int(coords[0])\n    x = int(coords[1])\n    w = int(coords[2])\n    h = int(coords[3])\n    cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 1)\n\ncv2.imshow('image', img)\ncv2.waitKey(0)\ncv2.destroyAllWindows()","sub_path":"python/validation/draw_rectangles.py","file_name":"draw_rectangles.py","file_ext":"py","file_size_in_byte":933,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"446685279","text":"import os\nimport tkinter as tk\nfrom tkinter.filedialog import askdirectory\n\n'''\nWith minor adjustments done to two existing packages of Google Photos Takeout, I'm somewhat confident that this\nprogram will work at moving all image and video files out of their individually dated folders. A lot of \ndirectories store .json files for configuration things I suppose, so I try to isolate those files from \nmy exclusive search. \n\nTo use this project, you will need a few things:\n\n1. A Google account with Photos uploaded to photos.google.com\n2. A directory where you want to move your downloaded photos to\n\nTo begin, start by visiting takeout.google.com\n\nNext, log into your account, then choose the data you would like to export.\nSince this program is mostly catered towards a specific set of data, your photos,\nI would advise using this program on one particular set of data, as all moved files\nare simply put into one directory.\n\nClick \"Next step\", and choose your frequency. I would choose \"Export once\".\n\nClick \"Create export\". Depending on how large your export is, you may get an email instead\nof an instant download link. 
You should now have a zip file with\nthe name \"takeout-(exportID)-(exportSegmentNumber).zip\"\n\nUnzip the file, and now you should have a \"Takeout\" folder.\n\nNow, using this program, you can start moving photos into a \nseparate directory.\n\nBe sure to always choose the \"Google Photos\" directory that will be found after unzipping\nyour data.\n'''\n\n\n\n\ndef mainScreen():\n\n    # Functions \n    def get_main_directory():\n        directory = askdirectory()\n        main_entry.insert(0, directory)\n\n    def get_storage_directory():\n        directory = askdirectory()\n        storage_entry.insert(0, directory)\n\n    def files_moved_message():\n        tk.Label(\n            window,\n            text = \"Moving Files\"\n        ).grid(row = 3, column = 1, padx = 5)\n\n    window = tk.Tk()\n    window.title(\"Takeout File Mover\")\n\n    # Open Main Directory\n    open_main = tk.Button(\n        window,\n        text = \"Open Google Photos directory...\",\n        #padx = 10,\n        command = get_main_directory\n    )\n    main_entry = tk.Entry(\n        window,\n        width = 50\n    )\n\n    # Open Storage Directory\n    open_storage = tk.Button(\n        window,\n        text = \"Open storage directory...\",\n        command = get_storage_directory\n    )\n    storage_entry = tk.Entry(\n        window,\n        width = 50\n    )\n\n    # Move Files\n    move_files = tk.Button(\n        window,\n        text = \"Move Files\",\n        padx = 20,\n        pady = 20,\n        command = lambda: [moveFiles(main_entry.get(), storage_entry.get()), files_moved_message()]\n    )\n\n    # Organizing Items\n    open_main.grid(row= 0, column = 0, sticky = \"ew\", padx = 5)\n    main_entry.grid(row= 0, column = 1, sticky = \"e\", padx = 5)\n    open_storage.grid(row= 1, column = 0, sticky = \"ew\", padx = 5)\n    storage_entry.grid(row= 1, column = 1, sticky = \"e\", padx = 5)\n    move_files.grid(row=3, column = 0, padx = 5)\n\n\n    window.mainloop()\n\n\ndef moveFiles(main_directory, storage_directory):\n    # Variables n stuff\n    file_ext_avoid = [\".json\"] # File extensions you want to ignore\n    curr_dir_files = [] # This variable stores current directory items, such as folders and text files\n    dupe_count = 0 # In case you have two similarly named files, this will allow you to rename a duplicate name and still save whatever file it was\n\n    os.chdir(main_directory)\n    curr_dir_files = os.listdir()\n    try:\n        os.mkdir(storage_directory)\n    except:\n        # This just means the directory was already created\n        pass\n    for a_dir in curr_dir_files:\n        os.chdir(main_directory + \"\\\\\" + a_dir)\n        working_dir = os.listdir()\n        \n        for afile in working_dir:\n            # Move the file only if its name contains none of the ignored extensions\n            if not any(ext in afile.lower() for ext in file_ext_avoid):\n                # Duplicate Image name stuff\n                try:\n                    os.rename(afile, storage_directory + \"\\\\\" + afile)\n                except:\n                    os.rename(afile, storage_directory + \"\\\\\" + \"dupe_of_\" + str(dupe_count) + afile)\n                    dupe_count += 1\n\ndef main():\n    mainScreen()\n    \n\nif __name__ == '__main__':\n    main()","sub_path":"GUIs/takeout_file_mover.py","file_name":"takeout_file_mover.py","file_ext":"py","file_size_in_byte":4305,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"333425897","text":"#083.py\n#-*- coding:utf-8 -*-\n\nimport sys\nimport json\nfrom collections import defaultdict, Counter\n\ndef main(infile, outfile):\n\n\ttc = defaultdict(Counter)\n\tfor line in infile:\n\t\tt, c = line.rstrip(\"\\n\").split(\"\\t\")\n\t\ttc[t][c] += 1\n\n\t#N = len(c)\n\n\tfor k in tc.iterkeys():\n\t\tfor v, i in tc[k].iteritems():\n\t\t\toutfile.write(\"{}\\t{}\\t{}\\n\".format(k, v, i))\n\n
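# (Added illustration, not in the original:) given the stdin lines \"a\\tx\", \"a\\tx\"\n# and \"a\\ty\", the output lines are \"a\\tx\\t2\" and \"a\\ty\\t1\" -- pair counts grouped\n# by the first field.\n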
if __name__ == \"__main__\":\n\tmain(sys.stdin, sys.stdout)\n","sub_path":"083.py","file_name":"083.py","file_ext":"py","file_size_in_byte":410,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"238359009","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport re\nimport urllib2\nimport urlparse\n\nfrom base_app.classes.timer import Timer\n\n__author__ = 'Morteza'\n\n\nclass GetUrl:\n    def __init__(self, url):\n        self.url = url\n        self.value = False\n        self.time = 0\n        self.read_url()\n\n    @staticmethod\n    def url_encode_none_ascii(__url):\n        return re.sub('[\\x80-\\xFF]', lambda c: '%%%02x' % ord(c.group(0)), __url)\n\n    def iri_to_uri(self, _enc):\n        try:\n            parts = urlparse.urlparse(self.url)\n            return urlparse.urlunparse(\n                part.encode('idna') if part_i == 1 else self.url_encode_none_ascii(part if _enc else part.encode('utf-8'))\n                for part_i, part in enumerate(parts)\n            )\n        except:\n            return False\n\n    def clean_url(self):\n        return self.url.replace(\"'\", \"\").replace('\"', '')\n\n    def get_url(self, __url):\n        try:\n            response = urllib2.urlopen(__url)\n            self.value = response.read()\n            return True\n        except:\n            return False\n\n    def read_url(self):\n        try:\n            t = Timer()\n            # keep the cleaned url (the return value was previously discarded)\n            self.url = self.clean_url()\n            if not self.get_url(self.iri_to_uri(False)):\n                if not self.get_url(self.iri_to_uri(True)):\n                    self.get_url(self.url)\n            self.time = t.end()\n        except:\n            pass\n","sub_path":"base_app/classes/get_url.py","file_name":"get_url.py","file_ext":"py","file_size_in_byte":1394,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"194280277","text":"import gspread\nfrom oauth2client.service_account import ServiceAccountCredentials\nimport pandas as pd\nimport numpy as np\nimport pprint\nfrom df2gspread import df2gspread as d2g\nfrom gspread_dataframe import get_as_dataframe, set_with_dataframe\nfrom itertools import repeat\npd.options.mode.chained_assignment = None\n\n\n# authentication with google spreadsheet\nscope = ['https://spreadsheets.google.com/feeds','https://www.googleapis.com/auth/drive']\njson = input('Note: json file and this python file must be in same folder or path.\nEnter the name of json file: ')\ncreds = ServiceAccountCredentials.from_json_keyfile_name(json, scope)\nclient = gspread.authorize(creds)\n\n# opening the first sheet from the google sheet\ngoogle_spreadsheet = input('Enter the name of your google spreadsheet: ')\nsheet = client.open(google_spreadsheet)\n\n\n# opening further sheets in the google sheet\n# these must be the second, third and fourth sheets in the google sheet\nsheet1 = client.open(google_spreadsheet).get_worksheet(0)\ndata = sheet1.get_all_records()\n# sheet.add_worksheet(rows = 1000,cols = 100,title='found_integration')\nsheet2 = client.open(google_spreadsheet).get_worksheet(1)\n# sheet.add_worksheet(rows = 1000,cols = 100,title='not_found_ntegrations')\nsheet3 = client.open(google_spreadsheet).get_worksheet(2)\n# sheet.add_worksheet(rows = 1000,cols = 100,title='category_mapping')\nsheet4 = client.open(google_spreadsheet).get_worksheet(3)\npp = pprint.PrettyPrinter()\n\n# converting the sheet with integrations to a dataframe named AB_df\nAB_df = pd.DataFrame(data)\n# pp.pprint(AB_df)\n\n\n# converting the required CSV sheets into dataframes\nconverter_df = pd.read_csv('Updated Converter_7th Aug - Sheet1.csv')\nuniversal_df = pd.read_csv('Updated Cush Sheet as on 17 August - Updated Cush.csv')\n\n\n# creating column names for the integrations\nIntegration_column = []\nno_of_column_not_containing_integration = int(input('Enter the number of columns that do not contain Integrations: '))\n
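# (Added example, not in the original:) with 10 columns in total and 3 of them not\n# containing integrations, the loop below produces 'Integration 1' ... 'Integration 7'.\n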
for i in range(1,len(AB_df.columns)-(no_of_column_not_containing_integration-1)):\n    Integration_column.append('Integration '+str(i))\n# pp.pprint(Integration_column)\n\n\nother_column = []\nfor i in range(no_of_column_not_containing_integration):\n    col = input('Enter name of column '+ str(i+1)+' that does not contain integrations: ')\n    other_column.append(col)\npp.pprint(other_column)\n\n# updating column name to AB_df\n# other_column = ['Title', 'Review URL', 'G2 Score', 'Description', 'Reviews', 'Comments']\n# new_cols = other_column + Integration_column\n# AB_df.columns = new_cols\n# pp.pprint(AB_df)\n\n\n# converting only the integration columns of AB_df into a dataframe\nIntegration_df = AB_df[Integration_column]\n# pp.pprint(Integration_df)\n\ndictionary = {'/':' ','\s+':' ',r'^\s+':'',r'\s+$':''} # replace '/' with \" \", '\s+' with \" \", r'^\s+'->left strip, r'\s+$'->right strip\nIntegration_df.replace(dictionary,regex=True,inplace=True)\n\n# replacing empty cells with the placeholder 'zzz'\nIntegration_df.replace('','zzz',inplace=True)\n\n\n# making a list of all integrations available in AB_df\nIntegration_list = []\nfor i in range(len(Integration_df.index)):\n    Integration_list += Integration_df.values[i].tolist()\n# pp.pprint(Integration_list)\n\n\n# making a new list of the distinct integrations available in AB_df\nunique_integration_set = set(Integration_list)\nold_unique_integration_list = list(unique_integration_set)\n# pp.pprint(unique_integration_list)\n# print(len(unique_integration_list))\nintegration_list_lower = []\nfor i in range(len(old_unique_integration_list)):\n    integration_list_lower.append(str(old_unique_integration_list[i]).lower().replace(' ',''))\n\nunique_integration_list_lower = list(set(integration_list_lower))\n# print(len(unique_integration_list_lower))\n\nunique_integration_dict = {}\nfor key in unique_integration_list_lower:\n    for value in old_unique_integration_list:\n        if key == str(value).lower().replace(' ',''):\n            unique_integration_dict[key] = value \n            del key\n            break\n# print(len(unique_integration_dict))\n\nunique_integration_list = []\n\nfor value in unique_integration_dict.values():\n    unique_integration_list.append(str(value))\n\n# pp.pprint(unique_integration_list)\n# print(len(unique_integration_list))\n\n\n# removing 'nan' and 'zzz' entries from unique_integration_list\nunique_integration_list = [i for i in unique_integration_list if str(i)!= 'nan']\nunique_integration_list = [i for i in unique_integration_list if str(i)!= 'zzz']\n\nprint('unique integration list: '+ str(len(unique_integration_list)))\n\n# list of Software names available in the converter df\nSoftware_Name = converter_df['Software Name'].tolist()\n# pp.pprint(Software_Name)\n\n# list of Actual Spellings of the Software names available in the converter df\nActual_Spelling = converter_df['Actual Spelling'].tolist()\n# pp.pprint(Actual_Spelling)\n\n\n# making a dictionary that maps each Software Name to its Actual Spelling\nconverter_dictionary = {}\nfor key in Software_Name:\n    for value in Actual_Spelling:\n        converter_dictionary[key] = value\n        Actual_Spelling.remove(value)\n        break\n# pp.pprint(converter_dictionary)\n\n\n# making a list of the Integrations available in the universal sheet\nuniversal_integration_list = universal_df['Software Name'].tolist()\nunique_universal_integration_list = list(set(universal_integration_list))\npp.pprint(len(sorted(universal_integration_list)))\n\n\n# making a dictionary for the unique universal integration list\nuniversal_integration_dictionary = {}\nfor key in unique_universal_integration_list:\n    for value in unique_universal_integration_list:\n
        if key == value:\n            universal_integration_dictionary[key] = value\n            break\n# pp.pprint(universal_integration_dictionary)\n\n\n# merging the universal integration dictionary and the converter dictionary\nuniversal_integration_dictionary.update(converter_dictionary)\nprint('universal integration dictionary: '+str(len(universal_integration_dictionary)))\n\n\n\n\nfound_integration_dct = {}\n# step 1\n# Each element of unique_integration_list is looked up among the keys of\n# universal_integration_dictionary; when a match is found, the element is replaced\n# in Integration_df by its standard value from the dictionary.\nfound_integration_list_in_step1 = []\nfound_integration_dct_in_step1 = {}\nnot_found_integration_in_step1 = []\nfor i in range(len(unique_integration_list)):\n    for key,value in universal_integration_dictionary.items():\n        if str(key).casefold() == str(unique_integration_list[i]).casefold():\n            Integration_df.replace(to_replace=unique_integration_list[i], value=value,inplace=True)\n            found_integration_list_in_step1.append(unique_integration_list[i])\n            found_integration_dct_in_step1[key] = value\n            break\n    else:\n        not_found_integration_in_step1.append(unique_integration_list[i])\nfound_integration_dct.update(found_integration_dct_in_step1)\n\nprint('Integration found in step1: ' + str(len(found_integration_list_in_step1)))\nprint('Integration not found in step1: '+str(len(not_found_integration_in_step1)))\nprint('found_integration_dct length after step 1: '+ str(len(found_integration_dct)))\n\n\n\n\n\n# step 2\n# Spaces are removed from each integration name that step 1 missed and it is converted\n# to lower case; it is then looked up in the dictionary, whose keys are normalised the\n# same way. Matches are collected in found_integration_dct_in_step2, and each matched\n# name is afterwards replaced in Integration_df by its canonical value, as shown in the\n# example below.
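# (Hypothetical example, added for clarity:) ' Sales Force ' normalises to\n# 'salesforce', which can then match a dictionary key such as 'Salesforce' after the\n# same normalisation, so the cell is replaced by the canonical spelling.\n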
found_integration_dct_in_step2 = {}\nfound_integration_in_step2 = []\n\nfor i in range(len(not_found_integration_in_step1)):\n    for key,value in universal_integration_dictionary.items():\n        if str(not_found_integration_in_step1[i]).casefold().replace(' ','') == str(key).casefold().replace(' ',''):\n            found_integration_dct_in_step2[not_found_integration_in_step1[i]] = value\n            found_integration_in_step2.append(not_found_integration_in_step1[i])\n            break\n\nnot_found_integration_in_step2 = list(set(not_found_integration_in_step1)-set(found_integration_in_step2))\nprint('not found integration after step 2: '+str(len(not_found_integration_in_step2)))\n\nfound_integration_dct.update(found_integration_dct_in_step2)\nprint('found integration dct after step 2: '+str(len(found_integration_dct)))\n# del found_integration_dct['zzz'] \n\n# pp.pprint(dct)\nprint('found integration in step 2 '+ str(len(found_integration_in_step2)))\n\n\n\nfor i in range(len(found_integration_in_step2)):\n    for key,value in found_integration_dct_in_step2.items():\n        if found_integration_in_step2[i].casefold().replace(' ','') == key.casefold().replace(' ',''):\n            Integration_df.replace(to_replace=found_integration_in_step2[i],value=value, inplace=True)\n            break\n\n\n\n\nother_column_df = AB_df[other_column]\nfinal_df = pd.concat([other_column_df,Integration_df],axis=1)\nfinal_df.to_csv('cleaned.csv',index=False)\n\nnot_found_integration = not_found_integration_in_step2\nsn2an_df = pd.DataFrame(sorted(list(found_integration_dct.items())), columns=['Software Name','Actual Name'])\nsn2an_notfound_df = pd.DataFrame(not_found_integration, columns=['Software Name'])\n\n\n\n# Software to its Category mapping\n\n\n# s2c = software-to-category list of lists\ns2c = [list(i) for i in universal_df.itertuples(index=False)] # convert the software name / category rows of the universal sheet into a list of lists\n\n\n# build a list of the canonical names collected in found_integration_dct\nfound_integration_list = []\nfor i in found_integration_dct.values():\n    # compare in lower case so the membership check matches what is stored\n    if str(i).lower() not in found_integration_list:\n        found_integration_list.append(str(i).lower())\n\nfound_integration_list = [i for i in found_integration_list if str(i)!= 'zzz'] # remove 'zzz' if there are any\nprint('found_integration_list length: '+str(len(found_integration_list)))\n\n\n\n\n# convert found_integration_list into a list of lists\nfound_integration_lol = [[i] for i in found_integration_list]\npp.pprint(sorted(found_integration_lol))\nprint(len(found_integration_lol))\n\n\n\n# this code maps each software name to all of its available categories using the Universal sheet (The Cush sheet)\nfor i in range(len(found_integration_lol)):\n    for j in range(len(s2c)):\n        if str(found_integration_lol[i][0]).casefold().replace(' ','') == str(s2c[j][0]).casefold().replace(' ',''):\n            found_integration_lol[i].append(s2c[j][1])\n\n    \npp.pprint(found_integration_lol)\n\ns2c_df = pd.DataFrame(sorted(found_integration_lol),columns=None) # convert the mapped list (found_integration_lol) into a dataframe\n# s2c_df.to_csv('s2c.csv')\n\n\n# s2c_df = pd.DataFrame(s2c_df, columns=None)\n\n\n\n# updating the data in the google sheet\nset_with_dataframe(sheet1,final_df)\nset_with_dataframe(sheet2,sn2an_df)\nset_with_dataframe(sheet3,sn2an_notfound_df)\nset_with_dataframe(sheet4,s2c_df)\n\n\n\n\n# <------------------------Cleaning and Mapping Part is 
done------------------------------------->\n \n\n# # sheet.add_worksheet(rows=500,cols=500,title='Final Integrals')\n# sheet5 = client.open(google_spreadsheet).get_worksheet(4)\n\n# # sname_df is a dataframe of software name available in raw sheet\n# sname_df = AB_df['Title']\n\n\n# final_integral_df = pd.concat([sname_df,Integration_df],axis=1) # this dataframe will contain only sofware name and its integrations\n# final_integral_df.replace('zzz','',inplace=True) # it replaces zzz with null\n# final_integral_df.replace('',np.NaN,inplace=True) # it replaces null cells with NaN\n\n\n\n# Integration_df.replace('zzz',np.NaN,inplace=True) # replace 'zzz' with null\n# integration_lol = Integration_df.to_numpy().tolist() # this will give list of list of row wise integrations\n# pp.pprint(integration_lol)\n\n\n# cat = [] # category list\n# cat_lol = [] # category list of list row wise\n\n\n\n# # in this category according to integration name is given in output list cat which stands for category\n# # this category list will converted into list of list and appended it cat_lol which stands for \n# # category list of list\n# for i in range(len(integration_lol)):\n# cat.clear()\n# for j in range(len(integration_lol[i])):\n# for k in range(len(s2c)):\n# if integration_lol[i][j] == s2c[k][0]:\n# cat.append(s2c[k][1])\n \n# cat = [cat]\n# cat_lol += cat\n\n# category_df = pd.DataFrame(cat_lol) # cat_lol is converted to dataframe\n\n\n# # category_column list contain new column names for category_df\n# category_column = []\n# for i in range(1,len(category_df.columns)+1):\n# category_column.append('Category '+str(i))\n\n# category_df.columns = category_column\n# new_final_integral_df = pd.concat([final_integral_df,category_df],axis=1) #final_integral_df and category_df is now joined\n\n# new_final_integral_df['Integration_count'] = final_integral_df[Integration_column].count(axis=1) # counts the total number of integration available in Integration in each row\n\n# new_final_integral_df['category_count'] = new_final_integral_df[category_column].count(axis=1) #counts the total number of categories available in according to integration in each row \n\n# # gives number of unique integration categories available in each row\n# unique_category_list = []\n# for i in range(len(cat_lol)):\n# unique_category_list.append(len(list(set(cat_lol[i]))))\n\n# unique_category_df = pd.DataFrame(unique_category_list) # for conviniency unique_category_list is converted to dataframe so we can join it to final_integral_df\n# new_final_integral_df['unique_category_count'] = unique_category_df # new column added to show unique category available in each row\n\n\n# # moving integration_count, category_count, unique_category_count beside the software name \n# df = new_final_integral_df['Integration_count']\n# new_final_integral_df.drop(labels=['Integration_count'],axis=1, inplace=True)\n# new_final_integral_df.insert(1,'Integration_count',df)\n\n# df = new_final_integral_df['unique_category_count']\n# new_final_integral_df.drop(labels=['unique_category_count'],axis=1,inplace=True)\n# new_final_integral_df.insert(2,'unique_category_count',df)\n\n# df = new_final_integral_df['category_count']\n# new_final_integral_df.drop(labels='category_count',axis=1,inplace=True)\n# new_final_integral_df.insert(3,'category_count',df)\n\n\n# new_final_integral_df.sort_values(by='Integration_count',ascending=False,inplace=True) # sorting data according to the descending order of Integration count\n\n\n# 
set_with_dataframe(sheet5,new_final_integral_df)\n\n# # <---------unique integration count-category count and unique category count completed ----------------->\n\n\n\n\n\n# # sheet.add_worksheet(rows = 3000,cols=1000, title='L1-L2 Overall data') \n# sheet6 = client.open(google_spreadsheet).get_worksheet(5)\n\n\n\n# L1_L2_cat_list = category_df.values.tolist()\n\n# L1_L2_cat_list = [[str(L1_L2_cat_list[i][j]) for j in range(len(L1_L2_cat_list[i])) if str(L1_L2_cat_list[i][j]) != 'None']for i in range(len(L1_L2_cat_list))]\n\n# L1_L2_cat_lol = [[[str(L1_L2_cat_list[i][j])]for j in range(len(L1_L2_cat_list[i]))]for i in range(len(L1_L2_cat_list))]\n\n# for i in range(len(L1_L2_cat_lol)):\n# for j in range(len(L1_L2_cat_lol[i])):\n# for k in range(len(L1_L2_cat_lol[i][j])):\n# L1_L2_cat_lol[i][j].append(L1_L2_cat_lol[i][j][k].split(' - ',1))\n# for l in range(len(L1_L2_cat_lol[i][j][1])):\n# L1_L2_cat_lol[i][j].append(L1_L2_cat_lol[i][j][1][l])\n\n\n# new_L1_L2_cat = []\n# for i in range(len(L1_L2_cat_lol)):\n# for j in range(len(L1_L2_cat_lol[i])):\n# new_L1_L2_cat.append(L1_L2_cat_lol[i][j])\n\n# L1_L2_overall_df = pd.DataFrame(new_L1_L2_cat)\n\n# L1_L2_overall_df = L1_L2_overall_df.drop(1,axis=1)\n\n# L1_L2_overall_df.columns = ['L1-L2 overall','L1','L2']\n\n# set_with_dataframe(sheet6,L1_L2_overall_df)\n\n\n# # <------------L1-L2 categories are separated and updated in L1-L2 overall sheet------------>\n\n# # sheet.add_worksheet(rows = 1000,cols=100, title='Pivot Table')\n# sheet7 = client.open(google_spreadsheet).get_worksheet(6)\n\n# # counting the occurance of every integration of category L1\n\n# L1_count_df = L1_L2_split_df.groupby(['L1'],as_index=False).count()\n# L1_count_cols = ['L1','COUNTA_OF_L1']\n# L1_count_df.columns = L1_count_cols\n# L1_count_df = L1_count_df.sort_values(by='COUNTA_OF_L1',ascending=False,ignore_index=True)\n\n\n# # counting the occurance of L2 based on L1 \n# L2_count_df = L1_L2_split_df.groupby(['L1','L2'],as_index=False).size()\n# L2_count_cols = ['L1','L2','COUNTA_OF_L2']\n# L2_count_df.columns = L2_count_cols\n# L2_count_df = L2_count_df.sort_values(['L1','COUNTA_OF_L2'],ascending=[True,False],ignore_index=True)\n\n# Pivot_Table_df = pd.concat([L1_count_df,L2_count_df],axis=1)\n\n# set_with_dataframe(sheet7,Pivot_Table_df)\n\n# #<----------------------------------Pivot table completed--------------------------------------------> \n\n# # sheet.add_worksheet(rows=500,cols=100,title='Functional Analysis')\n# sheet8 = client.open(google_spreadsheet).worksheet('Functional Analysis')\n\n\n# L1_count_list = L1_count_df.values.tolist() # gives list of list of software name and its number of occurance\n# # pp.pprint(L1_count_list)\n\n# L1_graph_lol = []\n# others_lol = []\n\n\n# # if length of count list is greater or equal to 20 then top 9 softwares are added as it is with their \n# # count and other software are added in 'others' and total count of 'others' is summed up and added as\n# # 'others' count\n# # if length of countlist is less then 20 then top half of them which contains more no of occurence\n# # are added as it is with thier count and remaining half is added to 'others'\n# if len(L1_count_list)>=20: \n# for i in range(len(L1_count_list)):\n# if i<9:\n# L1_graph_lol.append(L1_count_list[i])\n# else:\n# others_lol.append(L1_count_list[i])\n# else:\n# for i in range(int(len(L1_count_list)/2)):\n# L1_graph_lol.append(L1_count_list[i])\n# for j in range(int(len(L1_count_list)/2),len(L1_count_list)+1):\n# others_lol.append(L1_count_list[j])\n\n\n# others 
= ['other']\n# sum = 0\n# for i in range(len(others_lol)):\n# sum += others_lol[i][1]\n# others.append(sum)\n\n# L1_graph_lol.append(others)\n\n# L1_graph_df = pd.DataFrame(L1_graph_lol,columns=['Software Name','COUNT'])\n\n# set_with_dataframe(sheet8,L1_graph_df)\n\n\n# # <-------------------------------Functional Analysis Completed------------------------------------->\n\n\n# # select top 4 software name which has highest no. of occurence\n# top_4_soft = [] \n# for i in range(4):\n# top_4_soft.append(L1_count_list[i][0])\n\n# L2_count_list = L2_count_df.values.tolist() # coverts L2_count_df into list of list\n\n# # for first software which has highest no. of occurence\n# # \n# try:\n# sheet.add_worksheet(rows=500,cols=100,title=top_4_soft[0])\n# except:\n# sheet9 = client.open(google_spreadsheet).worksheet(top_4_soft[0])\n\n# sheet9 = client.open(google_spreadsheet).worksheet(top_4_soft[0])\n\n\n\n# first_soft = []\n# for i in range(len(L2_count_list)):\n# if top_4_soft[0] == L2_count_list[i][0]:\n# first_soft.append(L2_count_list[i])\n\n# # it will give list of list of L1, L2 and occurence of L2. we need only L2 and its occurence\n\n# for i in range(len(first_soft)):\n# del first_soft[i][0]\n\n# # it will remove L1 from first soft list of list\n\n# first_soft_graph = [] #software names which are included in graph\n# first_soft_others = [] # software which are not in first_soft graph are added in this list\n\n# # if length of first soft is greater or equal to 20 then top 9 softwares are added as it is with their \n# # count and other software are added in first_soft_others and total count of first_soft_others\n# # is summed up and added as 'others' count\n# # if length of first_soft is less then 20 then top half of them which contains more no of occurence\n# # are added as it is with thier count and remaining half is added to 'first_soft_others'\n# if len(first_soft)>=20: \n# for i in range(len(first_soft)):\n# if i<9:\n# first_soft_graph.append(first_soft[i])\n# else:\n# first_soft_others.append(first_soft[i])\n# else:\n# for i in range(int(len(first_soft)/2)):\n# first_soft_graph.append(first_soft[i])\n# for j in range(int(len(first_soft)/2),len(first_soft)):\n# first_soft_others.append(first_soft[j])\n\n\n# # counting the occurence of software available in first_soft_others\n# others = ['other']\n# add = 0\n# for i in range(len(first_soft_others)):\n# add = add + first_soft_others[i][1]\n# others.append(add)\n\n# first_soft_graph.append(others)\n\n\n# first_soft_df = pd.DataFrame(first_soft,columns=['L1','COUNT'])\n# first_soft_graph_df = pd.DataFrame(first_soft_graph,columns=['L1','COUNT'])\n\n# first_soft_final_df = pd.concat([first_soft_df,first_soft_graph_df],axis=1)\n\n# set_with_dataframe(sheet9,first_soft_final_df)\n\n\n# # <---------------------------Highest no. of L2 occurence graph details completed-------------------------------->\n# try:\n# sheet.add_worksheet(rows=500,cols=100,title=top_4_soft[1])\n# except:\n# sheet10 = client.open(google_spreadsheet).worksheet(top_4_soft[1])\n\n# sheet10 = client.open(google_spreadsheet).worksheet(top_4_soft[1])\n\n\n# second_soft = []\n# for i in range(len(L2_count_list)):\n# if top_4_soft[1] == L2_count_list[i][0]:\n# second_soft.append(L2_count_list[i])\n\n# # it will give list of list of L1, L2 and occurence of L2. 
we need only L2 and its occurence\n\n# for i in range(len(second_soft)):\n# del second_soft[i][0]\n\n# # it will remove L1 from first soft list of list\n\n# second_soft_graph = [] #software names which are included in graph\n# second_soft_others = [] # software which are not in second_soft_graph are added in this list\n\n\n# if len(second_soft)>=20: \n# for i in range(len(second_soft)):\n# if i<9:\n# second_soft_graph.append(second_soft[i])\n# else:\n# second_soft_others.append(second_soft[i])\n# else:\n# for i in range(int(len(second_soft)/2)):\n# second_soft_graph.append(second_soft[i])\n# for j in range(int(len(second_soft)/2),len(second_soft)):\n# second_soft_others.append(second_soft[j])\n\n\n\n# others = ['other']\n# add = 0\n# for i in range(len(second_soft_others)):\n# add = add + second_soft_others[i][1]\n# others.append(add)\n\n# second_soft_graph.append(others)\n\n\n# second_soft_df = pd.DataFrame(second_soft,columns=['L1','COUNT']) \n# second_soft_graph_df = pd.DataFrame(second_soft_graph,columns=['L1','COUNT'])\n\n# second_soft_final_df = pd.concat([second_soft_df,second_soft_graph_df],axis=1)\n\n# set_with_dataframe(sheet10,second_soft_final_df)\n\n# # <--------------------------Second Highest no. of L2 occurence graph details completed-------------------------------->\n# try:\n# sheet.add_worksheet(rows=500,cols=100,title=top_4_soft[2])\n# except:\n# sheet11 = client.open(google_spreadsheet).worksheet(top_4_soft[2])\n\n# sheet11 = client.open(google_spreadsheet).worksheet(top_4_soft[2])\n\n\n# third_soft = []\n# for i in range(len(L2_count_list)):\n# if top_4_soft[2] == L2_count_list[i][0]:\n# third_soft.append(L2_count_list[i])\n\n# # it will give list of list of L1, L2 and occurence of L2. we need only L2 and its occurence\n\n# for i in range(len(third_soft)):\n# del third_soft[i][0]\n\n# # it will remove L1 from first soft list of list\n\n# third_soft_graph = [] #software names which are included in graph\n# third_soft_others = [] # software which are not in second_soft_graph are added in this list\n\n# if len(third_soft)>=20: \n# for i in range(len(third_soft)):\n# if i<9:\n# third_soft_graph.append(third_soft[i])\n# else:\n# third_soft_others.append(third_soft[i])\n# else:\n# for i in range(int(len(third_soft)/2)):\n# third_soft_graph.append(third_soft[i])\n# for j in range(int(len(third_soft)/2),len(third_soft)):\n# third_soft_others.append(third_soft[j])\n\n\n\n# others = ['other']\n# add = 0\n# for i in range(len(third_soft_others)):\n# add = add + third_soft_others[i][1]\n# others.append(add)\n\n# third_soft_graph.append(others)\n\n\n# third_soft_df = pd.DataFrame(third_soft,columns=['L1','COUNT']) \n# third_soft_graph_df = pd.DataFrame(third_soft_graph,columns=['L1','COUNT'])\n\n# third_soft_final_df = pd.concat([third_soft_df,third_soft_graph_df],axis=1)\n\n# set_with_dataframe(sheet11,third_soft_final_df)\n\n\n\n# # <--------------------------Third Highest no. of L2 occurence graph details completed-------------------------------->\n# try:\n# sheet.add_worksheet(rows=500,cols=100,title=top_4_soft[3])\n# except: \n# sheet12 = client.open(google_spreadsheet).worksheet(top_4_soft[3])\n\n# sheet12 = client.open(google_spreadsheet).worksheet(top_4_soft[3])\n\n# fourth_soft = []\n# for i in range(len(L2_count_list)):\n# if top_4_soft[3] == L2_count_list[i][0]:\n# fourth_soft.append(L2_count_list[i])\n\n# # it will give list of list of L1, L2 and occurence of L2. 
we need only L2 and its occurence\n\n# for i in range(len(fourth_soft)):\n# del fourth_soft[i][0]\n\n# # it will remove L1 from first soft list of list\n\n# fourth_soft_graph = [] #software names which are included in graph\n# fourth_soft_others = [] # software which are not in second_soft_graph are added in this list\n\n# if len(fourth_soft)>=20: \n# for i in range(len(fourth_soft)):\n# if i<9:\n# fourth_soft_graph.append(fourth_soft[i])\n# else:\n# fourth_soft_others.append(fourth_soft[i])\n# else:\n# for i in range(int(len(fourth_soft)/2)):\n# fourth_soft_graph.append(fourth_soft[i])\n# for j in range(int(len(fourth_soft)/2),len(fourth_soft)):\n# fourth_soft_others.append(fourth_soft[j])\n\n\n\n# others = ['other']\n# add = 0\n# for i in range(len(fourth_soft_others)):\n# add = add + fourth_soft_others[i][1]\n# others.append(add)\n\n# fourth_soft_graph.append(others)\n\n\n# fourth_soft_df = pd.DataFrame(fourth_soft,columns=['L1','COUNT']) \n# fourth_soft_graph_df = pd.DataFrame(fourth_soft_graph,columns=['L1','COUNT'])\n\n# fourth_soft_final_df = pd.concat([fourth_soft_df,fourth_soft_graph_df],axis=1)\n\n# set_with_dataframe(sheet12,fourth_soft_final_df)\n\n# # <--------------------------Fourth Highest no. of L2 occurence graph details completed-------------------------------->\n\n\n# try:\n# sheet.add_worksheet(rows=500,cols=100,title='integrals max')\n# except:\n# sheet13 = client.open(google_spreadsheet).worksheet('integrals max')\n\n# sheet13 = client.open(google_spreadsheet).worksheet('integrals max')\n\n# lol_Integration = Integration_df.values.tolist() # give list of list available in integration column (row wise list)\n# list_Integration = [] \n\n# for i in range(len(lol_Integration)):\n# for j in range(len(lol_Integration[i])):\n# list_Integration.append(lol_Integration[i][j])\n\n# # now list_Integration contains list of integration available in Integration column\n\n# Integration_dct = {}\n\n# for i in list_Integration:\n# Integration_dct[i] = Integration_dct.get(i,0)+1\n# # now Integration dct will contain software name as key and its occurence in list as value\n\n# if 'zzz' in Integration_dct:\n# del Integration_dct['zzz'] # removing zzz key from dictionary\n\n# integrals_max_df = pd.DataFrame(Integration_dct.items(),columns=['Software','SUM of Integration Count'])\n\n# integrals_max_df = integrals_max_df.sort_values(by='SUM of Integration Count', ascending=False,ignore_index=True)\n\n# integrals_max_df.dropna(inplace=True)\n\n# Most_Popular_Integrations = integrals_max_df.iloc[0:11]\n# Most_Popular_Integrations[''] = 'Most Popular Integrations'\n\n# integrals_max_df = pd.concat([integrals_max_df,Most_Popular_Integrations],axis=1)\n\n# df = integrals_max_df['']\n# integrals_max_df.drop('',axis=1,inplace=True)\n# integrals_max_df.insert(3,'',df)\n\n\n# set_with_dataframe(sheet13,integrals_max_df)\n\n# # <----------------------------Integrals max sheet completed------------------------------->\n\n\n# try:\n# sheet.add_worksheet(rows=500,cols=100,title='Top SW')\n# except:\n# sheet14 = client.open(google_spreadsheet).worksheet('Top SW')\n\n# sheet14 = client.open(google_spreadsheet).worksheet('Top SW')\n\n# Top_sw_df = new_final_integral_df.iloc[:,[0,1]]\n# Top_sw_df = Top_sw_df.sort_values(by='Integration_count',ascending=False,ignore_index=True)\n\n# Best_Integrated_companies = Top_sw_df.iloc[0:11]\n# Best_Integrated_companies[''] = 'Best Integrated companies'\n# Top_sw_df = pd.concat([Top_sw_df,Best_Integrated_companies],axis=1)\n# df = Top_sw_df['']\n# 
Top_sw_df.drop('',axis=1,inplace=True)\n# Top_sw_df.insert(3,'',df)\n\n# set_with_dataframe(sheet14,Top_sw_df)\n\n\n# # <---------------------------------Top SW completed----------------------------------------->\n\n\n# try:\n# sheet.add_worksheet(rows=1000,cols=100,title='Functional Area Leader')\n# except:\n# sheet15 = client.open(google_spreadsheet).worksheet('Functional Area Leader')\n\n# sheet15 = client.open(google_spreadsheet).worksheet('Functional Area Leader')\n\n# # Integration_list is availble which contains Integrations in Integration_df\n# # in form of [[first row],[second row],...]\n# # we need to make it one list of all integrations\n# Integrations = Integration_list # contains Integrations\n# for i in range(len(Integration_list)):\n# for j in range(len(Integration_list[i])):\n# Integrations.append(Integration_list[i][j])\n\n# # Integrations contains 'zzz', we need to remove it.\n# while 'zzz' in Integrations:\n# Integrations.remove('zzz')\n\n# # we need to assign category to each integrations, so making integrations list of list\n# Integrations_lol = [[i] for i in Integrations]\n\n\n# # we need software and category list\n# s2c = [list(i) for i in universal_df.itertuples(index=False)]\n\n# # now we need to map category(L1-L2) to particular softwares\n# for i in range(len(Integrations_lol)):\n# for j in range(len(s2c)):\n# if str(Integrations_lol[i][0]).lower().replace(' ','') == str(s2c[j][0]).lower().replace(' ',''):\n# Integrations_lol[i].append(s2c[j][1])\n\n# s2c_list = Integrations_lol # for meaningful naming convention\n\n# # splitting L1 and L2 category, we only need L1 \n# s2c_list = [[str(s2c_list[i][j]).split(' - ',1) for j in range(len(s2c_list[i]))]for i in range(len(s2c_list))]\n# # this gives software name and its category splitted in L1 and L2 e.g [[[soft_name],[L1,L2]],[[soft_name][L1,L2]]]\n\n\n# # removing L2 category\n# for i in range(len(s2c_list)):\n# for j in range(len(s2c_list[i])):\n# if len(s2c_list[i][j])>1:\n# s2c_list[i][j].remove(s2c_list[i][j][1])\n\n\n\n# # we need to count occurence of L1 category which contain 'nan' or 'Error' or 'Error -'\n# L1_dct = {}\n# for i in range(len(s2c_list)):\n# for j in range(1,len(s2c_list[i])):\n# L1_dct[s2c_list[i][j][0]] = L1_dct.get(s2c_list[i][j][0],0)+1\n\n# L1_occurence_lot = [(k,v) for k,v in L1_dct.items()] # This will convert dictionary into list of tuples\n\n# # we need to remove 'nan' or 'Error' or 'Error -'\n\n# for i in L1_occurence_lot:\n# if i[0] == 'nan':\n# L1_occurence_lot.remove(i)\n# if i[0] == 'Error':\n# L1_occurence_lot.remove(i)\n# if i[0] == 'Error -':\n# L1_occurence_lot.remove(i)\n\n# # convert list of tuples to dataframe\n# L1_occurence_df = pd.DataFrame(L1_occurence_lot,columns=['L1','count'])\n# L1_occurence_df.sort_values(by='count',ascending=False,ignore_index=True,inplace=True)\n# # L1_occurence_df.drop('nan',inplace=True)\n\n# # L1_lol contains L1 categories in the form of list of list\n# L1_lol = []\n# for i in L1_dct:\n# L1_lol.append([i])\n\n# L1 = L1_lol # future purpose\n\n# # now we need to append categories to software\n# for i in range(len(L1_lol)):\n# for j in range(len(s2c_list)):\n# for k in range(1,len(s2c_list[j])):\n# if L1_lol[i][0] == s2c_list[j][k][0]:\n# L1_lol[i].append(s2c_list[j][0][0])\n\n\n# # we need to count the particular software occurence in particular category\n# List_software_occurence = []\n# temp_dct = {}\n# for i in range(len(L1_lol)):\n# for j in range(len(L1_lol[i])):\n# temp_dct[L1_lol[i][j]] = temp_dct.get(L1_lol[i][j],0)+1\n# 
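# The category-matching loops above compare every integration against every
# software row after lowercasing and stripping spaces, which is O(n*m).
# Building a normalized lookup table once reduces each match to a dict probe;
# a sketch (the rows and names here are illustrative assumptions):
def normalize(name):
    return str(name).lower().replace(' ', '')

s2c_rows = [['Google Sheets', 'Productivity - Spreadsheets'],
            ['Jira', 'Development Software - Issue Tracking']]
category_by_software = {normalize(soft): cat for soft, cat in s2c_rows}

integrations = ['google sheets', 'JIRA ']
print([category_by_software.get(normalize(i)) for i in integrations])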
List_software_occurence.append(dict(temp_dct))\n# temp_dct.clear()\n\n# # sorting the software occurence in descending order\n# int_occurence_lot = []\n# for i in range(len(List_software_occurence)):\n# int_occurence_lot.append(sorted([(k,v) for k,v in List_software_occurence[i].items()],key = lambda x:x[1],reverse=True))\n\n# # making pair of L1 cat with software of that category and occurence of software\n# for i in range(len(L1)):\n# for j in range(len(int_occurence_lot)):\n# for k in range(len(int_occurence_lot[j])):\n# if L1[i][0] == int_occurence_lot[j][k][0]:\n# L1[i].append(int_occurence_lot[j])\n\n\n# for i in range(len(L1)):\n# while (len(L1[i]))!=2:\n# for k,j in enumerate(L1[i]): \n# if k == 0:\n# continue\n# elif k == len(L1[i])-1:\n# continue\n# else:\n# L1[i].remove(j)\n\n# for i in range(len(L1)):\n# for j in range(1,len(L1[i])):\n# for k in L1[i][j]:\n# L1[i].append(k)\n# L1[i][1].clear()\n\n\n# for i in L1:\n# if i[0] == 'Error' or i[0] =='Error -':\n# L1.remove(i)\n\n# for i in range(len(L1)):\n# for j in L1[i]:\n# if j==[]:\n# L1[i].remove(j)\n\n\n# L1_int_occurence_df = pd.DataFrame(L1)\n\n# L1_int_occurence_df.drop(1,axis=1,inplace=True)\n# L1_int_occurence_df.rename({0: 'Fun', 1: '(Int,count)'}, axis='columns',inplace=True)\n\n\n# Functional_area_leader_df = pd.concat([L1_occurence_df,L1_int_occurence_df],axis=1)\n# set_with_dataframe(sheet15,Functional_area_leader_df)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n# # # remove L2 category from s2c_list\n# # for i in range(len(s2c_list)):\n# # for j in range(len(s2c_list[i])):\n# # if len(s2c_list[i][j])>1:\n# # s2c_list[i][j].remove(s2c_list[i][j][1])\n\n# # # creating lol of L1 category list\n# # L1_lol = []\n# # for i in L1_dct:\n# # L1_lol.append([i])\n\n# # for i in L1_lol:\n# # if i[0] == 'nan':\n# # L1_lol.remove(i)\n# # if i[0] == 'Error':\n# # L1_lol.remove(i)\n# # if i[0] == 'Error -':\n# # L1_lol.remove(i)\n\n\n\n# # L1 = L1_lol #for future purpose\n# # # adding integration beside category according to category\n# # # e.g. [['L1','all int of L1 category'],['L1','all int of L1 category']....]\n# # for i in range(len(L1_lol)):\n# # for j in range(len(s2c_list)):\n# # for k in range(1,len(s2c_list[j])):\n# # if L1_lol[i][0] == s2c_list[j][k][0]:\n# # L1_lol[i].append(s2c_list[j][0][0])\n\n\n# # # it counts the occurence of each integration and make a list named List_software_occurence\n# # List_software_occurence = []\n# # temp_dct = {}\n# # for i in range(len(L1_lol)):\n# # for j in range(len(L1_lol[i])):\n# # temp_dct[L1_lol[i][j]] = temp_dct.get(L1_lol[i][j],0)+1\n# # List_software_occurence.append(dict(temp_dct))\n# # temp_dct.clear()\n\n# # # it converts dictionary of occurence of integrations convert it into tuple and \n# # # sort it in descending order\n# # int_occurence_lot = []\n# # for i in range(len(List_software_occurence)):\n# # int_occurence_lot.append(sorted([(k,v) for k,v in List_software_occurence[i].items()],key = lambda x:x[1],reverse=True))\n\n\n# # # it joins integration corresponding to L1 category available in L1_list\n# # # e.g. 
[[L1,[(integration name corresponding to L1 category,occurence )]],...]\n# # for i in range(len(L1)):\n# # for j in range(len(int_occurence_lot)):\n# # for k in range(len(int_occurence_lot[j])):\n# # if L1[i][0] == int_occurence_lot[j][k][0]:\n# # L1[i].append(int_occurence_lot[j])\n\n# # # pp.pprint(L1_list) \n# # for i in range(len(L1)):\n# # while (len(L1[i]))!=2:\n# # for k,j in enumerate(L1[i]): \n# # if k == 0:\n# # continue\n# # elif k == len(L1[i])-1:\n# # continue\n# # else:\n# # L1[i].remove(j)\n\n\n\n# # # we can not have [[L1,[(integration name corresponding to L1 category,occurence )]],...] format\n# # # so we append tuple (integration name corresponding to L1 category,occurence ) with L1\n# # # like [['Development Software',[],[],('jira', 6),('airtable', 4),('zapier', 4)] \n# # for i in range(len(L1)):\n# # for j in range(1,len(L1[i])):\n# # for k in L1[i][j]:\n# # L1[i].append(k)\n# # L1[i][1].clear()\n\n\n# # for i in L1:\n# # if i[0] == 'nan':\n# # L1.remove(i)\n# # if i[0] == 'Error':\n# # L1.remove(i)\n# # if i[0] == 'Error -':\n# # L1.remove(i)\n\n\n\n\n\n\n","sub_path":"cleaning.py","file_name":"cleaning.py","file_ext":"py","file_size_in_byte":36878,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"66012044","text":"from __future__ import print_function\nimport ncs\nimport sys\nimport time\n\ndef create_service_interface_on_device(hostname, interface_name):\n with ncs.maapi.single_write_trans('admin', 'python', ['ncsadmin']) as trans:\n print(hostname, interface_name)\n root = ncs.maagic.get_root(trans)\n service_name = input(\"Enter the service name\")\n print(service_name)\n service_entry = root.services.project01.create(service_name)\n service_entry.device = hostname\n\n print(\"Creating service Interface: \" + service_name)\n #tun = service_entry.ios.tunnels.create(str(tun_id))\n #tun.tunnel_id = int(tun_id)\n \n Int = service_entry.ios.interfaces.create(str(interface_name))\n #Int.interface_name = str(interface_name)\n #tun.tunnel_id = int(tun_id)\n \n \"\"\"\n \n # Dump commit dry run output\n outformat = 'native'\n dryRun = root.services.commit_dry_run\n dryInput = dryRun.get_input()\n dryInput.outformat = outformat\n dryOutput = dryRun(dryInput)\n dryrun_output = \"../Output/\" + \"DryRun.txt\"\n for d in dryOutput.native.device:\n dryrun_output = dryrun_output + \"\\n\" + d.name + \"\\n\" + d.data.lstrip()\n \"\"\"\n # Commit\n trans.apply()\n # Return dryrun_output\n #return dryrun_output\n\ndef getValues ():\n enter_interface_name = input(\"Enter the interface name:\")\n enter_hostname = input(\"Enter the hostname name:\")\n create_service_interface_on_device(enter_hostname,enter_interface_name)\n\n\nif __name__ == \"__main__\":\n #main()\n getValues()\n \"\"\"\n current_date_and_time = time.strftime(\"%Y-%m-%d_%H-%M-%S\")\n input_file = sys.argv[1]\n dryrun_file = \"../Output/\" + current_date_and_time + \"_DryRun.txt\"\n print(dryrun_file)\n\n # Use wae_file_parser to get the dictionary of target devices\n filtered_list_of_devices = parse_wae_file(input_file)\n\n filtered_list_of_devices = remove_disconnected_devices(filtered_list_of_devices)\n\n total_dryrun_output = \"\"\n\n for hostname, tunnels in filtered_list_of_devices.items():\n total_dryrun_output = total_dryrun_output + \\\n create_service_Tunnels_on_device(hostname, tunnels)\n\n with open(dryrun_file, \"w\") as outfile:\n outfile.write(total_dryrun_output)\n 
\"\"\"","sub_path":"code/config_int.py","file_name":"config_int.py","file_ext":"py","file_size_in_byte":2345,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"57811873","text":"from trainers import *\nimport time\nimport torch.backends.cudnn as cudnn\ncudnn.benchmark = True\n\ndef main():\n    opts = BaseOptions()\n    args = opts.parse()\n    \n    # args.resume = '../debug/checkpoints.pth' # resume from checkpoint; currently unusable\n    # whether to run pretraining\n    args.pretrain = True\n    if args.pretrain:\n        args.batch_size = 256\n        args.epochs = 60\n        args.lr = 1e-3\n        args.scala_ce = 1\n        args.pretrain_path = '../data/resnet50-19c8e357.pth'\n    \n    logger = Logger(args.save_path) # create the log file\n    opts.print_options(logger) # print the run options into the log\n\n    # load the datasets\n    source_loader, _, _, _ = get_transfer_dataloaders(args.source, args.target, args.img_size, args.crop_size, args.padding, args.batch_size // 2, False)\n    args.num_classes = 4101 # number of IDs in the auxiliary dataset\n\n    if args.resume:\n        # resume training from a checkpoint\n        trainer, start_epoch = load_checkpoint(args, logger)\n    else:\n        trainer = ReidTrainer(args, logger) # trainer class\n        start_epoch = 0\n\n    total_epoch = args.epochs\n\n    start_time = time.time()\n    epoch_time = AverageMeter()\n\n    for epoch in range(start_epoch, total_epoch):\n\n        # estimate the time still needed\n        need_hour, need_mins, need_secs = convert_secs2time(epoch_time.avg * (total_epoch - epoch))\n        need_time = 'Stage 1, [Need: {:02d}:{:02d}:{:02d}]'.format(need_hour, need_mins, need_secs)\n\n        logger.print_log('\\n==>>{:s} [Epoch={:03d}/{:03d}] {:s}'.format(time_string(), epoch, total_epoch, need_time))\n        \n        #Train\n        meters_trn = trainer.pre_MSMT(source_loader, epoch)\n        logger.print_log('  **Train** ' + create_stat_string(meters_trn))\n\n        epoch_time.update(time.time() - start_time)\n        start_time = time.time()\n\n\nif __name__ == '__main__':\n    main()\n","sub_path":"目标检测与re-id任务/MAR-master/src/pretrain.py","file_name":"pretrain.py","file_ext":"py","file_size_in_byte":1804,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"436161081","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\nf = lambda x: np.cos(0.3 * x)\nf_deriv = lambda x: -0.3 * np.sin(0.3 * x)\n\na = -np.pi / 2\nb = np.pi\nN = 39 # the smallest N such that the maximum error is <= 1e-5
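# The pretrain loop above estimates remaining time as
# epoch_time.avg * (total_epoch - epoch). AverageMeter and convert_secs2time
# come from that repo; the stand-ins below are minimal assumptions, not the
# repo's code, but show the same running-average ETA idea end to end:
import time

class AvgMeter:
    def __init__(self):
        self.sum, self.count, self.avg = 0.0, 0, 0.0
    def update(self, value):
        self.sum += value
        self.count += 1
        self.avg = self.sum / self.count

def to_hms(secs):
    secs = int(secs)
    return secs // 3600, (secs % 3600) // 60, secs % 60

epoch_time = AvgMeter()
total_epoch = 3
for epoch in range(total_epoch):
    start = time.time()
    time.sleep(0.01)  # stand-in for one training epoch
    epoch_time.update(time.time() - start)
    # epochs left after this one; the loop above also counts the current epoch
    eta = epoch_time.avg * (total_epoch - epoch - 1)
    print('[Need: {:02d}:{:02d}:{:02d}]'.format(*to_hms(eta)))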
\n\nx = np.linspace(a, b, N + 1)\n\nh = x[1] - x[0]\ninteriorNodes = x[1: -1]\n\ny = f(interiorNodes)\ny_plus = f(interiorNodes + h)\ny_minus = f(interiorNodes - h)\nderiv = f_deriv(interiorNodes)\n\nregressive = (y - y_minus) / h\nprogressive = (y_plus - y) / h\ncentral = (y_plus - y_minus) / (2 * h)\n\n\n# plot both the exact first derivative and our approximations of it\nplt.figure(\"Approximating the first derivative\")\nplt.title(\"Approximating the first derivative\")\nplt.plot(interiorNodes, deriv, c='red', label='Exact first derivative')\nplt.plot(interiorNodes, regressive, label='Backward finite differences')\nplt.plot(interiorNodes, progressive, label='Forward finite differences')\nplt.plot(interiorNodes, central, label='Central finite differences')\nplt.legend()\nplt.show()","sub_path":"cn/6 - derivare numerica/derivare_f_prim.py","file_name":"derivare_f_prim.py","file_ext":"py","file_size_in_byte":956,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"311739177","text":"from app import db\nfrom exception import ForbiddenException\nfrom exception.messages import MESSAGE_CODE_PCE_42_CANNOT_BE_PROCESSED_WITHOUT_PCE_R2\nfrom ext.ExtendedElection.ExtendedElectionProvincialCouncilElection2021 import TALLY_SHEET_CODES\nfrom ext.ExtendedElection.ExtendedElectionProvincialCouncilElection2021.TEMPLATE_ROW_TYPE import \\\n    TEMPLATE_ROW_TYPE_SEATS_ALLOCATED, TEMPLATE_ROW_TYPE_ELECTED_CANDIDATE, TEMPLATE_ROW_TYPE_DRAFT_ELECTED_CANDIDATE\nfrom ext.ExtendedTallySheet import ExtendedEditableTallySheetReport\nfrom orm.entities.Meta import MetaData\nfrom orm.entities.TallySheet import TallySheetTallySheetModel\nfrom orm.entities.Template import TemplateRowModel, TemplateModel\nimport math\n\nfrom flask import render_template\nimport re\nfrom orm.entities import Area, Template, TallySheet\nfrom orm.entities.Workflow import WorkflowInstance\nfrom orm.enums import AreaTypeEnum\nfrom util import convert_image_to_data_uri\n\n\nclass ExtendedTallySheet_PCE_42(ExtendedEditableTallySheetReport):\n\n    def get_template_column_to_query_filter_map(self, only_group_by_columns=False):\n        extended_election = self.tallySheet.election.get_extended_election()\n\n        template_column_to_query_filter_map = super(\n            ExtendedTallySheet_PCE_42, self).get_template_column_to_query_filter_map(\n            only_group_by_columns=only_group_by_columns)\n        template_column_to_query_column_map = self.get_template_column_to_query_column_map()\n\n        party_ids_to_be_filtered = []\n        pce_r2_tally_sheets = db.session.query(TallySheet.Model).filter(\n            TallySheet.Model.tallySheetId == TallySheetTallySheetModel.childTallySheetId,\n            TallySheet.Model.latestVersionId != None,\n            TallySheetTallySheetModel.parentTallySheetId == self.tallySheet.tallySheetId,\n            TallySheet.Model.templateId == Template.Model.templateId,\n            Template.Model.templateName == TALLY_SHEET_CODES.PCE_R2,\n            WorkflowInstance.Model.workflowInstanceId == TallySheet.Model.workflowInstanceId,\n            WorkflowInstance.Model.status.in_(\n                extended_election.tally_sheet_verified_statuses_list()\n            ),\n        ).all()\n\n        if len(pce_r2_tally_sheets) == 0:\n            raise ForbiddenException(\n                message=\"PCE-42 cannot be processed before PCE-R2 is completed and verified.\",\n                code=MESSAGE_CODE_PCE_42_CANNOT_BE_PROCESSED_WITHOUT_PCE_R2\n            )\n\n        pce_r2_tally_sheet_ids = [tallySheet.tallySheetId for tallySheet in pce_r2_tally_sheets]\n\n        for pce_r2_tally_sheet in pce_r2_tally_sheets:\n            pe_r2_extended_tally_sheet_version = 
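# The finite-difference script above approximates f' three ways. A quick
# numerical check of the convergence orders (one-sided differences are O(h),
# central is O(h**2)): halving h should roughly halve the forward error and
# quarter the central error. Self-contained sketch:
import numpy as np

g = lambda x: np.cos(0.3 * x)
g_deriv = lambda x: -0.3 * np.sin(0.3 * x)

def max_errors(n, a=-np.pi / 2, b=np.pi):
    x = np.linspace(a, b, n + 1)
    h = x[1] - x[0]
    xi = x[1:-1]
    forward = (g(xi + h) - g(xi)) / h
    central = (g(xi + h) - g(xi - h)) / (2 * h)
    exact = g_deriv(xi)
    return np.abs(forward - exact).max(), np.abs(central - exact).max()

f40, c40 = max_errors(40)
f80, c80 = max_errors(80)
print(f40 / f80)  # ~2 -> first order
print(c40 / c80)  # ~4 -> second order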
pce_r2_tally_sheet.get_extended_tally_sheet_version(\n tallySheetVersionId=pce_r2_tally_sheet.latestVersionId)\n party_wise_seat_calculation_df = pe_r2_extended_tally_sheet_version.get_party_wise_seat_calculations()\n for party_wise_seat_calculation_df_index in party_wise_seat_calculation_df.index:\n seats_allocated = party_wise_seat_calculation_df.at[\n party_wise_seat_calculation_df_index, 'seatsAllocated']\n\n if seats_allocated > 0:\n party_id = party_wise_seat_calculation_df.at[party_wise_seat_calculation_df_index, 'partyId']\n party_ids_to_be_filtered.append(int(party_id))\n\n pe_ce_ro_pr_3_tally_sheets = db.session.query(\n TallySheet.Model.tallySheetId\n ).filter(\n TallySheet.Model.tallySheetId == TallySheetTallySheetModel.childTallySheetId,\n TallySheetTallySheetModel.parentTallySheetId == self.tallySheet.tallySheetId,\n TallySheet.Model.templateId == Template.Model.templateId,\n Template.Model.templateName == TALLY_SHEET_CODES.PCE_CE_RO_PR_3,\n MetaData.Model.metaId == TallySheet.Model.metaId,\n MetaData.Model.metaDataKey == \"partyId\",\n MetaData.Model.metaDataValue.in_(party_ids_to_be_filtered)\n ).all()\n pe_ce_ro_pr_3_tally_sheet_ids = [tallySheet.tallySheetId for tallySheet in pe_ce_ro_pr_3_tally_sheets]\n\n template_column_to_query_filter_map[\"partyId\"] += [\n template_column_to_query_column_map[\"partyId\"].in_(party_ids_to_be_filtered),\n TallySheet.Model.tallySheetId.in_(pe_ce_ro_pr_3_tally_sheet_ids + pce_r2_tally_sheet_ids)\n ]\n\n return template_column_to_query_filter_map\n\n def on_get_release_result_params(self):\n pd_code = None\n pd_name = None\n\n administrative_district = self.tallySheet.area\n ed_name_regex_search = re.match('([0-9a-zA-Z]*) *- *(.*)', administrative_district.areaName)\n ed_code = ed_name_regex_search.group(1)\n ed_name = ed_name_regex_search.group(2)\n\n result_type = \"RE_SC\"\n result_code = ed_code\n result_level = \"ADMINISTRATIVE-DISTRICT\"\n\n return result_type, result_code, result_level, ed_code, ed_name, pd_code, pd_name\n\n class ExtendedTallySheetVersion(ExtendedEditableTallySheetReport.ExtendedTallySheetVersion):\n def json(self):\n extended_tally_sheet = self.tallySheet.get_extended_tally_sheet()\n result_type, result_code, result_level, ed_code, ed_name, pd_code, pd_name = extended_tally_sheet.on_get_release_result_params()\n\n candidate_wise_results = self.get_candidate_wise_results().sort_values(\n by=['electionPartyId', \"candidateId\"], ascending=[True, True]\n ).reset_index()\n\n return {\n \"type\": result_type,\n \"level\": result_level,\n \"ed_code\": ed_code,\n \"ed_name\": ed_name,\n \"by_candidate\": [\n {\n \"party_code\": candidate_wise_result.partyAbbreviation,\n \"party_name\": candidate_wise_result.partyName,\n \"candidate_number\": str(candidate_wise_result.candidateNumber),\n \"candidate_name\": candidate_wise_result.candidateName,\n \"candidate_type\": candidate_wise_result.candidateType\n } for candidate_wise_result in candidate_wise_results.itertuples()\n ]\n }\n\n def get_candidate_wise_results(self):\n\n candidate_wise_results_df = self.df.loc[\n (self.df['templateRowType'] == TEMPLATE_ROW_TYPE_ELECTED_CANDIDATE) & (self.df['numValue'] == 0)]\n\n candidate_wise_results_df[\"seatsAllocated\"] = [0 for i in range(len(candidate_wise_results_df))]\n candidate_wise_results_df[\"preferenceCount\"] = [0 for i in range(len(candidate_wise_results_df))]\n\n for index in candidate_wise_results_df.index:\n party_id = candidate_wise_results_df.at[index, \"partyId\"]\n candidate_id = 
candidate_wise_results_df.at[index, \"candidateId\"]\n\n seats_allocated = self.df.loc[(self.df[\"partyId\"] == party_id) & (\n self.df['templateRowType'] == TEMPLATE_ROW_TYPE_SEATS_ALLOCATED)][\"numValue\"].values[0]\n\n preference_count = self.df.loc[(self.df[\"candidateId\"] == candidate_id) & (\n self.df['templateRowType'] == \"CANDIDATE_FIRST_PREFERENCE\")][\"numValue\"].values[0]\n\n candidate_wise_results_df.at[index, \"seatsAllocated\"] = seats_allocated\n candidate_wise_results_df.at[index, \"preferenceCount\"] = preference_count\n\n candidate_wise_results_df = candidate_wise_results_df.sort_values(\n by=[\"seatsAllocated\", \"electionPartyId\", \"preferenceCount\", \"candidateId\"],\n ascending=[False, True, False, True]\n )\n\n return candidate_wise_results_df\n\n def get_post_save_request_content(self):\n tally_sheet_id = self.tallySheetVersion.tallySheetId\n\n template_rows = db.session.query(\n TemplateRowModel.templateRowId,\n TemplateRowModel.templateRowType\n ).filter(\n TemplateModel.templateId == TallySheet.Model.templateId,\n TemplateRowModel.templateId == TemplateModel.templateId,\n TemplateRowModel.templateRowType.in_([\n TEMPLATE_ROW_TYPE_ELECTED_CANDIDATE, TEMPLATE_ROW_TYPE_DRAFT_ELECTED_CANDIDATE\n ]),\n TallySheet.Model.tallySheetId == tally_sheet_id\n ).group_by(\n TemplateRowModel.templateRowId\n ).all()\n\n content = []\n\n seats_allocated_per_party_df = self.df.loc[\n (self.df['templateRowType'] == TEMPLATE_ROW_TYPE_SEATS_ALLOCATED) & (self.df['numValue'] > 0)]\n\n # The derived rows are calculated only if the PCE-R2 is available and verified.\n if len(seats_allocated_per_party_df) > 0:\n candidate_wise_valid_vote_count_result = self.get_candidate_wise_valid_vote_count_result().sort_values(\n by=['numValue'], ascending=False\n )\n for index_1 in seats_allocated_per_party_df.index:\n party_id = seats_allocated_per_party_df.at[index_1, \"partyId\"]\n number_of_seats_allocated = seats_allocated_per_party_df.at[index_1, \"numValue\"]\n\n if number_of_seats_allocated is not None and not math.isnan(number_of_seats_allocated):\n filtered_candidate_wise_valid_vote_count_result = candidate_wise_valid_vote_count_result.loc[\n candidate_wise_valid_vote_count_result[\"partyId\"] == party_id]\n for index_2 in filtered_candidate_wise_valid_vote_count_result.index:\n if number_of_seats_allocated > 0:\n for template_row in template_rows:\n num_value = filtered_candidate_wise_valid_vote_count_result.at[\n index_2, \"incompleteNumValue\"]\n candidate_id = filtered_candidate_wise_valid_vote_count_result.at[\n index_2, \"candidateId\"]\n if not math.isnan(num_value):\n content.append({\n \"templateRowId\": template_row.templateRowId,\n \"templateRowType\": template_row.templateRowType,\n \"partyId\": int(party_id),\n \"candidateId\": int(candidate_id),\n\n # TODO remove once the complete validation has been fixed.\n \"numValue\": 0\n })\n else:\n content.append({\n \"templateRowId\": template_row.templateRowId,\n \"templateRowType\": template_row.templateRowType,\n \"partyId\": int(party_id),\n \"candidateId\": None,\n\n # TODO remove once the complete validation has been fixed.\n \"numValue\": 0\n })\n\n number_of_seats_allocated -= 1\n\n return content\n\n def html(self, title=\"\", total_registered_voters=None):\n tallySheetVersion = self.tallySheetVersion\n\n stamp = tallySheetVersion.stamp\n\n content = {\n \"election\": {\n \"electionName\": tallySheetVersion.tallySheet.election.get_official_name()\n },\n \"stamp\": {\n \"createdAt\": stamp.createdAt,\n \"createdBy\": 
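# get_post_save_request_content above walks each party's candidates in
# descending vote order and emits one elected-candidate row per allocated
# seat. The core selection step, reduced to plain lists (candidate ids and
# vote counts here are illustrative assumptions, not election data):
def pick_elected(candidates_by_votes, seats):
    """candidates_by_votes: [(candidate_id, votes)] sorted descending."""
    return [candidate for candidate, _ in candidates_by_votes[:seats]]

party_votes = [('cand-7', 5120), ('cand-2', 4301), ('cand-9', 1990)]
print(pick_elected(party_votes, 2))  # ['cand-7', 'cand-2']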
stamp.createdBy,\n \"barcodeString\": stamp.barcodeString\n },\n \"tallySheetCode\": \"PCE-42\",\n \"provinceNo\": Area.get_associated_areas(\n tallySheetVersion.tallySheet.area, AreaTypeEnum.Province)[0].areaId,\n \"province\": Area.get_associated_areas(\n tallySheetVersion.tallySheet.area, AreaTypeEnum.Province)[0].areaName,\n \"administrativeDistrictNo\": Area.get_associated_areas(\n tallySheetVersion.tallySheet.area, AreaTypeEnum.AdministrativeDistrict)[0].areaId,\n \"administrativeDistrict\": Area.get_associated_areas(\n tallySheetVersion.tallySheet.area, AreaTypeEnum.AdministrativeDistrict)[0].areaName,\n \"countingCentre\": tallySheetVersion.tallySheet.area.areaName,\n \"data\": []\n }\n\n candidate_wise_results = self.get_candidate_wise_results().sort_values(\n by=[\"seatsAllocated\", \"electionPartyId\", \"preferenceCount\", \"candidateId\"],\n ascending=[False, True, False, True]\n ).reset_index()\n\n for index in candidate_wise_results.index:\n candidate_name = candidate_wise_results.at[index, \"candidateName\"]\n party_name = candidate_wise_results.at[index, \"partyName\"]\n content[\"data\"].append({\n \"candidateName\": \"\" if candidate_name is None else candidate_name,\n \"partyName\": party_name\n })\n\n html = render_template(\n 'ProvincialCouncilElection2021/PCE-42.html',\n content=content\n )\n\n return html\n\n def html_letter(self, title=\"\", total_registered_voters=None, signatures=[]):\n tallySheetVersion = self.tallySheetVersion\n stamp = tallySheetVersion.stamp\n\n content = {\n \"election\": {\n \"electionName\": tallySheetVersion.tallySheet.election.get_official_name()\n },\n \"stamp\": {\n \"createdAt\": stamp.createdAt,\n \"createdBy\": stamp.createdBy,\n \"barcodeString\": stamp.barcodeString\n },\n \"signatures\": signatures,\n \"province\": Area.get_associated_areas(\n tallySheetVersion.tallySheet.area, AreaTypeEnum.Province)[0].areaName,\n \"administrativeDistrict\": Area.get_associated_areas(\n tallySheetVersion.tallySheet.area, AreaTypeEnum.AdministrativeDistrict)[0].areaName,\n \"data\": [],\n \"logo\": convert_image_to_data_uri(\"static/Emblem_of_Sri_Lanka.png\"),\n \"date\": stamp.createdAt.strftime(\"%d/%m/%Y\"),\n \"time\": stamp.createdAt.strftime(\"%H:%M:%S %p\")\n }\n\n candidate_wise_results = self.get_candidate_wise_results().sort_values(\n by=[\"seatsAllocated\", \"electionPartyId\", \"preferenceCount\", \"candidateId\"],\n ascending=[False, True, False, True]\n ).reset_index()\n\n for index in candidate_wise_results.index:\n data_row = [\n candidate_wise_results.at[index, \"partyName\"],\n candidate_wise_results.at[index, \"partyAbbreviation\"],\n candidate_wise_results.at[index, \"candidateNumber\"],\n candidate_wise_results.at[index, \"candidateName\"]\n ]\n\n content[\"data\"].append(data_row)\n\n html = render_template(\n 'ProvincialCouncilElection2021/PCE-42-LETTER.html',\n content=content\n )\n\n return html\n","sub_path":"results-tabulation-api/ext/ExtendedElection/ExtendedElectionProvincialCouncilElection2021/ExtendedTallySheet/ExtendedTallySheet_PCE_42.py","file_name":"ExtendedTallySheet_PCE_42.py","file_ext":"py","file_size_in_byte":15762,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"401201660","text":"#导入随机函数\r\nimport random\r\n#导入时间函数\r\nimport time\r\nimport mcpi.minecraft as minecraft\r\nimport mcpi.block as block\r\nmc = minecraft.Minecraft.create()\r\npos = mc.player.getTilePos()\r\n\r\nwhile True:\r\n pos = mc.player.getTilePos()\r\n #生成空气方块\r\n 
mc.setBlocks(pos.x+1,pos.y,pos.z,pos.x+1,pos.y+2,pos.z,0)#前\r\n mc.setBlocks(pos.x-1,pos.y,pos.z,pos.x-1,pos.y+2,pos.z,0)#后\r\n mc.setBlocks(pos.x,pos.y,pos.z-1,pos.x,pos.y+2,pos.z-1,0)#左\r\n mc.setBlocks(pos.x,pos.y,pos.z+1,pos.x,pos.y+2,pos.z+1,0)#右\r\n #生成矿灯\r\n mc.setBlock(pos.x+1,pos.y+2,pos.z,169)\r\n #消除头顶方块\r\n mc.setBlock(pos.x,pos.y+2,pos.z,0)\r\n #消除矿灯周围方块\r\n mc.setBlock(pos.x+2,pos.y+2,pos.z,0)\r\n mc.setBlock(pos.x+1,pos.y+2,pos.z-1,0)\r\n mc.setBlock(pos.x+1,pos.y+2,pos.z+1,0)\r\n mc.setBlock(pos.x+1,pos.y+3,pos.z,0)\r\n\r\n\r\n","sub_path":"精加工地形.py","file_name":"精加工地形.py","file_ext":"py","file_size_in_byte":876,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"541166930","text":"import itertools, random, time, json\n\n\ndef make_dict(score, game):\n board = ''\n board_moves = []\n move_score = {}\n default_score = 5\n #moves = []\n\n def pick_move(score):\n moves = []\n aval_moves = dict.keys(score[board])\n aval_moves = list(aval_moves)\n aval_score = dict.values(score[board])\n aval_score = list(aval_score)\n for m in range(len(aval_moves)):\n for count in range(aval_score[m]):\n moves.append(aval_moves[m])\n random.shuffle(moves)\n try:\n next_move = random.choice(moves)\n except:\n next_move = ''\n return(next_move, score)\n\n for row in range(len(game)):\n for col in range(len(game)):\n board += str(game[row][col])\n\n if board in score:\n next_move, score = pick_move(score)\n return(next_move, score, board)\n elif board not in score:\n #find moves\n for r in range(len(game)):\n for c in range(len(game)):\n if game[r][c] == 0:\n board_moves.append(str(c) + ':' + str(r))\n #add default score 5\n for move in board_moves:\n move_score[move] = default_score\n #build Dict\n score[board] = move_score\n next_move, score = pick_move(score)\n return(next_move, score, board)\n\n\ndef modify_score(score, game_won, Player_1_mm, Player_2_mm):\n if game_won == 1:\n board = dict.keys(Player_1_mm)\n board = list(board)\n for b in board:\n score[b][Player_1_mm[b]] +=1\n board = dict.keys(Player_2_mm)\n board = list(board)\n for b in board:\n if score[b][Player_2_mm[b]] > 1:\n score[b][Player_2_mm[b]] -=1\n elif game_won == 2:\n board = dict.keys(Player_2_mm)\n board = list(board)\n for b in board:\n score[b][Player_2_mm[b]] +=1\n board = dict.keys(Player_1_mm)\n board = list(board)\n for b in board:\n if score[b][Player_1_mm[b]] > 1:\n score[b][Player_1_mm[b]] -=1\n return(score)\n\n\ndef win(current_game):\n\n def all_same(l, win_type):\n if l.count(l[0]) == len(l) and l[0] != 0:\n player = l[0]\n print(f\"Player {player} is the winner {win_type}!\")\n return(player)\n else:\n player = 0\n return(player)\n\n\n # Horizontal\n for row in current_game:\n player = all_same(row, \"horizontally (-)\")\n if player != 0:\n return(player)\n # Diagonal /\n diags = []\n for col, row in enumerate(reversed(range(len(current_game)))):\n diags.append(current_game[row][col])\n player = all_same(diags, \"diagonally (/)\")\n if player != 0:\n return(player)\n # Diagonal \\\n diags = []\n for ix in range(len(current_game)):\n diags.append(current_game[ix][ix])\n player = all_same(diags, \"diagonally (\\\\)\")\n if player != 0:\n return(player)\n # Vertically\n for col in range(len(current_game)):\n check = []\n for row in current_game:\n check.append(row[col])\n player = all_same(check, \"Vertically (|)\")\n if player != 0:\n return(player)\n # Tie\n tie_check = []\n for row in range(len(current_game)):\n for col in range(len(current_game)):\n 
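# pick_move above implements a weighted draw by physically repeating each
# move `score` times in a list and choosing from it. random.choices takes
# weights directly, so the same distribution needs no expanded list; a sketch
# with an illustrative score table:
import random

move_scores = {'0:0': 5, '1:1': 8, '2:0': 1}
moves = list(move_scores)
weights = [move_scores[m] for m in moves]
print(random.choices(moves, weights=weights, k=1)[0])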
tie_check.append(current_game[row][col])\n if 0 not in tie_check:\n print(\"The game is a tie nobody won\")\n return(3)\n\n return(0)\n\n\ndef game_board(game_map, player=0, row=0, column=0, just_display=False):\n try:\n if game_map[row][column] != 0:\n print(\"This space has been taken, Please choode another!\")\n return game_map, False\n print(\" \"+\" \".join(str(i) for i in range(len(game_map))))\n if not just_display:\n game_map[row][column] = player\n for count, row in enumerate(game_map):\n print(count, row)\n return game_map, True\n\n except IndexError as e: \n print(\"Error: did you input row/column as 0 1 or 2 ect?\", e)\n return game_map, False\n\n\nplay = True\nplayers = [1, 2]\n#score = {}\ntry:\n with open('data.json') as json_file: \n score = json.load(json_file)\n json_file.close()\n #print(score)\nexcept:\n score = {}\nwhile play:\n #print(score)\n player = 0\n print('Key for moves in 3x3 \\n[1, 2, 3]\\n[4, 5, 6]\\n[7, 8, 9]')\n print('Please select a game mode \\n1) Human vs PC \\n2) PC vs Human \\n3) Human vs Human \\n4) PC vs PC \\n0) Exit ')\n game_mode = int(input('-> '))\n if game_mode == 0:\n with open('data.json', 'w') as fp:\n json.dump(score, fp, indent=4)\n fp.close()\n print(\"Saved 'Score' Goodbye!\")\n play = False\n continue\n moves = []\n game_size = int(input(\"What size game of tic tac toe (e.g. 3 = 3x3, 4 = 4x4)? \"))\n if game_mode == 4:\n loop = int(input('how meny games? '))\n else:\n loop = 1\n for g in range(loop):\n game = [[0 for i in range(game_size)] for i in range(game_size)]\n for c in range(game_size):\n for r in range(game_size):\n moves.append(str(r) + ':' + str(c))\n game_won = 0\n game, _ = game_board(game, just_display=True)\n player_choice = itertools.cycle([1, 2])\n make_dict(score, game)\n Player_1_mm = {}\n Player_2_mm = {}\n while game_won == 0:\n current_player = next(player_choice)\n if game_mode == 1 and current_player == 2: # Human vs PC\n time.sleep(0.5)\n next_move, score, board = make_dict(score, game)\n print(f'Player {current_player} made move {next_move}')\n Player_2_mm[board] = next_move\n move = next_move.split(':')\n game, played = game_board(game, current_player, int(move[1]), int(move[0]))\n elif game_mode == 2 and current_player == 1: # PC vs Human\n time.sleep(0.5)\n next_move, score, board = make_dict(score, game)\n print(f'Player {current_player}2 made move {next_move}')\n Player_1_mm[board] = next_move\n move = next_move.split(':')\n game, played = game_board(game, current_player, int(move[1]), int(move[0]))\n elif game_mode == 4: # PC vs PC\n #time.sleep(0.001)\n next_move, score, board = make_dict(score, game)\n print(f'Player {current_player} made move {next_move}')\n if current_player == 1:\n Player_1_mm[board] = next_move\n elif current_player == 2:\n Player_2_mm[board] = next_move\n move = next_move.split(':')\n print(move)\n game, played = game_board(game, current_player, int(move[1]), int(move[0]))\n else:\n print(f\"Current player: {current_player}\")\n played = False\n next_move = ''\n next_move, _, board = make_dict(score, game)\n while not played:\n space_choice = int(input(f'Player {current_player} pick a space: '))\n next_move = moves[space_choice - 1].split(':')\n if current_player == 1:\n Player_1_mm[board] = moves[space_choice - 1]\n elif current_player == 2:\n Player_2_mm[board] = moves[space_choice - 1]\n game, played = game_board(game, current_player, int(next_move[1]), int(next_move[0]))\n make_dict(score, game)\n\n game_won = win(game)\n if game_won != 0:\n make_dict(score, game)\n score = 
modify_score(score, game_won, Player_1_mm, Player_2_mm)\n if g == loop -1:\n again = input(\"The game is over, would you like to play again (y/n) \")\n else:\n again = 'y'\n if again.lower() == \"y\":\n print(\"Restarting...\")\n elif again.lower() == \"n\":\n with open('data.json', 'w') as fp:\n json.dump(score, fp, indent=4)\n fp.close()\n print(\"Saved 'Score' Goodbye!\")\n play = False\n else:\n print(\"Not a valid answer, Saved 'Score' and exiting\")\n with open('data.json', 'w') as fp:\n json.dump(score, fp, indent=4)\n fp.close()\n play = False","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":8644,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"335148139","text":"from django.shortcuts import render, redirect\nfrom django.utils import timezone\nfrom django.views.decorators.http import require_POST\nfrom coupon.models import Coupon\nfrom coupon.forms import CouponCodeForm\n\n\n@require_POST\ndef coupon_apply(request):\n now = timezone.now()\n form = CouponCodeForm(request.POST)\n if form.is_valid():\n code = form.cleaned_data['code']\n try:\n coupon = Coupon.objects.get(code=code,\n active=True)\n request.session['coupon_id'] = coupon.id\n except ValueError:\n request.session['coupon_id'] = None\n return redirect('cart')\n","sub_path":"coupon/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":657,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"637084427","text":"# 列表,元组,集合 互相转换, 字典转换要用zip\n\nmy_list = [1, 4, 5, 4]\n\nmy_tuple = (5, 7)\n\nmy_set = {4, 6}\n\n# list -> set\nr1 = set(my_list)\n\n# tuple -> set\nr2 = set(my_tuple)\n\n# list -> tuple\nr3 = tuple(my_list)\n\n# set -> list\nr4 = list(my_set)\n\n# tuple -> list\nr5 = list(my_tuple)\n\n# list() set() tuple()","sub_path":"python_basic/day02/07-相互转换.py","file_name":"07-相互转换.py","file_ext":"py","file_size_in_byte":324,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"490031473","text":"import pandas as pd\nimport csv\nimport re\n\ndef clean_text(each):\n each = each.replace('[','').replace(']','').replace('\\\"','').replace('\\'','').replace('','').lower()\n return each\n\ndef split_string(each):\n each = re.sub(r'(0|([0-9][0-9])|([0-9][0-9])|[1-9])\\s{6}', 'a', each)\n return each\n\nall_csr = list()\nfor i in range(7):\n df_csr = pd.read_csv(\"all_mustard_csr{}.tsv\".format(i+1), delimiter='\\t', header=0, usecols=[\"utterance\"])\n df_csr['sentence'] = df_csr['utterance'].apply(lambda x: clean_text(x) if x!='[]' else ' ')\n if i>0:\n all_csr.append(df_csr[1:])\n else:\n all_csr.append(df_csr)\n\nwith open('all_mustard.tsv', 'w') as out_file:\n tsv_writer = csv.writer(out_file, delimiter='\\t')\n tsv_writer.writerow(['csr_utterance'])\n for each in all_csr:\n current = each['sentence'].values\n for i in range(len(current)):\n sent = current.tolist()[i]\n tsv_writer.writerow([sent])\n \nout_file.close()","sub_path":"all_mustard.py","file_name":"all_mustard.py","file_ext":"py","file_size_in_byte":990,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"358286612","text":"from ceph_deploy.util import paths\nfrom ceph_deploy.util.wrappers import check_call\nfrom ceph_deploy.util.context import remote\nfrom ceph_deploy import conf\nfrom StringIO import StringIO\n\n\ndef ceph_version(conn, logger):\n \"\"\"\n Log the remote ceph-version by calling `ceph --version`\n 
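# The list/tuple/set conversion snippet above notes in its header comment
# that converting to or from a dict needs zip, but never shows that case; a
# short sketch of it:
keys = ['a', 'b']
values = [1, 2]
d = dict(zip(keys, values))  # two parallel lists -> dict
print(d)  # {'a': 1, 'b': 2}
print(list(d.keys()), list(d.values()))  # dict -> two lists
print(list(d.items()))  # dict -> list of (key, value) tuples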
\"\"\"\n return check_call(conn, logger, ['ceph', '--version'])\n\n\ndef which_service(conn, logger):\n \"\"\"\n Attempt to find the right `service` executable location as it\n might not be in the path for the user executing the remote\n calls\n \"\"\"\n logger.info('locating `service` executable...')\n locations = ['/sbin/service', '/usr/sbin/service']\n for location in locations:\n if conn.modules.os.path.exists(location):\n logger.info('found `service` executable: %s' % location)\n return location\n logger.error('could not find `service` executable')\n\n\ndef mon_create(distro, logger, args, monitor_keyring, hostname):\n logger.debug('remote hostname: %s' % hostname)\n path = paths.mon.path(args.cluster, hostname)\n done_path = paths.mon.done(args.cluster, hostname)\n init_path = paths.mon.init(args.cluster, hostname, distro.init)\n\n configuration = conf.load(args)\n conf_data = StringIO()\n configuration.write(conf_data)\n\n with remote(distro.sudo_conn, logger, conf.write_conf) as remote_func:\n remote_func(args.cluster, conf_data.getvalue(), overwrite=args.overwrite_conf)\n\n if not distro.sudo_conn.modules.os.path.exists(path):\n logger.info('creating path: %s' % path)\n distro.sudo_conn.modules.os.makedirs(path)\n\n logger.debug('checking for done path: %s' % done_path)\n if not distro.sudo_conn.modules.os.path.exists(done_path):\n logger.debug('done path does not exist: %s' % done_path)\n if not distro.sudo_conn.modules.os.path.exists(paths.mon.constants.tmp_path):\n logger.info('creating tmp path: %s' % paths.mon.constants.tmp_path)\n distro.sudo_conn.modules.os.makedirs(paths.mon.constants.tmp_path)\n keyring = paths.mon.keyring(args.cluster, hostname)\n\n def write_monitor_keyring(keyring, monitor_keyring):\n \"\"\"create the monitor keyring file\"\"\"\n with file(keyring, 'w') as f:\n f.write(monitor_keyring)\n\n logger.info('creating keyring file: %s' % keyring)\n with remote(distro.sudo_conn, logger, write_monitor_keyring) as remote_func:\n remote_func(keyring, monitor_keyring)\n\n check_call(\n distro.sudo_conn,\n logger,\n [\n 'ceph-mon',\n '--cluster', args.cluster,\n '--mkfs',\n '-i', hostname,\n '--keyring', keyring,\n ],\n )\n\n logger.info('unlinking keyring file %s' % keyring)\n distro.sudo_conn.modules.os.unlink(keyring)\n\n def create_done_path(done_path):\n \"\"\"create a done file to avoid re-doing the mon deployment\"\"\"\n with file(done_path, 'w'):\n pass\n\n with remote(distro.sudo_conn, logger, create_done_path) as remote_func:\n remote_func(done_path)\n\n def create_init_path(init_path):\n \"\"\"create the init path if it does not exist\"\"\"\n import os\n if not os.path.exists(init_path):\n with file(init_path, 'w'):\n pass\n\n with remote(distro.sudo_conn, logger, create_init_path) as remote_func:\n remote_func(init_path)\n","sub_path":"node-2/site-packages/ceph_deploy/hosts/common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":3460,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"241998448","text":"import logging\nfrom pathlib import Path\n\nfrom .exceptions import FileIOError, NetworkIOError\n\n\nclass FileWrapper(object):\n def __init__(self):\n self.logger = logging.getLogger(self.__class__.__name__)\n self.file_path = None\n self.file_name = None\n self.file_size = None\n self._size = None\n self._path = None\n\n def wrap_file_for_reading(self, *args):\n self.logger.debug('.wrap_file_for_reading(args), args={0}'.format(args))\n file = Path(*args)\n try:\n with file.open('rb') 
as _:\n pass\n except OSError as e:\n raise FileIOError('Failed to open file for reading: {0}. {1}'.format(\n file.as_posix(),\n e.strerror\n ))\n self.file_path = file.as_posix()\n self.file_name = file.name\n self.file_size = file.stat().st_size\n\n def wrap_file_for_writing(self, *args, size):\n self.logger.debug('.wrap_file_for_writing(args, size), args={0}, size={1}'.format(args, size))\n file = Path(*args)\n try:\n file.parent.mkdir(parents=True)\n except FileExistsError:\n pass\n try:\n with file.open('wb') as _:\n pass\n except OSError as e:\n raise FileIOError('Failed to open file for writing: {0}. {1}'.format(\n file.as_posix(),\n e.strerror\n ))\n self.file_path = file.as_posix()\n self.file_name = file.name\n self.file_size = size\n\n def get_file_info_asdict(self, relative_to=None):\n info = self.get_file_info(relative_to=relative_to)\n return dict(\n file_path=info[0],\n file_size=info[1],\n # 'file_hash': self.file_hash,\n )\n\n def get_file_info(self, relative_to=None):\n file_path = Path(self.file_path).relative_to(relative_to).as_posix() if relative_to else self.file_path\n ret = file_path, self.file_size\n self.logger.debug('.get_file_info()={0}'.format(ret))\n return ret\n\n def send_file(self, connection):\n self.logger.debug('.send_file(connection), connection={0}'.format(connection))\n try:\n with Path(self.file_path).open('rb') as f:\n bytesnum = connection.send_file(f)\n except OSError as e:\n self.logger.error('Failed to open file for sending: \"{0}\". {1}'.format(self.file_path, e))\n raise FileIOError(e.strerror)\n except NetworkIOError as e:\n self.logger.error('Failed to send file: \"{0}\". {1}'.format(self.file_path, e))\n raise\n\n if bytesnum != self.file_size:\n raise FileIOError('File transfer failed, bytes sent: {0}, actual file size: {1}'.format(\n bytesnum, self.file_size)\n )\n\n def receive_file(self, connection):\n self.logger.debug('.receive_file(connection), connection={0}'.format(connection))\n try:\n with Path(self.file_path).open('wb') as f:\n bytes_left = self.file_size\n while bytes_left:\n inbytes = connection.receive(bytes_left)\n f.write(inbytes)\n bytes_left -= len(inbytes)\n except OSError as e:\n self.logger.error('Failed to receive file: \"{0}\". {1}'.format(self.file_path, e))\n raise FileIOError(e.strerror)\n except NetworkIOError as e:\n self.logger.error('Failed to receive file: \"{0}\". 
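# receive_file above loops `while bytes_left`, shrinking bytes_left by each
# chunk's length: the standard "read exactly N bytes" pattern, needed because
# one receive call may return fewer bytes than requested. The same idea
# against a raw socket (a sketch; `sock` is any connected socket.socket):
def recv_exact(sock, n):
    chunks = []
    bytes_left = n
    while bytes_left:
        chunk = sock.recv(bytes_left)
        if not chunk:  # peer closed early; avoids spinning forever
            raise ConnectionError('connection closed mid-transfer')
        chunks.append(chunk)
        bytes_left -= len(chunk)
    return b''.join(chunks)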
{1}'.format(self.file_path, e))\n raise\n\n def __str__(self):\n return 'File path: \"{0}\", size:{1}'.format(self.file_path, self.file_size)\n\n def __repr__(self):\n str(self)\n\n @property\n def file_size(self):\n if not isinstance(self._size, int):\n raise AttributeError('File size not set')\n return self._size\n\n @file_size.setter\n def file_size(self, value):\n self._size = value\n\n @property\n def file_path(self):\n if not self._path:\n raise AttributeError('File path not set')\n return self._path\n\n @file_path.setter\n def file_path(self, value):\n self._path = value\n","sub_path":"file_backup/file_io.py","file_name":"file_io.py","file_ext":"py","file_size_in_byte":4189,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"58607440","text":"my_name = 'Khaleefa'\nmy_age = '24'\nmy_height = '170' #cm\nmy_weight = '70' #kg\nmy_eyes = 'black'\n\nprint (f\"Lets talk about {my_name}.\")\nprint (f\"He's {my_height} inches tall\")\nprint (f\"He's {my_weight} pounds heavy\")\ntotal = my_age + my_height + my_weight\nprint (f\"if i add {my_age}, {my_weight}, and {my_height}, i get {total}\")\n","sub_path":"ex5.py","file_name":"ex5.py","file_ext":"py","file_size_in_byte":329,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"337402263","text":"\"\"\"\nCopyright ArxanFintech Technology Ltd. 2018 All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\"\"\"\n\nimport logging\nimport os\nimport timeout_decorator\nfrom base64 import b64encode, b64decode\n\nMAX_TIMEOUT = 3\nCRYPTO_BIN_PATH = \"./utils/crypto-util\"\nSIGN_BIN_PATH = \"./utils/sign-util\"\nCUR_PATH = os.path.dirname(__file__)\nMODE_CRYPTO = \"crypt\"\nMODE_SIGN = \"sign\"\n\n@timeout_decorator.timeout(MAX_TIMEOUT, timeout_exception=Exception)\ndef run_cmd(params, mode):\n params_str = \" \".join(map(lambda x: \"-{0} {1}\".format(x, params[x]), params))\n bin_path = \"\"\n if mode == \"crypt\":\n bin_path = os.path.join(CUR_PATH, CRYPTO_BIN_PATH)\n elif mode == \"sign\":\n bin_path = os.path.join(CUR_PATH, SIGN_BIN_PATH)\n else:\n raise Exception(\"%s, unsupported mode\" %mode)\n\n cmd = \" \".join([bin_path, params_str])\n result = os.popen(cmd).read()\n if result.startswith(\"[ERROR]\"):\n raise Exception(\"{}, failed to run cmd: {}\".format(result, cmd))\n\n return result.strip()\n \ndef decrypt_and_verify(cipher_b64, apikey, cert_path):\n \"\"\"Decrypt and verify date with executable\n generated from crypto tools in sdk-go-common\n\n :param cipher_b64: base64 formatted data to be decrypted and verified\n :param apikey: api key generated from server\n :param cert_path: private key file and cert file\n :Returns: decoded and verified message\n \"\"\"\n params = {\n \"mode\": \"2\",\n \"apikey\": apikey,\n \"path\": cert_path,\n \"data\": cipher_b64\n }\n return run_cmd(params, MODE_CRYPTO)\n\ndef sign_and_encrypt(plain_text, apikey, cert_path):\n \"\"\"Sign and encrypt date with executable\n generated from crypto tools in 
sdk-go-common\n\n :param plain_text: plain text to be signed and encrypted\n :param apikey: api key generated from server\n :param cert_path: private key file and cert file\n :Returns: signed and encrypted message\n \"\"\"\n params = {\n \"mode\": \"1\",\n \"apikey\": apikey,\n \"path\": cert_path,\n \"data\": \"'{}'\".format(b64encode(plain_text))\n }\n result = run_cmd(params, MODE_CRYPTO)\n return result\n\ndef sign(plain_text, secret_key, did, nonce):\n \"\"\" Sign date with executable generated\n from sign tools in sdk-go-common\n\n :param plain_text: plain text to be signed and encrypted\n :param secret_key: secret key generated from server\n :param did: did\n :param nonce: nonce\n :Returns: signed message\n \"\"\"\n params = {\n \"key\": secret_key,\n \"nonce\": nonce,\n \"did\": did,\n \"data\": \"'{}'\".format(b64encode(plain_text))\n }\n signed_data = run_cmd(params, MODE_SIGN)\n\n return signed_data\n\n","sub_path":"cryption/crypto.py","file_name":"crypto.py","file_ext":"py","file_size_in_byte":3185,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"284193725","text":"from datetime import datetime\nfrom io import BytesIO\nimport uuid\n\nfrom cloudbridge.cloud.interfaces.resources import BucketObject\nfrom test.helpers import ProviderTestBase\nimport test.helpers as helpers\n\n\nclass CloudObjectStoreServiceTestCase(ProviderTestBase):\n\n def __init__(self, methodName, provider):\n super(CloudObjectStoreServiceTestCase, self).__init__(\n methodName=methodName, provider=provider)\n\n def test_crud_bucket(self):\n \"\"\"\n Create a new bucket, check whether the expected values are set,\n and delete it.\n \"\"\"\n name = \"cbtestcreatebucket-{0}\".format(uuid.uuid4())\n test_bucket = self.provider.object_store.create(name)\n with helpers.cleanup_action(lambda: test_bucket.delete()):\n self.assertTrue(\n test_bucket.id in repr(test_bucket),\n \"repr(obj) should contain the object id so that the object\"\n \" can be reconstructed, but does not. 
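# run_cmd above shells out through os.popen, which cannot separate stderr
# from stdout or report the exit code, and needs the external timeout
# decorator. subprocess.run covers all three; a hedged sketch of the same
# call shape (the "-key value" argument layout mirrors the code above; the
# binary path is whatever CRYPTO_BIN_PATH or SIGN_BIN_PATH resolve to):
import subprocess

def run_cmd_sketch(params, bin_path, timeout=3):
    argv = [bin_path]
    for key, value in params.items():
        argv += ['-' + key, str(value)]
    proc = subprocess.run(argv, stdout=subprocess.PIPE,
                          stderr=subprocess.PIPE, timeout=timeout)
    out = proc.stdout.decode().strip()
    if proc.returncode != 0 or out.startswith('[ERROR]'):
        raise Exception('failed to run cmd: {0}'.format(' '.join(argv)))
    return out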
eval(repr(obj)) == obj\")\n\n buckets = self.provider.object_store.list()\n\n list_buckets = [c for c in buckets if c.name == name]\n self.assertTrue(\n len(list_buckets) == 1,\n \"List buckets does not return the expected bucket %s\" %\n name)\n\n # check iteration\n iter_buckets = [c for c in self.provider.object_store\n if c.name == name]\n self.assertTrue(\n len(iter_buckets) == 1,\n \"Iter buckets does not return the expected bucket %s\" %\n name)\n\n # check find\n find_buckets = self.provider.object_store.find(name=name)\n self.assertTrue(\n len(find_buckets) == 1,\n \"Find buckets does not return the expected bucket %s\" %\n name)\n\n get_bucket = self.provider.object_store.get(\n test_bucket.id)\n self.assertTrue(\n list_buckets[0] ==\n get_bucket == test_bucket,\n \"Objects returned by list: {0} and get: {1} are not as \"\n \" expected: {2}\" .format(list_buckets[0].id,\n get_bucket.id,\n test_bucket.name))\n\n buckets = self.provider.object_store.list()\n found_buckets = [c for c in buckets if c.name == name]\n self.assertTrue(\n len(found_buckets) == 0,\n \"Bucket %s should have been deleted but still exists.\" %\n name)\n\n def test_crud_bucket_objects(self):\n \"\"\"\n Create a new bucket, upload some contents into the bucket, and\n check whether list properly detects the new content.\n Delete everything afterwards.\n \"\"\"\n name = \"cbtestbucketobjs-{0}\".format(uuid.uuid4())\n test_bucket = self.provider.object_store.create(name)\n\n # ensure that the bucket is empty\n objects = test_bucket.list()\n self.assertEqual([], objects)\n\n with helpers.cleanup_action(lambda: test_bucket.delete()):\n obj_name = \"hello_world.txt\"\n obj = test_bucket.create_object(obj_name)\n\n self.assertTrue(\n obj.id in repr(obj),\n \"repr(obj) should contain the object id so that the object\"\n \" can be reconstructed, but does not. eval(repr(obj)) == obj\")\n\n with helpers.cleanup_action(lambda: obj.delete()):\n # TODO: This is wrong. We shouldn't have to have a separate\n # call to upload some content before being able to delete\n # the content. 
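# helpers.cleanup_action above guarantees a teardown callable (for example
# bucket.delete) runs whether the block passes or fails: try/finally wrapped
# as a context manager. A minimal sketch of that pattern (the real helper in
# the test suite may do more, e.g. swallow cleanup errors):
from contextlib import contextmanager

@contextmanager
def cleanup_action(cleanup_fn):
    try:
        yield
    finally:
        cleanup_fn()

# usage: with cleanup_action(lambda: bucket.delete()): ...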
Maybe the create_object method should accept\n # the file content as a parameter.\n obj.upload(\"dummy content\")\n objs = test_bucket.list()\n\n self.assertTrue(\n isinstance(objs[0].size, int),\n \"Object size property needs to be a int, not {0}\".format(\n type(objs[0].size)))\n self.assertTrue(\n datetime.strptime(objs[0].last_modified,\n \"%Y-%m-%dT%H:%M:%S.%f\"),\n \"Object's last_modified field format {0} not matching.\"\n .format(objs[0].last_modified))\n\n # check iteration\n iter_objs = list(test_bucket)\n self.assertListEqual(iter_objs, objs)\n\n found_objs = [o for o in objs if o.name == obj_name]\n self.assertTrue(\n len(found_objs) == 1,\n \"List bucket objects does not return the expected\"\n \" object %s\" % obj_name)\n\n get_bucket_obj = test_bucket.get(obj_name)\n self.assertTrue(\n found_objs[0] ==\n get_bucket_obj == obj,\n \"Objects returned by list: {0} and get: {1} are not as \"\n \" expected: {2}\" .format(found_objs[0].id,\n get_bucket_obj.id,\n obj.id))\n\n obj_too = test_bucket.get(obj_name)\n self.assertTrue(\n isinstance(obj_too, BucketObject),\n \"Did not get object {0} of expected type.\".format(obj_too))\n\n objs = test_bucket.list()\n found_objs = [o for o in objs if o.name == obj_name]\n self.assertTrue(\n len(found_objs) == 0,\n \"Object %s should have been deleted but still exists.\" %\n obj_name)\n\n def test_upload_download_bucket_content(self):\n\n name = \"cbtestbucketobjs-{0}\".format(uuid.uuid4())\n test_bucket = self.provider.object_store.create(name)\n\n with helpers.cleanup_action(lambda: test_bucket.delete()):\n obj_name = \"hello_upload_download.txt\"\n obj = test_bucket.create_object(obj_name)\n\n with helpers.cleanup_action(lambda: obj.delete()):\n content = b\"Hello World. Here's some content.\"\n # TODO: Upload and download methods accept different parameter\n # types. 
Need to make this consistent - possibly provider\n # multiple methods like upload_from_file, from_stream etc.\n obj.upload(content)\n target_stream = BytesIO()\n obj.save_content(target_stream)\n self.assertEqual(target_stream.getvalue(), content)\n target_stream2 = BytesIO()\n for data in obj.iter_content():\n target_stream2.write(data)\n self.assertEqual(target_stream2.getvalue(), content)\n","sub_path":"test/test_object_store_service.py","file_name":"test_object_store_service.py","file_ext":"py","file_size_in_byte":6770,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"19328249","text":"\n# -*- coding: utf-8 -*-\nimport csv\nimport os\nimport datetime\nimport tools\n\nCSV_FILES = ['csv/autumn_2016-2017_2_fix.csv', 'csv/2.csv'] #can parse multiple files\nDAYS = ['ПОНЕДЕЛЬНИК', 'ВТОРНИК', 'СРЕДА', 'ЧЕТВЕРГ', 'ПЯТНИЦА', 'СУББОТА', 'ВОСКРЕСЕНЬЕ']\nWEEK_TOP = 1\nWEEK_BOTTOM = 2\nWEEK_ALL = 3\nWEEK_TOP_SEARCH_TEXT = 'ВЕРХНЯЯ НЕДЕЛЯ:'\nWEEK_BOTTOM_SEARCH_TEXT = 'НИЖНЯЯ НЕДЕЛЯ:'\n\n\ndef get_week_by_date(week_top_dates, week_bottom_dates, by_date=datetime.datetime.now()):\n \"\"\"\n Определяет числительная или знаменательная неделя для даты by_date\n \"\"\"\n def in_week_dates(week_dates, date_):\n for wd in week_dates:\n if date_.month == wd.month and date_.day == wd.day:\n return True\n return False\n\n date_ = get_date_first_week_day(by_date) if not by_date is None else None\n\n if in_week_dates(week_top_dates, date_):\n return WEEK_TOP\n elif in_week_dates(week_bottom_dates, date_):\n return WEEK_BOTTOM\n else:\n return WEEK_ALL\n return week\n\ndef get_date_first_week_day(by_date):\n \"\"\"\n возвращает дату для первого дня недели даты by_date\n \"\"\"\n return by_date - datetime.timedelta(days=by_date.weekday()) if not by_date is None else None\n\n\nclass Lesson_Time:\n start, end = None, None\n def __init__(self, **kwargs):\n self.start = datetime.datetime.strptime(kwargs.get('start'), \"%H.%M\")\n self.end = datetime.datetime.strptime(kwargs.get('end'), \"%H.%M\")\n super(Lesson_Time, self).__init__()\n\n def get_sec(self, t):\n return t.second + t.minute * 60 + t.hour * 3600 if not t is None else 0\n\n @property\n def start_sec(self): return self.get_sec(self.start)\n\n @property\n def end_sec(self): return self.get_sec(self.end)\n\nclass Lesson:\n week_day = None\n time = None\n group = None\n group_code = None\n week = WEEK_ALL\n discipline = None\n professor = None\n room = None\n\n def __init__(self, **kwargs):\n self.week_day = kwargs.get('week_day')\n self.time = kwargs.get('time')\n self.group = int(kwargs.get('group'))\n self.group_code = kwargs.get('group_code')\n self.week = int(kwargs.get('week', WEEK_ALL))\n self.discipline = kwargs.get('discipline').strip()\n self.professor = kwargs.get('professor').strip()\n self.room = kwargs.get('room').strip()\n super(Lesson, self).__init__()\n\n @property\n def gid(self):\n return int(self.group_code[:self.group_code.find('-')].strip())\n\n def set_time(self, time1, time2):\n if not time1 is None: # числитель\n self.time = time1\n else: # знаменатель\n self.time = time2\n self.week = WEEK_BOTTOM\n\n def debug(self):\n z = {}\n zz = dir(self)\n\n for a in filter(lambda x: not x.startswith('_'), zz):\n if a in ['print','g','set_time','debug']: continue\n z[a] = getattr(self, a)\n if z[a].__class__ is Lesson_Time:\n z[a]={'start':z[a].start, 'end':z[a].end}\n return z\n\n\nclass CsvLessons(object):\n lessons = [] # занятия\n start_index = None # индекс стартовой строки, потому как в 
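# get_week_by_date above decides "top" vs "bottom" week by matching the
# Monday of the given week against two explicit date lists parsed from the
# CSV. When the two week types simply alternate, ISO week-number parity is a
# common shortcut; this is an alternative simplification, not what the parser
# does (it trusts the dates printed in the timetable itself):
import datetime

def week_parity(d=None):
    d = d or datetime.date.today()
    return 'top' if d.isocalendar()[1] % 2 == 1 else 'bottom'

print(week_parity(datetime.date(2016, 9, 5)))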
начале идет шапка\n index = None # индекс текущей строки при чтении\n groups = [] # список найденных групп\n last_row_weekday, last_row_time = None, None # последние найденне день недели и время занятия, нужно при строках знаменателя\n last_row_professor, last_row_room = None, None # последний найденный препод\n week_top_dates, week_bottom_dates = [],[] # списки начала недель числитель и знаменатель соответственно\n skip_empty_line = False # пропускать при чтении пустые строки\n\n\n def __init__(self, path, skip_empty_line = False):\n self.skip_empty_line = skip_empty_line\n self.parse_file(path)\n super(CsvLessons, self).__init__()\n\n def is_empty(self, data):\n \"\"\"\n Проверяем на пустоту\n \"\"\"\n if data is None: return True\n if data.__class__ is list:\n for item in data:\n if self.is_empty(item):\n return False\n return True\n elif data.__class__ is str:\n return data.strip() == ''\n return False\n\n def get_header_index(self, row):\n \"\"\"\n Проверяем строку на шапку, если в строке есть ячейка \"ДНИ\" , тосчитаем ее шапкой\n \"\"\"\n return self.index if row[0].upper()=='ДНИ' else None\n\n def get_week_day_id(self, row):\n \"\"\"\n Получаем номер дня недели\n \"\"\"\n week_day_name = row[0].upper().strip() if len(row) > 0 and not row[0] is None else None\n # print('[%s]'%week_day_name)\n return DAYS.index(week_day_name)+1 if week_day_name in DAYS else None\n\n def get_groups(self, row):\n \"\"\"\n Получаем список групп\n \"\"\"\n self.groups = [item for item in row if not self.is_empty(item)]\n return self.groups\n\n def get_time(self, row):\n \"\"\"\n Получаем время\n \"\"\"\n if self.is_empty(row[1]): return None\n period = [t.strip() for t in row[1].split('-')]\n return Lesson_Time(start=period[0], end=period[1])\n\n def make_numerator(self, week_day, group_id, time_):\n \"\"\"\n если в списке есть запись для данной группы, в то же время,\n то значит у нас знаменатель и нужно существующую запись сделать числителем\n \"\"\"\n\n for lesson in self.lessons:\n if lesson.group == group_id and lesson.week_day == week_day and lesson.time.start == time_.start: # l.time.end == time.end:\n lesson.week = WEEK_TOP\n\n def parse_line_lessons(self, row, week_day, time_):\n \"\"\"\n Получаем список занятий в строке\n \"\"\"\n items = row[2:-2] # отрезаем дни недели и время в начале и в конце строки\n group_id = -1\n lessons = []\n\n for i in range(0, len(items), 3):\n group_id += 1\n item = items[i:i + 3]\n\n row_time = self.get_time(row)\n if row_time is None: # если стока без времени, то предудующую делаем числителем\n self.make_numerator(week_day, group_id, time_)\n if self.is_empty(item[0]): continue # если ди\n\n lessons.append(\n Lesson(\n week_day=week_day,\n time=time_,\n group=group_id,\n group_code=self.groups[group_id],\n week=WEEK_ALL if not row_time is None else WEEK_BOTTOM,\n discipline=item[0],\n professor=item[1],\n room=item[2],\n )\n )\n return lessons\n\n def make_week_dates(self, dates):\n ct = datetime.datetime.now()\n return [datetime.datetime.strptime(\"%s.%d\" % (d.strip(), ct.year), \"%d.%m.%Y\") for d in dates]\n\n\n def get_weeks(self, row):\n \"\"\"\n Получаем недели для числителей и знаменателей\n нужно учитывать что значние сдвинуто на 3 ячейки вправо\n \"\"\"\n k = 0\n for item in row:\n if WEEK_TOP_SEARCH_TEXT.upper() in item.strip().upper():\n self.week_top_dates = self.make_week_dates(row[k + 3].split(','))\n return True\n elif WEEK_BOTTOM_SEARCH_TEXT.upper() in item.strip().upper() :\n self.week_bottom_dates = self.make_week_dates(row[k + 3].split(','))\n 
return True\n k += 1\n return False\n\n def parse_file(self, path):\n \"\"\"Parse the file\"\"\"\n print(u'Обрабатываем файл <%s>...' % path)\n if not os.path.exists(path):\n return False\n self.lessons = list()\n with open(path, 'r') as f:\n stream = csv.reader(f, delimiter=';', )\n for row in stream:\n if self.skip_empty_line and self.is_empty(row): continue\n self.lessons.extend(self.parse_line(row))\n if len(self.week_top_dates) > 0 and len(self.week_bottom_dates) > 0:\n break\n\n print(u'Прочитано %d групп.' % len(self.groups))\n print(u'Прочитано %d занятий.' % len(self.lessons))\n return self.lessons\n\n def parse_line(self, row):\n \"\"\"Parse one row of the file\"\"\"\n self.index = 0 if self.index is None else self.index + 1\n if self.start_index is None:\n self.start_index = self.get_header_index(row)\n\n if self.start_index is None or self.index <= self.start_index: # skip everything above the header\n return []\n\n if self.index-self.start_index == 1: # row with the group names\n self.get_groups(row)\n return []\n\n if self.get_weeks(row): return [] # row describing the weeks\n # remember the last seen weekday/time for rows that omit them (denominator rows)\n self.last_row_weekday = (lambda d: d if not d is None else self.last_row_weekday )(self.get_week_day_id(row))\n self.last_row_time = (lambda t: t if not t is None else self.last_row_time )(self.get_time(row))\n\n return self.parse_line_lessons(row, self.last_row_weekday, self.last_row_time)\n\n\n\n def get_group_id_by_name(self, name, exact_match=False):\n \"\"\"\n Return the group index for a name.\n :param name: the group name\n :param exact_match: if True, require an exact name match and return the index;\n otherwise return an (index, name) tuple for the first partial match\n :return: index, (index, name) tuple, or None if nothing matches\n \"\"\"\n if exact_match:\n if name in self.groups:\n return self.groups.index(name)\n else:\n for i in range(len(self.groups)):\n if name in self.groups[i]:\n return (i, self.groups[i])\n return None\n\nif __name__ == '__main__':\n c = CsvLessons(path=CSV_FILES[0], skip_empty_line=False)\n print(u'|' + '-' * 146 + '|')\n print(u'|%s|%s|%s|%s|%s|' % (\n u'Группа'.center(10, ' '),\n u'День недели'.center(15, ' '),\n u'Время'.center(15, ' '),\n u'Дисциплина'.center(72, ' '),\n u'Числитель/знаменатель'.center(30, ' '),\n ))\n print(u'|'+'-'*146+'|')\n k = 0\n for l in c.lessons:\n if l.group != 1: continue # only group 2121-ДБ\n if l.week == WEEK_TOP: w=u'ЧИСЛИТЕЛЬ'\n elif l.week == WEEK_BOTTOM: w=u'ЗНАМЕНАТЕЛЬ'\n else: w=u'ВСЕГДА'\n print(u'|%s|%s|%s| %s|%s|' % (\n c.groups[l.group].center(10, ' '),\n DAYS[l.week_day-1].center(15, ' '),\n str(u'%s:%s-%s:%s' % (\n str(l.time.start.hour).rjust(2, '0'),\n str(l.time.start.minute).rjust(2, '0'),\n str(l.time.end.hour).rjust(2, '0'),\n str(l.time.end.minute).rjust(2, '0'),\n )).center(15, ' '),\n l.discipline.ljust(70, ' '),\n w.center(30, ' '),\n\n\n ))\n k+=1\n print(u'|' + '-' * 146 + '|')\n print(u'| ВСЕГО: |' + (u' %d |' % k).rjust(136, ' '))\n print(u'|' + '-' * 146 + '|')\n\n # print(c.get_lessons_by_group(group_id=c.get_group_id_by_name('02121-ДБ',True)))\n # DEBUG_DATE = datetime.datetime.now()\n # DEBUG_DATE=DEBUG_DATE-datetime.timedelta(days=6)\n # print(DEBUG_DATE, DEBUG_DATE.weekday()+1)\n\n # ll = c.get_lessons_by_group(group_id=c.get_group_id_by_name('02121-ДБ',True), by_date=DEBUG_DATE)\n # for l in ll:\n # print('group:%d time:[%s-%s] week:%d day:%d' % (l.group, l.time.start, l.time.end, l.week, l.week_day) )\n\n","sub_path":"csvtools.py","file_name":"csvtools.py","file_ext":"py","file_size_in_byte":12547,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} {"seq_id":"167104928","text":"from airflow.hooks.postgres_hook import 
PostgresHook\nfrom airflow.models import BaseOperator\nfrom airflow.utils.decorators import apply_defaults\n\n\nclass DataQualityOperator(BaseOperator):\n ui_color = '#89DA59'\n\n @apply_defaults\n def __init__(self,\n redshift_conn_id=\"redshift\",\n tests=[\"\"],\n *args, **kwargs):\n\n super(DataQualityOperator, self).__init__(*args, **kwargs)\n self.redshift_conn_id = redshift_conn_id\n self.tests = tests\n\n def execute(self, context):\n redshift = PostgresHook(postgres_conn_id=self.redshift_conn_id)\n\n for test in self.tests:\n query = test.get('test')\n self.log.info('testing {}'.format(query))\n result = redshift.get_first(query)[0]\n if result != test.get('exp_result'):\n raise ValueError(\"failed test query {}\".format(query))\n","sub_path":"airflow/plugins/operators/data_quality.py","file_name":"data_quality.py","file_ext":"py","file_size_in_byte":910,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} {"seq_id":"62583770","text":"import seqalign\nfrom sys import argv\nwords = {}\nwith open('cleaned_definitions.csv','r') as f:\n for i in f.readlines():\n i=i.split(\"|\")\n syllables = i[0]\n i[0] = i[0].replace(u\"·\",u\"\").lstrip().rstrip()\n words[i[0]] = (syllables,i[2])\nprint(len(words))\n\n\nimport nltk\nprondict = nltk.corpus.cmudict.dict()\n\ndef clean(string):\n return string.replace(\"ˈ\",\"\").replace(\"-\",\"\").replace(\"ˌ\",\"\").rstrip().lstrip().replace(\" \",\"\")\n\noverlap=[]\nfor eachWord in words:\n if eachWord in prondict and words[eachWord][1]!=\"\": # check the pronunciation field, not the (syllables, pron) tuple\n overlap.append(eachWord)\n\n\ntest=overlap[0]\nprint(test,prondict[test],words[test])\n\n\nsound_mapping = {}\nfor eachWord in overlap:\n pron = clean(words[eachWord][1])\n cmupron = prondict[eachWord][0]\n if len(cmupron)!=len(pron): continue\n for eachSound in range(0,len(pron)):\n try:\n sound_mapping[cmupron[eachSound]][pron[eachSound]]+=1\n except:\n sound_mapping[cmupron[eachSound]] = {pron[eachSound]:1}\n\nprint (sound_mapping.keys())\n\n\ndef translate(seq,mapping):\n newseq = []\n for i in seq:\n try:\n mapMe = mapping[i]\n mapMe = list(mapMe.keys())[0] # dict views are not indexable in Python 3\n newseq.append(mapMe)\n except:\n newseq.append(i)\n return newseq\n\nfor eachWord in overlap:\n pron = clean(words[eachWord][1]).split(\",\")[0]\n cmupron = [i.lower() for i in prondict[eachWord][0]]\n translated = translate(pron,sound_mapping)\n\n print (\"SEQ ALIGN\")\n seqalign.main(translated,cmupron,match=2,mismatch=-1,gap=-1)\n exit(1)\n\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1555,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} {"seq_id":"106399595","text":"# script to create a randomly populated database\n# names is a module with 40 random names\n\nimport names\nimport random\n# database connection\nfrom cassandra.cluster import Cluster\n\ncluster = Cluster()\n# the keyspace must be created beforehand\nsession = cluster.connect('aula')\n\n\n# creation functions\n\ndef create_table():\n # drop the table\n session.execute(\"\"\"drop table users\"\"\")\n\n # recreate the table with no data\n session.execute(\n \"\"\"\n CREATE TABLE users (\n id uuid PRIMARY KEY,\n lastname text,\n age text,\n city text,\n email text,\n firstname text);\n \"\"\"\n )\n\n\n# insert the rows\ndef generate(qtd):\n # clear the table\n create_table()\n pessoas = qtd\n for pessoa in range(pessoas):\n # insert the test users\n idade = random.randint(10, 99)\n cidade = random.randint(111111, 999999)\n session.execute(\"\"\"\n INSERT INTO users (id, lastname, 
age, city, email, firstname)\n VALUES (uuid(), '{0}', '{1}', '{2}', '{0}@gmail.com', '{3}');\n \"\"\".format(names.get_last_name(), idade, cidade, names.get_first_name()))\n\n\n# Selection functions\n\n# select the rows from the database\ndef select_all():\n # run the test selects\n users = session.execute(\"select * from users\")\n print(\n # small trick to build a header row for the table\n \"First Name\" + \" \" * 4 + \"Last Name\" + \" \" * 5 + \"Age\" + \" \" * 2 + \"Cidade\" + \" \" * 3 + \"Email\" + \" \" * 21 + \"Id\")\n print(\"-\" * 104)\n for user in users:\n # ljust pads with spaces on the right\n print(user.firstname.ljust(13), user.lastname.ljust(13),\n user.age.ljust(4), user.city.ljust(8), user.email.lower().ljust(25),\n user.id)\n\n\n# Editing functions\n\n# edit a whole row, filtering by last name and giving the column to modify and the new value\ndef edit(last_name, coluna, valor):\n pessoa = session.execute(\"select * from users where lastname='{0}' allow filtering\".format(last_name))\n for dados in pessoa:\n id = dados.id\n session.execute(\"update users set {1} = '{2}' where id = {0}\".format(id, coluna, valor))\n print(\"\")\n\n\n# edit the age, given the person's id and the new value\ndef edit_age(id, valor):\n pessoa = session.execute(\"select * from users where id={0} allow filtering\".format(id))\n for dados in pessoa:\n id = dados.id\n session.execute(\"update users set age = '{1}' where id = {0}\".format(id, valor))\n select_all()\n print(\"\")\n\n\n# edit the first name, given the person's id and the correction\ndef edit_first_name(id, firstname):\n pessoa = session.execute(\"select * from users where id={0} allow filtering\".format(id))\n for dados in pessoa:\n id = dados.id\n session.execute(\"update users set firstname = '{1}' where id = {0}\".format(id, firstname))\n select_all()\n print(\"\")\n\n\n# edit the last name, given the person's id and the correction\ndef edit_last_name(id, lastname):\n pessoa = session.execute(\"select * from users where id={0} allow filtering\".format(id))\n for dados in pessoa:\n id = dados.id\n session.execute(\"update users set lastname = '{1}' where id = {0}\".format(id, lastname))\n session.execute(\"update users set email = '{1}@gmail.com' where id = {0}\".format(id, lastname))\n select_all()\n print(\"\")\n\n\n# edit the full name, given the person's id and the correction\ndef edit_all_name(id, nome):\n nome = nome.partition(\" \")\n firstname = nome[0]\n lastname = nome[2]\n pessoa = session.execute(\"select * from users where id={0} allow filtering\".format(id))\n for dados in pessoa:\n id = dados.id\n session.execute(\"update users set firstname = '{1}' where id = {0}\".format(id, firstname))\n session.execute(\"update users set lastname = '{1}' where id = {0}\".format(id, lastname))\n session.execute(\"update users set email = '{1}@gmail.com' where id = {0}\".format(id, lastname))\n select_all()\n print(\"\")\n\n\n# edit the email, given the person's id and the correction\ndef edit_email(id, mail):\n pessoa = session.execute(\"select * from users where id={0} allow filtering\".format(id))\n for dados in pessoa:\n id = dados.id\n session.execute(\"update users set email = '{1}' where id = {0}\".format(id, mail))\n select_all()\n print(\"\")\n","sub_path":"playlist_casandra/Users/random_users.py","file_name":"random_users.py","file_ext":"py","file_size_in_byte":4381,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} {"seq_id":"317303763","text":"import os\nimport 
wave\nimport struct\n\n\ndef wav_read(fname):\n \"\"\"\n Read a wave file. This will always convert to mono.\n\n Arguments:\n * fname: a string containing a file name of a WAV file.\n\n Returns a tuple with 2 elements:\n * a Python list with floats in the range [-1, 1] representing samples.\n the length of this list will be the number of samples in the given wave\n file.\n * an integer containing the sample rate\n \"\"\"\n f = wave.open(fname, 'r')\n chan, bd, sr, count, _, _ = f.getparams()\n\n assert bd == 2, \"bit depth must be 16\"\n\n data = []\n for i in range(count):\n frame = f.readframes(1)\n if chan == 2:\n l = struct.unpack(' List[Iterable[Any]]:\n \"Return first n items of the iterable as a list\"\n return list(itertools.islice(iterable, n))\n\n\np(\"TAKE 1\")\nprint(take(5, (x for x in [1, 2, 3])))\n\np(\"SLICED\")\nsliced = itertools.islice((x for x in [1, 2, 3]), 5)\nprint(type(sliced))\nprint(list(sliced))\n","sub_path":"bare_python/s05_03_take_iter.py","file_name":"s05_03_take_iter.py","file_ext":"py","file_size_in_byte":412,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"73181116","text":"from django.conf.urls import url\nfrom django.contrib.auth import views as auth_views\nfrom django.contrib.flatpages import views as flat_views\n\nfrom . import views\n\nurlpatterns = [\n ### App URLs\n url(r'^home/?$', views.home),\n url(r'^$', views.home),\n url(r'^app/?$', views.app, name=\"app\"),\n # url(r'^sandbox/$', views.sandbox, name=\"sandbox\"),\n # url(r'^sandbox/json/(?P[\\w_]+)', views.sandbox_json, name=\"sandbox_json\"),\n url(r'^help/$', flat_views.flatpage, {'url': '/help/'}, name=\"help\"),\n url(r'^methods/$', flat_views.flatpage, {'url': '/methods/'}, name='methods'),\n url(r'^thankyou/$', flat_views.flatpage, {'url': '/thankyou/'}, name='thankyou'),\n\n ### API urls\n url(r'^get_veg_unit_by_bbox', views.get_veg_unit_by_bbox),\n # url(r'^get_segment_by_bbox', views.get_segment_by_bbox), # suspected ununsed RDH 7/31/2018\n # url(r'^get_segment_by_id', views.get_segment_by_id),\n # url(r'^segment/(?P[\\w_]+)', views.get_segment_by_id), # suspected unused RDH 7/31/2018\n url(r'^pourpoint/(?P[\\w_]+)', views.get_pourpoint_by_id),\n # url(r'^filter_results', views.filter_results), # suspected ununsed RDH 7/31/2018\n url(r'^get_results_by_scenario_id', views.get_results_by_scenario_id),\n url(r'^get_status_by_scenario_id', views.get_status_by_scenario_id),\n url(r'^get_downstream_pour_points', views.get_downstream_pour_points),\n url(r'^get_hydro_results_by_pour_point_id', views.get_hydro_results_by_pour_point_id),\n url(r'^get_results_by_state', views.get_results_by_state),\n url(r'^get_focus_area_at', views.get_focus_area_at),\n url(r'^get_focus_area', views.get_focus_area),\n url(r'^get_basin', views.get_basin),\n url(r'^save_drawing', views.save_drawing),\n url(r'^upload_treatment_shapefile/', views.upload_treatment_shapefile, name='shp_upload'),\n url(r'^create_treatment_areas/', views.create_treatment_areas),\n url(r'^claim_treatment_area/', views.claim_treatment_area),\n url(r'^login_check/', views.login_check),\n\n ### Filter/Scenarios Work\n url(r'get_scenarios/(?P[\\w_]+)/$', views.get_scenarios),\n url(r'get_scenarios$', views.get_scenarios),\n url(r'get_scenarios/$', views.get_scenarios),\n url(r'get_planningunits$', views.get_planningunits),\n\n url(r'^get_user_scenario_list/$', views.get_user_scenario_list),\n\n url(r'^set_treatment_prescriptions/$', views.set_treatment_prescriptions),\n\n ### end API 
urls\n url(r'^', views.index, name='index'),\n]\n","sub_path":"ucsrb/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2506,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} {"seq_id":"438603674","text":"import datetime, textwrap\nfrom sqlalchemy import Column, Integer, String, DateTime, func, desc, ForeignKey\nfrom sqlalchemy.orm import relationship\nfrom calendar import monthrange\nfrom . import Base\n\nMAX_INFO_LENGTH = 45\nMAX_MSG_LENGTH = 65\n\nclass Post(Base):\n __tablename__ = 'posts'\n\n id = Column(Integer, primary_key=True)\n users = relationship('PostUser', back_populates='post')\n values = relationship('PostValue', back_populates='post')\n poster = Column(String(64), nullable=False)\n text = Column(String(500), nullable=False)\n posted_at = Column(DateTime, nullable=False)\n slack_timestamp = Column(String(64), nullable=False)\n slack_channel = Column(String(64), nullable=False)\n\n def __init__(self, poster, text, slack_timestamp, slack_channel, posted_at=None):\n self.poster = poster\n self.text = text\n self.posted_at = posted_at or datetime.datetime.now()\n self.slack_timestamp = slack_timestamp\n self.slack_channel = slack_channel\n\n def post_url(self, slack):\n channel = slack.get_channel_name(self.slack_channel)\n\n if channel:\n ts = self.slack_timestamp.replace(\".\", \"\")\n return \"https://50onred.slack.com/archives/{}/p{}\".format(channel, ts)\n else:\n return \"(Private Message or Group)\"\n\n @property\n def users_value_info_for_table(self):\n users = map(lambda u: u.formatted_name, self.users)\n users = \", \".join(users)\n\n values = map(lambda v: v.value, self.values)\n values = \", \".join(values)\n\n text = \"@{} -> {} for {}\".format(self.poster, users, values)\n return textwrap.fill(text, MAX_INFO_LENGTH)\n\n def message_info_for_table(self, slack):\n if not isinstance(self.text, unicode):\n text = unicode(self.text, \"ISO-8859-1\")\n else:\n text = self.text\n\n def replace_usernames(token):\n if token.startswith(\"<@\"):\n last_chars = token[12:] # was token[12:0], which is always empty; keep any text after the mention\n user_id = token[:12].strip(\"@<>\")\n user_name = slack.get_user_name(user_id)\n token = u\"@{}{}\".format(user_name, last_chars)\n\n return token\n\n text_tokens = map(replace_usernames, text.split())\n text = textwrap.fill(u\" \".join(text_tokens), MAX_MSG_LENGTH)\n return u\"{}\\n{}\".format(text, self.post_url(slack))\n\n @property\n def posted_at_formatted(self):\n return self.posted_at.strftime('%B %d %Y %I:%M %p')\n\n @classmethod\n def posts_by_user(cls, session, user, date, month, year):\n query = session.query(Post).join(Post.users).filter(PostUser.user == user)\n\n if date or month:\n dates = _get_date_range(date, month, year)\n query = query.filter(Post.posted_at >= dates[0], Post.posted_at <= dates[1])\n\n return query\n\n @classmethod\n def posts_by_value(cls, session, value, date, month, year):\n query = session.query(cls)\n\n if (value and value != \"all\"):\n query = query.join(Post.values).filter(PostValue.value == value)\n\n if date or month:\n dates = _get_date_range(date, month, year)\n query = query.filter(Post.posted_at >= dates[0], Post.posted_at <= dates[1])\n\n return query\n\n @classmethod\n def leaders_by_value(cls, session, value, date, month, year):\n query = session.query(PostUser.user, func.count(PostUser.id).label('user_occurence')\n ).group_by(PostUser.user\n ).order_by(desc('user_occurence'))\n\n if value and value != \"all\":\n query = query.join(\"post\").join(Post.values).filter(PostValue.value == value)\n\n 
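# optionally restrict the leaderboard to a single day or month\n 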
if date or month:\n dates = _get_date_range(date, month, year)\n query = query.join(\"post\").filter(Post.posted_at >= dates[0], Post.posted_at <= dates[1])\n\n return query\n\nclass PostUser(Base):\n __tablename__ = 'post_users'\n\n id = Column(Integer, primary_key=True)\n user = Column(String, nullable=False)\n post_id = Column(Integer, ForeignKey('posts.id'))\n\n post = relationship('Post', back_populates='users')\n\n def __init__(self, user):\n self.user = user\n\n @property\n def formatted_name(self):\n return \"@{}\".format(self.user)\n\nclass PostValue(Base):\n __tablename__ = 'post_values'\n\n id = Column(Integer, primary_key=True)\n value = Column(String, nullable=False)\n post_id = Column(Integer, ForeignKey('posts.id'))\n\n post = relationship('Post', back_populates='values')\n\n def __init__(self, value):\n self.value = value\n\ndef _get_date_range(date, month, year):\n start, end = None, None\n\n if date:\n start = datetime.datetime(year, month, date)\n end = datetime.datetime(year, month, date, 23, 59, 59)\n else:\n start = datetime.datetime(year, month, 1)\n days_in_month = monthrange(year, month)[1]\n end = datetime.datetime(year, month, days_in_month, 23, 59, 59)\n\n return (start, end)\n","sub_path":"db/post.py","file_name":"post.py","file_ext":"py","file_size_in_byte":4973,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} {"seq_id":"139184875","text":"import os\nimport logging\nimport traceback\nimport unittest\nimport tempfile\n\n#from pbcore.util.Process import backticks\nfrom base_test_case import PACKAGE_DATA_DIR, ROOT_DATA_DIR, run_backticks\nfrom base_test_case import BaseTestCase\nfrom pbinternal2.util.range import Range, Ranges, OverlappingRanges\n\nlog = logging.getLogger(__name__)\n\nclass TestPBRange(BaseTestCase):\n \"\"\"Unit and integration tests for the Range classes and \\\n associated module functions\"\"\"\n\n def setUp(self):\n \"\"\"\n Before *every* test\n \"\"\"\n try:\n BaseTestCase.setUp(self)\n except Exception as err:\n log.error(err)\n tb = traceback.format_exc()\n log.error(tb)\n raise\n log.debug(\"In setUp()\")\n self.output = tempfile.mkdtemp(suffix=\"range_tests\")\n\n def tearDown(self):\n \"\"\"\n After *every* test\n \"\"\"\n try:\n BaseTestCase.tearDown(self)\n except Exception as err:\n log.error(err)\n tb = traceback.format_exc()\n log.error(tb)\n raise\n\n def test_range(self):\n r1 = Range(5, 10)\n r2 = Range(10, 15)\n r3 = Range(7, 15)\n r4 = Range(5, 10)\n self.assertTrue(r1.contains(5))\n self.assertFalse(r1.contains(10))\n self.assertFalse(r1.intersects(r2))\n self.assertTrue(r1.intersects(r3))\n self.assertTrue(Range(0, 0) == r1.intersect(r2))\n self.assertTrue(Range(7, 10) == r1.intersect(r3))\n self.assertTrue(r1 == r4)\n\n def test_ranges(self):\n r = Ranges()\n r.add_range(Range(1, 3))\n r.add_range(Range(9, 12))\n self.assertEqual(\"Rs {[1,3) [9,12)}\", str(r))\n r.add_range(Range(2, 5))\n self.assertEqual(\"Rs {[1,5) [9,12)}\", str(r))\n r.add_range(Range(1, 12))\n self.assertEqual(\"Rs {[1,12)}\", str(r))\n r.add_range(Range(14, 15))\n self.assertEqual(\"Rs {[1,12) [14,15)}\", str(r))\n r.add_range(Range(20, 25))\n self.assertEqual(\"Rs {[1,12) [14,15) [20,25)}\", str(r))\n r.add_range(Range(11, 22))\n self.assertEqual(\"Rs {[1,25)}\", str(r))\n\n r1 = Range(5, 10)\n r.remove_range(r1)\n self.assertEqual(\"Rs {[1,5) [10,25)}\", str(r))\n r5 = Range(3, 12)\n r.remove_range(r5)\n self.assertEqual(\"Rs {[1,3) [12,25)}\", str(r))\n r.remove_range(Range(-1, 3))\n 
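# Range(-1, 3) swallows [1,3) entirely, leaving only [12,25)\n 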
self.assertEqual(\"Rs {[12,25)}\", str(r))\n r.remove_range(Range(1, 3))\n self.assertEqual(\"Rs {[12,25)}\", str(r))\n r.remove_range(Range(1, 25))\n self.assertEqual(\"Rs {}\", str(r))\n\n r5 = Ranges()\n r5.add_range(Range(1, 25))\n r5.add_range(Range(27, 29))\n r5.add_range(Range(35, 40))\n r6 = Ranges()\n r6.add_range(Range(2, 5))\n r6.add_range(Range(20, 30))\n r6.add_range(Range(42, 45))\n r5.merge(r6)\n self.assertEqual(\"Rs {[1,30) [35,40) [42,45)}\", str(r5))\n\n def test_overlapping_ranges(self):\n r = OverlappingRanges()\n r1 = Range(0, 15)\n r2 = Range(1, 3)\n r3 = Range(9, 12)\n r.add_range(r1)\n r.add_range(r2)\n r.add_range(r3)\n self.assertEqual(\"Rs {[0,15) [1,3) [9,12)}\", str(r))\n\n query_ranges = [Range(0, 1), Range(2, 4), Range(13, 15), Range(15, 17)]\n answers = [[r1, r2], [r1, r2], [r1], []]\n for q_range, expected in zip(query_ranges, answers):\n for actual, exp in zip(r.overlapping_ranges(q_range), expected):\n self.assertTrue(actual == exp)\n","sub_path":"tests/test_pbinternal2_range.py","file_name":"test_pbinternal2_range.py","file_ext":"py","file_size_in_byte":3586,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"59456337","text":"# Copyright (c) 2011 Bastian Venthur\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n\"\"\"\nPython library for the AR.Drone.\n\nV.1 Tested with Python 2.6.6 and AR.Drone vanilla firmware 1.5.1.\nV.2 Tested with Python 2.7, Python 3.5 and vanilla firmware 2.0\n\"\"\"\n\nimport time\nimport socket\nimport struct\nimport sys\nimport threading\nimport multiprocessing\nimport numpy as np\n\nfrom .arnetwork import (\n ARDroneNetworkProcess,\n ARDRONE_COMMAND_ADDR,\n NAVDATA_KEYS,\n)\n\n\n__author__ = \"Bastian Venthur\"\n\n\nSESSION_ID = \"943dac23\"\nUSER_ID = \"36355d78\"\nAPP_ID = \"21d958e4\"\n\n\nAVAILABLE_COMMANDS = [\n \"emergency\",\n \"land\",\n \"takeoff\",\n \"move_left\",\n \"move_right\",\n \"move_down\",\n \"move_up\",\n \"move_backward\",\n \"move_forward\",\n \"turn_left\",\n \"turn_right\",\n \"hover\",\n]\n\n\nclass ARDrone(object):\n \"\"\"ARDrone Class.\n\n Instantiate this class to control your drone and receive decoded video and\n navdata.\n Possible values for the video codec (drone2):\n NULL_CODEC = 0,\n UVLC_CODEC = 0x20, // codec_type value is used for START_CODE\n P264_CODEC = 0x40,\n MP4_360P_CODEC = 0x80,\n H264_360P_CODEC = 0x81,\n MP4_360P_H264_720P_CODEC = 0x82,\n H264_720P_CODEC = 0x83,\n MP4_360P_SLRS_CODEC = 0x84,\n H264_360P_SLRS_CODEC = 0x85,\n H264_720P_SLRS_CODEC = 0x86,\n H264_AUTO_RESIZE_CODEC = 0x87, // resolution is automatically adjusted\n MP4_360P_H264_360P_CODEC = 0x88,\n \"\"\"\n\n def __init__(self, is_ar_drone_2=False, hd=False, use_video=True):\n \"\"\"Initialize the AR Drone, with appropriate options flags.\"\"\"\n self.use_video = use_video\n self.seq_nr = 1\n self.timer_t = 0.2\n self.com_watchdog_timer = threading.Timer(self.timer_t, self.commwdg)\n self.lock = threading.Lock()\n self.speed = 0.2\n\n time.sleep(0.5)\n self.config_ids_string = [SESSION_ID, USER_ID, APP_ID]\n self.configure_multisession(\n SESSION_ID,\n USER_ID,\n APP_ID,\n self.config_ids_string,\n )\n self.set_session_id(self.config_ids_string, SESSION_ID)\n time.sleep(0.5)\n self.set_profile_id(self.config_ids_string, USER_ID)\n time.sleep(0.5)\n self.set_app_id(self.config_ids_string, APP_ID)\n time.sleep(0.5)\n self.set_video_bitrate_control_mode(self.config_ids_string, \"1\")\n time.sleep(0.5)\n self.set_video_bitrate(self.config_ids_string, \"10000\")\n time.sleep(0.5)\n self.set_max_bitrate(self.config_ids_string, \"10000\")\n time.sleep(0.5)\n self.set_fps(self.config_ids_string, \"30\")\n time.sleep(0.5)\n self.hd = hd\n if self.hd:\n self.image_shape = (720, 1280, 3)\n self.set_video_codec(self.config_ids_string, 0x83)\n else:\n self.image_shape = (360, 640, 3)\n self.set_video_codec(self.config_ids_string, 0x81)\n\n self.last_command_is_hovering = True\n self.com_pipe, com_pipe_other = multiprocessing.Pipe()\n\n self.navdata = {0: {key: 0 for key in NAVDATA_KEYS}}\n\n self.network_process = ARDroneNetworkProcess(\n com_pipe_other,\n is_ar_drone_2,\n self,\n use_video=use_video,\n )\n self.network_process.start()\n\n self.image = np.zeros(self.image_shape, np.uint8)\n self.time = 0\n self.last_command_is_hovering = True\n\n time.sleep(1.0)\n\n self.at(at_config_ids, self.config_ids_string)\n self.at(at_config, \"general:navdata_demo\", \"TRUE\")\n\n def takeoff(self):\n \"\"\"Make the drone takeoff.\"\"\"\n self.at(at_ftrim)\n self.at(at_config, \"control:altitude_max\", \"20000\")\n 
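# send REF with the takeoff bit set\n 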
self.at(at_ref, True)\n\n def land(self):\n \"\"\"Make the drone land.\"\"\"\n self.at(at_ref, False)\n\n def hover(self):\n \"\"\"Make the drone hover.\"\"\"\n self.at(at_pcmd, False, 0, 0, 0, 0)\n\n def move_left(self):\n \"\"\"Make the drone move left.\"\"\"\n self.at(at_pcmd, True, -self.speed, 0, 0, 0)\n\n def move_right(self):\n \"\"\"Make the drone move right.\"\"\"\n self.at(at_pcmd, True, self.speed, 0, 0, 0)\n\n def move_up(self):\n \"\"\"Make the drone rise upwards.\"\"\"\n self.at(at_pcmd, True, 0, 0, self.speed, 0)\n\n def move_down(self):\n \"\"\"Make the drone descend.\"\"\"\n self.at(at_pcmd, True, 0, 0, -self.speed, 0)\n\n def move_forward(self):\n \"\"\"Make the drone move forward.\"\"\"\n self.at(at_pcmd, True, 0, -self.speed, 0, 0)\n\n def move_backward(self):\n \"\"\"Make the drone move backwards.\"\"\"\n self.at(at_pcmd, True, 0, self.speed, 0, 0)\n\n def turn_left(self):\n \"\"\"Make the drone rotate left.\"\"\"\n self.at(at_pcmd, True, 0, 0, 0, -self.speed)\n\n def turn_right(self):\n \"\"\"Make the drone rotate right.\"\"\"\n self.at(at_pcmd, True, 0, 0, 0, self.speed)\n\n def reset(self):\n \"\"\"Toggle the drone's emergency state.\"\"\"\n # Enter emergency mode\n self.at(at_ref, False, True)\n self.at(at_ref, False, False)\n # Leave emergency mode\n self.at(at_ref, False, True)\n\n def trim(self):\n \"\"\"Flat trim the drone.\"\"\"\n self.at(at_ftrim)\n\n def set_speed(self, speed):\n \"\"\"Set the drone's speed.\n\n Valid values are floats from [0..1]\n \"\"\"\n self.speed = speed\n\n def set_camera_view(self, downward):\n \"\"\"Set which video camera is used.\n\n If 'downward' is true, downward camera will be viewed -\n otherwise frontwards.\n \"\"\"\n channel = None\n if downward:\n channel = 0\n else:\n channel = 1\n self.set_video_channel(self.config_ids_string, channel)\n\n def at(self, cmd, *args, **kwargs):\n \"\"\"Wrapper for the low level at commands.\n\n This method takes care that the sequence number is increased after each\n at command and the watchdog timer is started to make sure the drone\n receives a command at least every second.\n \"\"\"\n self.lock.acquire()\n self.com_watchdog_timer.cancel()\n cmd(self.seq_nr, *args, **kwargs)\n self.seq_nr += 1\n self.com_watchdog_timer = threading.Timer(self.timer_t, self.commwdg)\n self.com_watchdog_timer.start()\n self.lock.release()\n\n def configure_multisession(self, session_id, user_id, app_id, config_ids_string):\n self.at(at_config, \"custom:session_id\", session_id)\n self.at(at_config, \"custom:profile_id\", user_id)\n self.at(at_config, \"custom:application_id\", app_id)\n\n def set_session_id(self, config_ids_string, session_id):\n self.at(at_config_ids, config_ids_string)\n self.at(at_config, \"custom:session_id\", session_id)\n\n def set_profile_id(self, config_ids_string, profile_id):\n self.at(at_config_ids, config_ids_string)\n self.at(at_config, \"custom:profile_id\", profile_id)\n\n def set_app_id(self, config_ids_string, app_id):\n self.at(at_config_ids, config_ids_string)\n self.at(at_config, \"custom:application_id\", app_id)\n\n def set_video_bitrate_control_mode(self, config_ids_string, mode):\n self.at(at_config_ids, config_ids_string)\n self.at(at_config, \"video:bitrate_control_mode\", mode)\n\n def set_video_bitrate(self, config_ids_string, bitrate):\n self.at(at_config_ids, config_ids_string)\n self.at(at_config, \"video:bitrate\", bitrate)\n\n def set_video_channel(self, config_ids_string, channel):\n self.at(at_config_ids, config_ids_string)\n self.at(at_config, 
\"video:video_channel\", channel)\n\n def set_max_bitrate(self, config_ids_string, max_bitrate):\n self.at(at_config_ids, config_ids_string)\n self.at(at_config, \"video:max_bitrate\", max_bitrate)\n\n def set_fps(self, config_ids_string, fps):\n self.at(at_config_ids, config_ids_string)\n self.at(at_config, \"video:codec_fps\", fps)\n\n def set_video_codec(self, config_ids_string, codec):\n self.at(at_config_ids, config_ids_string)\n self.at(at_config, \"video:video_codec\", codec)\n\n def commwdg(self):\n \"\"\"Communication watchdog signal.\n\n This needs to be send regulary to keep the communication w/ the drone\n alive.\n \"\"\"\n self.at(at_comwdg)\n\n def halt(self):\n \"\"\"Shutdown the drone.\n\n This method does not land or halt the actual drone, but the\n communication with the drone. You should call it at the end of your\n application to close all sockets, pipes, processes and threads related\n with this object.\n \"\"\"\n print(\"Halt Called\")\n self.lock.acquire()\n self.com_watchdog_timer.cancel()\n self.com_pipe.send('die!')\n self.network_process.terminate()\n self.network_process.join()\n self.lock.release()\n\n def get_image(self):\n _im = np.copy(self.image)\n return _im\n\n def get_navdata(self):\n return self.navdata\n\n def set_navdata(self, navdata):\n self.navdata = navdata\n self.get_navdata()\n\n def set_image(self, image):\n if (image.shape == self.image_shape):\n self.image = image\n self.image = image\n\n def apply_command(self, command):\n if command == 'emergency':\n command = 'reset'\n\n try:\n getattr(self, command)()\n except AttributeError:\n print(\"Command %s is not a recognized command\" % command)\n if any((command == \"hover\" and not self.last_command_is_hovering,\n command in ('land', 'takeoff'))):\n self.last_command_is_hovering = True\n else:\n self.last_command_is_hovering = False\n\n\nclass ARDrone2(ARDrone):\n def __init__(self, hd=False, use_video=True):\n ARDrone.__init__(self, True, hd, use_video=use_video)\n\n###############################################################################\n# Low level AT Commands\n###############################################################################\n\n\ndef at_ref(seq, takeoff, emergency=False):\n \"\"\"\n Basic behaviour of the drone: take-off/landing, emergency stop/reset).\n\n Parameters:\n seq -- sequence number\n takeoff -- True: Takeoff / False: Land\n emergency -- True: Turn off the engines\n \"\"\"\n p = 0b10001010101000000000000000000\n if takeoff:\n p += 0b1000000000\n if emergency:\n p += 0b0100000000\n at(\"REF\", seq, [p])\n\n\ndef at_pcmd(seq, progressive, lr, fb, vv, va):\n \"\"\"\n Make the drone move (translate/rotate).\n\n Parameters:\n seq -- sequence number\n progressive -- True: enable progressive commands, False: disable (i.e.\n enable hovering mode)\n lr -- left-right tilt: float [-1..1] negative: left, positive: right\n rb -- front-back tilt: float [-1..1] negative: forwards, positive:\n backwards\n vv -- vertical speed: float [-1..1] negative: go down, positive: rise\n va -- angular speed: float [-1..1] negative: spin left, positive: spin\n right\n\n The above float values are a percentage of the maximum speed.\n \"\"\"\n p = 1 if progressive else 0\n at(\"PCMD\", seq, [p, float(lr), float(fb), float(vv), float(va)])\n\n\ndef at_ftrim(seq):\n \"\"\"\n Tell the drone it's lying horizontally.\n\n Parameters:\n seq -- sequence number\n \"\"\"\n at(\"FTRIM\", seq, [])\n\n\ndef at_zap(seq, stream):\n \"\"\"\n Select which video stream to send on the video UDP port.\n\n 
Parameters:\n seq -- sequence number\n stream -- Integer: video stream to broadcast\n \"\"\"\n # FIXME: improve parameters to select the modes directly\n at(\"ZAP\", seq, [stream])\n\n\ndef at_config(seq, option, value):\n \"\"\"Set configuration parameters of the drone.\"\"\"\n at(\"CONFIG\", seq, [str(option), str(value)])\n\n\ndef at_config_ids(seq, value):\n \"\"\"Set configuration parameters of the drone.\"\"\"\n at(\"CONFIG_IDS\", seq, value)\n\n\ndef at_ctrl(seq, num):\n \"\"\"Ask the parrot to drop its configuration file\"\"\"\n at(\"CTRL\", seq, [num, 0])\n\n\ndef at_comwdg(seq):\n \"\"\"\n Reset communication watchdog.\n \"\"\"\n # FIXME: no sequence number\n at(\"COMWDG\", seq, [])\n\n\ndef at_aflight(seq, flag):\n \"\"\"\n Makes the drone fly autonomously.\n\n Parameters:\n seq -- sequence number\n flag -- Integer: 1: start flight, 0: stop flight\n \"\"\"\n at(\"AFLIGHT\", seq, [flag])\n\n\ndef at_pwm(seq, m1, m2, m3, m4):\n \"\"\"\n Sends control values directly to the engines, overriding control loops.\n\n Parameters:\n seq -- sequence number\n m1 -- front left command\n m2 -- front right command\n m3 -- back right command\n m4 -- back left command\n \"\"\"\n # FIXME: what type do mx have?\n raise NotImplementedError()\n\n\ndef at_led(seq, anim, f, d):\n \"\"\"\n Control the drone's LEDs.\n\n Parameters:\n seq -- sequence number\n anim -- Integer: animation to play\n f -- ?: frequency in Hz of the animation\n d -- Integer: total duration in seconds of the animation\n \"\"\"\n pass\n\n\ndef at_anim(seq, anim, d):\n \"\"\"\n Makes the drone execute a predefined movement (animation).\n\n Parameters:\n seq -- sequence number\n anim -- Integer: animation to play\n d -- Integer: total duration in seconds of the animation\n \"\"\"\n at(\"ANIM\", seq, [anim, d])\n\n\ndef at(command, seq, params):\n \"\"\"\n Parameters:\n command -- the command\n seq -- the sequence number\n params -- a list of elements which can be either int, float or string\n \"\"\"\n param_str = ''\n for p in params:\n if type(p) == int:\n param_str += \",%d\" % p\n elif type(p) == float:\n param_str += \",%d\" % f2i(p)\n elif type(p) == str:\n param_str += ',\"' + p + '\"'\n msg = \"AT*%s=%i%s\\r\" % (command, seq, param_str)\n sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n sock.sendto(msg.encode(\"utf-8\"), ARDRONE_COMMAND_ADDR)\n\n\ndef f2i(f):\n \"\"\"Interpret IEEE-754 floating-point value as signed integer.\n\n Arguments:\n f -- floating point value\n \"\"\"\n return struct.unpack('i', struct.pack('f', f))[0]\n\n\nif __name__ == \"__main__\":\n '''\n For testing purposes only\n '''\n import termios\n import fcntl\n import os\n\n fd = sys.stdin.fileno()\n\n oldterm = termios.tcgetattr(fd)\n newattr = termios.tcgetattr(fd)\n newattr[3] = newattr[3] & ~termios.ICANON & ~termios.ECHO\n termios.tcsetattr(fd, termios.TCSANOW, newattr)\n\n oldflags = fcntl.fcntl(fd, fcntl.F_GETFL)\n fcntl.fcntl(fd, fcntl.F_SETFL, oldflags | os.O_NONBLOCK)\n\n drone = ARDrone(is_ar_drone_2=True)\n\n import cv2\n try:\n startvideo = True\n video_waiting = False\n while 1:\n time.sleep(.0001)\n if startvideo:\n try:\n cv2.imshow(\"Drone camera\", cv2.cvtColor(drone.get_image(), cv2.COLOR_BGR2RGB))\n cv2.waitKey(1)\n except:\n if not video_waiting:\n print(\"Video will display when ready\")\n video_waiting = True\n pass\n\n try:\n c = sys.stdin.read(1)\n c = c.lower()\n print(\"Got character\", c)\n if c == 'a':\n drone.move_left()\n if c == 'd':\n drone.move_right()\n if c == 'w':\n drone.move_forward()\n if c == 's':\n 
drone.move_backward()\n if c == ' ':\n drone.land()\n if c == '\\n':\n drone.takeoff()\n if c == 'q':\n drone.turn_left()\n if c == 'e':\n drone.turn_right()\n if c == '1':\n drone.move_up()\n if c == '2':\n drone.hover()\n if c == '3':\n drone.move_down()\n if c == 't':\n drone.reset()\n if c == 'x':\n drone.hover()\n if c == 'y':\n drone.trim()\n if c == 'i':\n startvideo = True\n try:\n navdata = drone.get_navdata()\n\n print('Emergency landing =', navdata['drone_state']['emergency_mask'])\n print('User emergency landing = ', navdata['drone_state']['user_el'])\n print('Navdata type= ', navdata['drone_state']['navdata_demo_mask'])\n print('Altitude= ', navdata[0]['altitude'])\n print('video enable= ', navdata['drone_state']['video_mask'])\n print('vision enable= ', navdata['drone_state']['vision_mask'])\n print('command_mask= ', navdata['drone_state']['command_mask'])\n except:\n pass\n\n if c == 'j':\n print(\"Asking for configuration...\")\n drone.at(at_ctrl, 5)\n time.sleep(0.5)\n drone.at(at_ctrl, 4)\n except IOError:\n pass\n finally:\n termios.tcsetattr(fd, termios.TCSAFLUSH, oldterm)\n fcntl.fcntl(fd, fcntl.F_SETFL, oldflags)\n drone.halt()\n","sub_path":"adetaylor_api/libardrone/libardrone.py","file_name":"libardrone.py","file_ext":"py","file_size_in_byte":18370,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} {"seq_id":"382215858","text":"# -*- coding:utf-8 -*-\n\nfrom odoo import api, fields, models, _\nfrom dateutil import relativedelta\n\n\nclass HrAgence(models.Model):\n _name = 'hr.agence'\n _description = \"hr agence\"\n\n name = fields.Char('Libellé', required=True)\n description = fields.Text('Description')\n\n\nclass VisiteMedical(models.Model):\n _name = 'hr.visit.medical'\n _description = \"Gestion des visites medicals\"\n\n name = fields.Char('Libellé', required=True)\n date_prevue = fields.Date(\"Date prévue\", required=True)\n date_effective = fields.Date(\"Date éffective\", required=True)\n description = fields.Text(\"Commentaire\")\n lieu_visite = fields.Char(\"Lieu de la visite\", required=True)\n employee_id = fields.Many2one('hr.employee', \"Employé\", required=False)\n\n\nclass HrEmployee(models.Model):\n _inherit = 'hr.employee'\n\n @api.depends(\"date_return_last_holidays\", \"start_date\")\n def _get_estimed_holidays(self):\n today = fields.Datetime.now()\n for emp in self:\n facteur = emp.company_id.number_holidays_locaux if emp.nature_employe == 'local' \\\n else emp.company_id.number_holidays_expat\n # default values; the computed fields must be assigned for every record\n vals = {\n 'estimed_date_leave': False,\n 'number_days_estimed_holidays': 0,\n 'estimated_date_return_leave': False\n }\n if emp.date_return_last_holidays:\n start = fields.Date.from_string(emp.date_return_last_holidays)\n if emp.date_return_last_holidays == emp.start_date and emp.seniority_employee >= 1:\n vals['estimed_date_leave'] = fields.Date.from_string(emp.start_date) + \\\n relativedelta.relativedelta(year=today.year)\n else:\n vals['estimed_date_leave'] = fields.Date.from_string(emp.date_return_last_holidays) + \\\n relativedelta.relativedelta(years=+1)\n tmp = vals['estimed_date_leave'] - start\n vals['number_days_estimed_holidays'] = tmp.days * 12 / 360 * facteur\n vals['estimated_date_return_leave'] = vals['estimed_date_leave'] + relativedelta.relativedelta(\n days=+vals['number_days_estimed_holidays'])\n emp.update(vals)\n\n def name_get(self):\n result = []\n for emp in self:\n if emp.first_name:\n name = emp.name + ' ' + 
emp.name\n result.append((emp.id, name))\n return result\n\n medic_exam = fields.Date(string='Medical Examination Date', groups=\"hr.group_hr_user\")\n bank_ids = fields.One2many(\"res.partner.bank\", \"employee_id\", \"Comptes bancaires\")\n main_bank_id = fields.Many2one(\"res.partner.bank\", \"Compte bancaire principale\",\n domain=\"[('employee_id', '=', id)]\")\n dispatch_bank_ids = fields.One2many('hr.employee.salary.dispatched.line', 'employee_id', 'Repartition du salaire')\n motif_fin_contract_id = fields.Many2one('hr.employee.motif.cloture', \"Motif de fin\", required=False)\n motif_depart = fields.Text('Commentaire de depart')\n college = fields.Selection([('cadre', 'Cadre'), ('non_cadre', 'Non cadre')], string=\"Collège\", default=False)\n agence_id = fields.Many2one('hr.agence', 'Agence', required=False)\n type_employee = fields.Selection([('director', 'Directeur'), ('department_chief', 'Chef de departement'),\n ('service_chief', 'chef de service'), ('employee', 'Collaborateur'),\n ('general_director', 'Directeur général'), ('project_manager', 'Chef de projet')],\n \"Type de l'employé\")\n current_leave_state = fields.Selection(\n selection_add=[('direction', 'Directeur'), ('department', 'Chef de departement'),\n ('service', 'Chef de service')])\n date_first_alerte_retraite = fields.Date(\"Date première alerte retraite\")\n date_second_alerte_retraite = fields.Date(\"Date seconde alerte retraite\")\n medic_exam_yearly = fields.Date(\"Visite médicale annuelle\", required=False)\n date_annienete = fields.Date(\"Date d'embauche\", required=False)\n # type_piece_id = fields.Many2one('hr.employee.nature_piece', \"Type de pièce\", required=False)\n # num_piece = fields.Char(\"Numéro de la pièce\", required=False)\n mobile_personnal = fields.Char(\"Tél Portable personnel\")\n gender = fields.Selection([\n ('male', 'M'),\n ('female', 'F'),\n ('other', 'Autre')\n ], groups=\"hr.group_hr_user\", default=\"male\")\n visit_ids = fields.One2many('hr.visit.medical', 'employee_id', \"Visistes médicales\")\n email_personal = fields.Char(\"Email personnel\", required=False)\n estimed_date_leave = fields.Date(\"Date prévisonnelle de depart en congés\", compute=\"_get_estimed_holidays\",\n store=True)\n number_days_estimed_holidays = fields.Integer(\"Nombre de jours de congés estimés\", compute=\"_get_estimed_holidays\",\n store=True)\n estimated_date_return_leave = fields.Date(\"Date prévisonnelle de retour en congés\", compute=\"_get_estimed_holidays\",\n store=True)\n num_cgare = fields.Char(\"N° CGRAE\", required=False)\n num_crrae = fields.Char('N° CRRAE', required=False)\n num_cmu = fields.Char('N° CMU', required=False)\n first_name = fields.Char(\"Prenoms\", required=True)\n\n @api.onchange('end_date')\n @api.depends('end_date')\n def onChangeDateEnd(self):\n if self.company_id and self.end_date:\n first_date = str(fields.Date.from_string(self.end_date) + relativedelta.relativedelta(\n months=- self.company_id.first_alert_retraite))\n second_date = str(fields.Date.from_string(self.end_date) + relativedelta.relativedelta(\n months=- self.company_id.second_alert_retraite))\n self.date_first_alerte_retraite = first_date\n self.date_second_alerte_retraite = second_date\n\n @api.depends(\"enfants_ids\")\n def _compute_children(self):\n for emp in self:\n emp.total_children = len(emp.enfants_ids)\n children_in_charge = 0\n for child in emp.enfants_ids:\n if child.age <= emp.company_id.max_age_child:\n children_in_charge += 1\n else:\n if child.certification_frequentation:\n children_in_charge += 
1\n emp.children = children_in_charge\n\n\nclass Department(models.Model):\n _inherit = \"hr.department\"\n _rec_name = 'name'\n\n\nclass ChildEmployee(models.Model):\n _inherit = \"hr.employee.enfant\"\n\n @api.model\n def send_notifcation_certification(self):\n today = fields.Date.from_string(fields.Date.today())\n\n enfants = self.search([])\n\n for eft in enfants:\n if eft.age >= 21:\n # TODO: send notification\n return True\n values = {}\n\n certification_frequentation = fields.Boolean(\"Certificat de frequentation\")\n num_cmu = fields.Char('N° CMU', required=False)\n\n\nclass HrEmployeeMotifCloture(models.Model):\n _name = 'hr.employee.motif.cloture'\n _description = \"hr employe motif cloture\"\n\n name = fields.Char('Libellé', required=True)\n description = fields.Text('Description', required=False)\n\n#\n# class HrEmployeeNaturePiece(models.Model):\n# _name = \"hr.employee.nature_piece\"\n# _description = \"Nature de la piece\"\n#\n# name = fields.Char(\"Libellé\", required=True, size=225)\n# description = fields.Text(\"Description\", required=False)\n","sub_path":"hr_cnce/models/hrEmloyee.py","file_name":"hrEmloyee.py","file_ext":"py","file_size_in_byte":7833,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"463607626","text":"\"\"\"Room for scheduling and running a \"match\", a series of games between a pair of racers.\"\"\"\n\nimport asyncio\nimport datetime\nfrom typing import Optional, Mapping, Union\n\nimport discord\nimport pytz\n\nfrom necrobot.botbase.necroevent import NEDispatch\nfrom necrobot.botbase.botchannel import BotChannel\nfrom necrobot.config import Config\nfrom necrobot.match import matchdb\nfrom necrobot.match import cmd_match\nfrom necrobot.match.match import Match\nfrom necrobot.match.matchracedata import MatchRaceData\nfrom necrobot.race import cmd_race\nfrom necrobot.race import raceinfo\nfrom necrobot.race.race import Race, RaceEvent\nfrom necrobot.race.raceconfig import RaceConfig\nfrom necrobot.test import cmd_test\nfrom necrobot.user import cmd_user\nfrom necrobot.util import console\nfrom necrobot.util import ordinal\nfrom necrobot.util import timestr\nfrom necrobot.race import racedb\n\n\nclass MatchRoom(BotChannel):\n def __init__(self, match_discord_channel: discord.TextChannel, match: Match):\n \"\"\"BotChannel where a match is taking place.\n \n Parameters\n ----------\n match_discord_channel: discord.Channel\n The discord channel corresponding to this BotChannel.\n match: Match\n The Match object for the match.\n \"\"\"\n BotChannel.__init__(self)\n self._channel = match_discord_channel # type: discord.TextChannel\n self._match = match # type: Match\n\n self._current_race = None # type: Optional[Race]\n self._last_begun_race = None # type: Optional[Race]\n\n self._countdown_to_match_future = None # type: Optional[asyncio.Future]\n\n self._current_race_number = None # type: Optional[int]\n\n self._last_begun_race_number = None # type: Optional[int]\n self._current_race_contested = False # type: bool\n\n self._match_race_data = None # type: Optional[MatchRaceData]\n\n self._prematch_channel_commands = [\n cmd_match.AlertStaff(self),\n cmd_match.Confirm(self),\n cmd_match.GetMatchInfo(self),\n cmd_match.Suggest(self),\n cmd_match.Unconfirm(self),\n cmd_match.ForceBegin(self),\n cmd_match.ForceCancelMatch(self),\n cmd_match.ForceConfirm(self),\n cmd_match.ForceReschedule(self),\n cmd_match.Postpone(self),\n cmd_match.RebootRoom(self),\n cmd_match.SetMatchType(self),\n 
cmd_match.Update(self),\n\n cmd_race.ChangeRules(self),\n\n cmd_test.TestMatch(self),\n\n cmd_user.UserInfo(self),\n ]\n\n self._during_match_channel_commands = [\n cmd_match.AlertStaff(self),\n cmd_match.CancelRace(self),\n cmd_match.ChangeWinner(self),\n cmd_match.Contest(self),\n cmd_match.ForceCancelMatch(self),\n cmd_match.ForceNewRace(self),\n cmd_match.ForceRecordRace(self),\n cmd_match.GetMatchInfo(self),\n cmd_match.Postpone(self),\n cmd_match.RebootRoom(self),\n cmd_match.SetMatchType(self),\n cmd_match.Update(self),\n\n cmd_race.Ready(self),\n cmd_race.Unready(self),\n cmd_race.Done(self),\n cmd_race.Undone(self),\n cmd_race.Time(self),\n\n cmd_race.Pause(self),\n cmd_race.Unpause(self),\n cmd_race.Reseed(self),\n cmd_race.ChangeRules(self),\n\n cmd_test.TestMatch(self),\n\n cmd_user.UserInfo(self),\n ]\n\n self._postmatch_channel_commands = [\n cmd_match.AlertStaff(self),\n cmd_match.CancelRace(self),\n cmd_match.ChangeWinner(self),\n cmd_match.Contest(self),\n cmd_match.ForceCancelMatch(self),\n cmd_match.ForceCloseRoom(self),\n cmd_match.ForceNewRace(self),\n cmd_match.ForceRecordRace(self),\n cmd_match.GetMatchInfo(self),\n cmd_match.Postpone(self),\n cmd_match.RebootRoom(self),\n cmd_match.SetMatchType(self),\n cmd_match.Update(self),\n\n cmd_race.ChangeRules(self),\n\n cmd_test.TestMatch(self),\n\n cmd_user.UserInfo(self),\n ]\n\n self.channel_commands = self._prematch_channel_commands\n\n# Properties\n @property\n def channel(self) -> discord.TextChannel:\n return self._channel\n\n @property\n def match(self) -> Match:\n return self._match\n\n @property\n def current_race(self) -> Optional[Race]:\n \"\"\"The \"main\" Race; the one that most commands should apply to. Not None if self.before_races is False.\"\"\"\n return self._current_race\n\n @property\n def last_begun_race(self) -> Optional[Race]:\n \"\"\"The last race to begin (sent a RaceEvent.RACE_BEGIN to this room). 
Useful for allowing commands to apply\n to a finished race during the ready-up phase of the subsequent race.\n \"\"\"\n return self._last_begun_race\n\n @property\n def played_all_races(self) -> bool:\n \"\"\"True if the match is over.\"\"\"\n if self._match_race_data is None:\n return False\n\n if self.match.is_best_of:\n return self._match_race_data.leader_wins > self.match.number_of_races // 2\n else:\n return self._match_race_data.num_finished >= self.match.number_of_races\n\n def is_racer_id(self, racer_id: Union[str, int]) -> bool:\n rid_int = int(racer_id)\n return rid_int == self.match.racer_1.member.id or rid_int == self.match.racer_2.member.id\n\n async def during_races(self) -> bool:\n \"\"\"True if the match has started but not finished.\"\"\"\n return self.current_race is not None and not self.played_all_races\n\n async def contest_last_begun_race(self) -> None:\n \"\"\"Mark the last begun race as contested.\"\"\"\n if self._last_begun_race is not None and not self._last_begun_race.final:\n self._current_race_contested = True\n return\n\n if self._last_begun_race_number == 0:\n return\n\n contest_race_number = self._last_begun_race_number\n\n await matchdb.set_match_race_contested(\n match=self.match,\n race_number=contest_race_number,\n contested=True\n )\n\n async def initialize(self) -> None:\n \"\"\"Async initialization method\"\"\"\n if self._countdown_to_match_future is not None:\n self._countdown_to_match_future.cancel()\n self._countdown_to_match_future = asyncio.ensure_future(self._countdown_to_match_start(warn=True))\n self._match_race_data = await matchdb.get_match_race_data(self.match.match_id)\n self._current_race_number = self._match_race_data.num_finished + self._match_race_data.num_canceled\n self._last_begun_race_number = self._current_race_number\n self._set_channel_commands()\n\n async def send_channel_start_text(self) -> None:\n msg = '\\n \\N{BULLET} To suggest a time, use `.suggest`. (See `.help suggest` for more info.) Give the time ' \\\n 'in your own local timezone (which you\\'ve registered using `.timezone`).\\n' \\\n '\\N{BULLET} Confirm a suggested time with `.confirm`. 
You may remove a confirmation with ' \\\n '`.unconfirm`.\\n' \\\n '\\N{BULLET} To reschedule a time both racers have confirmed, both racers must call `.unconfirm`.\\n' \\\n '\\N{BULLET} You may alert CoNDOR staff at any time by calling `.staff`.\\n'\n\n if self.match.racer_1.timezone is not None and self.match.racer_2.timezone is not None:\n utcnow = pytz.utc.localize(datetime.datetime.utcnow())\n r1off = utcnow.astimezone(self.match.racer_1.timezone).utcoffset()\n r2off = utcnow.astimezone(self.match.racer_2.timezone).utcoffset()\n\n if r1off > r2off:\n ahead_racer_name = self.match.racer_1.display_name\n behind_racer_name = self.match.racer_2.display_name\n diff_str = timestr.timedelta_to_str(r1off - r2off)\n # noinspection PyUnresolvedReferences\n msg += '\\N{BULLET} {0} is currently {1} ahead of {2}.\\n'.format(\n ahead_racer_name, diff_str, behind_racer_name\n )\n elif r1off < r2off:\n ahead_racer_name = self.match.racer_2.display_name\n behind_racer_name = self.match.racer_1.display_name\n diff_str = timestr.timedelta_to_str(r2off - r1off)\n # noinspection PyUnresolvedReferences\n msg += '\\N{BULLET} {0} is currently {1} ahead of {2}.\\n'.format(\n ahead_racer_name, diff_str, behind_racer_name\n )\n else:\n # noinspection PyUnresolvedReferences\n msg += '\\N{BULLET} The two racers in this match currently have the same UTC offset.\\n'\n\n else:\n if self.match.racer_1.timezone is None and self.match.racer_2.timezone is not None:\n # noinspection PyUnresolvedReferences\n msg += '\\N{BULLET} {0} has not registered a timezone. Please call `.timezone`.\\n'.format(\n self.match.racer_1.display_name\n )\n elif self.match.racer_1.timezone is not None and self.match.racer_2.timezone is None:\n # noinspection PyUnresolvedReferences\n msg += '\\N{BULLET} {0} has not registered a timezone. Please call `.timezone`.\\n'.format(\n self.match.racer_2.display_name\n )\n else:\n # noinspection PyUnresolvedReferences\n msg += '\\N{BULLET} {0} and {1} have not registered a timezone. 
Please call `.timezone`.\\n'.format(\n self.match.racer_1.display_name,\n self.match.racer_2.display_name\n )\n\n # noinspection PyUnresolvedReferences\n msg += '\\N{BULLET} This match is a {0}.'.format(self.match.format_str)\n\n await self.channel.send(msg)\n\n async def update(self) -> None:\n if self.match.is_scheduled and self.current_race is None:\n if self._countdown_to_match_future is not None:\n self._countdown_to_match_future.cancel()\n self._countdown_to_match_future = asyncio.ensure_future(self._countdown_to_match_start())\n elif not self.match.is_scheduled:\n if self._countdown_to_match_future is not None:\n self._countdown_to_match_future.cancel()\n self._current_race = None\n\n self._set_channel_commands()\n\n if self.played_all_races:\n await self._end_match()\n\n async def change_race_info(self, command_args: list) -> None:\n \"\"\"Change the RaceInfo for this room by parsing the input args\"\"\"\n new_race_info = raceinfo.parse_args_modify(\n command_args,\n raceinfo.RaceInfo.copy(self.match.race_info)\n )\n if new_race_info:\n self.match.set_race_info(new_race_info)\n if self.current_race is not None and self.current_race.before_race:\n self.current_race.race_info = raceinfo.RaceInfo.copy(self.match.race_info)\n await self.write('Changed rules for the next race.')\n await self.update()\n\n async def process(self, race_event: RaceEvent) -> None:\n \"\"\"Process a RaceEvent\"\"\"\n if race_event.event == RaceEvent.EventType.RACE_BEGIN:\n self._last_begun_race = self._current_race\n self._last_begun_race_number = self._current_race_number\n elif race_event.event == RaceEvent.EventType.RACE_BEGIN_COUNTDOWN:\n await NEDispatch().publish(event_type='begin_match_race', match=self.match)\n elif race_event.event == RaceEvent.EventType.RACE_END:\n await asyncio.sleep(1) # Waiting for a short time feels good UI-wise\n await self.write('The race will end in {} seconds.'.format(self.current_race.race_config.finalize_time_sec))\n elif race_event.event == RaceEvent.EventType.RACE_FINALIZE:\n await NEDispatch().publish(event_type='end_match_race', match=self.match)\n\n race_winner = race_event.race.racers[0]\n race_loser = race_event.race.racers[1]\n auto_contest = (\n race_winner.is_finished\n and race_loser.is_finished\n and race_loser.time - race_winner.time <= Config.MATCH_AUTOCONTEST_IF_WITHIN_HUNDREDTHS\n )\n\n if auto_contest:\n self._current_race_contested = True\n await NEDispatch().publish(\n 'notify',\n message='A race has been automatically contested in channel {0}, because the finish times were '\n 'close.'.format(self.channel.mention)\n )\n\n await self._record_race(race_event.race, self._race_winner(race_event.race))\n\n # Write end-of-race message\n end_race_msg = 'The race has ended.'\n if auto_contest:\n end_race_msg += ' This match has been automatically marked as contested because the finish times ' \\\n 'were close.'\n await self.write(end_race_msg)\n\n # Begin a new race if appropriate, or end the match.\n if self.played_all_races:\n await self._end_match()\n else:\n await self._begin_new_race()\n elif race_event.event == RaceEvent.EventType.RACE_CANCEL:\n await self.write('The race has been canceled.')\n if not self.played_all_races:\n await self._begin_new_race()\n\n async def write(self, text: str) -> None:\n \"\"\"Write text to the channel\"\"\"\n await self.channel.send(text)\n\n async def alert_racers(self) -> None:\n \"\"\"Post an alert pinging both racers in the match\"\"\"\n member_1 = self.match.racer_1.member\n member_2 = self.match.racer_2.member\n\n 
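# mention whichever racer members are available\n 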
alert_str = ''\n if member_1 is not None:\n alert_str += member_1.mention + ', '\n if member_2 is not None:\n alert_str += member_2.mention + ', '\n\n if alert_str:\n await self.write(f'{alert_str[:-2]}: The match is scheduled to begin {self.match.discord_rel_timestamp}.')\n\n async def force_new_race(self) -> None:\n \"\"\"Begin a new race, canceling the old one if necessary\"\"\"\n if self.current_race is not None and not self.current_race.complete:\n await self.current_race.cancel()\n\n # Only directly call begin_new_race if cancelling the old one did not begin a new one already\n if self.current_race is None or self.current_race.complete:\n await self._begin_new_race()\n\n async def cancel_race(self, race_number: int) -> bool:\n \"\"\"Mark a race as canceled\n \n Parameters\n ----------\n race_number: int\n The number of the race to cancel, counting only uncanceled races.\n \"\"\"\n race_number = race_number - self._match_race_data.num_canceled\n success = await matchdb.cancel_race(self.match, race_number)\n if success:\n self._match_race_data.num_finished -= 1\n self._match_race_data.num_canceled += 1\n return success\n\n async def force_record_race(self, winner: int) -> None:\n \"\"\"Record a \"fake\" race with the given winner\"\"\"\n await matchdb.record_match_race(\n match=self.match,\n winner=winner\n )\n self._update_race_data(race_winner=winner)\n await self.update()\n\n async def add_cawmentator_permissions(self) -> None:\n cawmentator = await self.match.get_cawmentator()\n if cawmentator is not None and not self.is_racer_id(cawmentator.member.id) \\\n and not self.is_referee(cawmentator.member) and not self.is_admin(cawmentator.member):\n await self.channel.set_permissions(\n cawmentator.member,\n read_messages=True,\n read_message_history=False,\n send_messages=False\n )\n await asyncio.sleep(5)\n await self.channel.send(\n 'This race\\'s cawmentator {caw} can now read this channel.'.format(caw=cawmentator.member.mention)\n )\n\n async def remove_cawmentator_permissions(self) -> None:\n cawmentator = await self.match.get_cawmentator()\n if cawmentator is not None and not self.is_racer_id(cawmentator.member.id):\n await self.channel.set_permissions(cawmentator.member, overwrite=None)\n\n async def _countdown_to_match_start(self, warn: bool = False) -> None:\n \"\"\"Does things at certain times before the match\n \n Posts alerts to racers in this channel, and sends NecroEvents at alert times. Begins the match\n at the appropriate time. This is stored as a future in this object, and is meant to be canceled\n if this object closes.\n \"\"\"\n try:\n if not self.match.is_scheduled:\n return\n\n time_until_match = self.match.time_until_match\n\n # Begin match now if appropriate\n if time_until_match < datetime.timedelta(seconds=0):\n if not self.played_all_races:\n if warn:\n await self.write(\n 'I believe that I was just restarted; an error may have occurred. I am '\n 'beginning a new race and attempting to pick up this match where we left '\n 'off. 
If this is an error, or if there are unrecorded races, please contact '\n 'an admin.')\n await self._begin_new_race()\n return\n\n # Wait until the first warning\n if time_until_match > Config.MATCH_FIRST_WARNING:\n await asyncio.sleep((time_until_match - Config.MATCH_FIRST_WARNING).total_seconds())\n await self.alert_racers()\n await NEDispatch().publish('match_alert', match=self.match, final=False)\n\n # Wait until the final warning\n time_until_match = self.match.time_until_match\n if time_until_match > Config.MATCH_FINAL_WARNING:\n await asyncio.sleep((time_until_match - Config.MATCH_FINAL_WARNING).total_seconds())\n\n # At this time, we've either just passed the FINAL_MATCH_WARNING or the function was just called\n # (happens if the call comes sometime after the FINAL_MATCH_WARNING but before the match).\n await self.alert_racers()\n await NEDispatch().publish('match_alert', match=self.match, final=True)\n\n await asyncio.sleep(self.match.time_until_match.total_seconds())\n await self._begin_new_race()\n except asyncio.CancelledError:\n console.info('MatchRoom._countdown_to_match_start() was cancelled.')\n raise\n\n async def _begin_new_race(self):\n \"\"\"Begin a new race\"\"\"\n # Shift to during-match commands\n self.channel_commands = self._during_match_channel_commands\n\n # Make the race\n match_race_data = await matchdb.get_match_race_data(self.match.match_id)\n self._current_race = Race(self, self.match.race_info,\n race_config=RaceConfig(finalize_time_sec=15, auto_forfeit=1))\n self._current_race_number = match_race_data.num_races + 1\n await self._current_race.initialize()\n\n # Enter the racers automatically\n for racer in self.match.racers:\n await self.current_race.enter_member(racer.member, mute=True)\n\n # Output text\n await self.write(\n 'Please input the seed ({1}) and type `.ready` when you are ready for the {0} race. 
'\n 'When both racers `.ready`, the race will begin.'.format(\n ordinal.num_to_text(match_race_data.num_finished + 1),\n self.current_race.race_info.seed))\n\n if self._countdown_to_match_future is not None:\n self._countdown_to_match_future.cancel()\n\n async def _end_match(self):\n \"\"\"End the match\"\"\"\n self._current_race = None\n self.channel_commands = self._postmatch_channel_commands\n\n # Send event\n if self._match_race_data.r1_wins > self._match_race_data.r2_wins:\n winner = self.match.racer_1\n winner_wins = self._match_race_data.r1_wins\n loser_wins = self._match_race_data.r2_wins\n elif self._match_race_data.r2_wins > self._match_race_data.r1_wins:\n winner = self.match.racer_2\n winner_wins = self._match_race_data.r2_wins\n loser_wins = self._match_race_data.r1_wins\n else:\n winner = '[Tied]'\n winner_wins = self._match_race_data.r1_wins\n loser_wins = self._match_race_data.r2_wins\n\n self.match.set_finish_time(pytz.utc.localize(datetime.datetime.utcnow()))\n\n await NEDispatch().publish(\n 'end_match',\n match=self.match,\n winner=winner,\n winner_wins=winner_wins,\n loser_wins=loser_wins,\n r1_wins=self._match_race_data.r1_wins,\n r2_wins=self._match_race_data.r2_wins\n )\n\n await self.write('Match complete.')\n\n async def _record_race(self, race: Race, race_winner: int) -> None:\n \"\"\"Record the given race as part of this match\"\"\"\n await racedb.record_race(race)\n await matchdb.record_match_race(\n match=self.match,\n race_number=self._current_race_number,\n race_id=self.current_race.race_id,\n winner=race_winner,\n contested=self._current_race_contested,\n canceled=False\n )\n self._update_race_data(race_winner=race_winner)\n\n # TODO: move to LadderManager class, trigger on appropriate event\n # async def _record_new_ratings(self, race_winner: int) -> None:\n # \"\"\"Get new ratings for the racers in this match and record them\"\"\"\n # racer_1 = self.match.racer_1\n # racer_2 = self.match.racer_2\n #\n # rating_1 = await ratingsdb.get_rating(racer_1.discord_id)\n # rating_2 = await ratingsdb.get_rating(racer_2.discord_id)\n #\n # new_ratings = ratingutil.get_new_ratings(rating_1=rating_1, rating_2=rating_2, winner=race_winner)\n #\n # await ratingsdb.set_rating(racer_1.discord_id, new_ratings[0])\n # await ratingsdb.set_rating(racer_2.discord_id, new_ratings[1])\n #\n # # this isn't working\n # # if Config.RATINGS_IN_NICKNAMES:\n # # for pair in [(racer_1, rating_1,), (racer_2, rating_2,)]:\n # # member = pair[0].member\n # # nick = '{0} ({1})'.format(pair[0].member.name, pair[1].displayed_rating)\n # # await self.client.change_nickname(member=member, nickname=nick)\n\n def _set_channel_commands(self) -> None:\n if self.current_race is None:\n if self.played_all_races:\n self.channel_commands = self._postmatch_channel_commands\n else:\n self.channel_commands = self._prematch_channel_commands\n else:\n self.channel_commands = self._during_match_channel_commands\n\n def _race_winner(self, race: Race) -> int:\n \"\"\"Get the number of the race's winner (1 or 2, for match.racer_1 or match.racer_2)\"\"\"\n race_winner_id = int(race.winner.member.id)\n if race_winner_id == int(self.match.racer_1.member.id):\n return 1\n elif race_winner_id == int(self.match.racer_2.member.id):\n return 2\n else:\n return 0\n\n def _update_race_data(self, race_winner: int) -> None:\n \"\"\"Update this object's MatchRaceData\"\"\"\n self._match_race_data.num_finished += 1\n if race_winner == 1:\n self._match_race_data.r1_wins += 1\n else:\n self._match_race_data.r2_wins += 
1\n","sub_path":"necrobot/match/matchroom.py","file_name":"matchroom.py","file_ext":"py","file_size_in_byte":24129,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"517864265","text":"import unittest, os\nfrom src.patcher.patcher import Patcher\n\nclass TestPatcher(unittest.TestCase):\n def test_pass0(self):\n dirpath = \"tests/unittests/patcher/resources/\"\n file_content_map = {}\n entries = os.listdir(dirpath)\n entries = list(filter(lambda x: x[0] != \".\", entries))\n\n for entry in entries:\n filepath = dirpath + entry\n content = \"\"\n with open(filepath) as f:\n content = f.read()\n\n file_content_map[entry] = content\n\n root_content = file_content_map[\"root\"]\n diff_content = file_content_map[\"diff\"]\n patcher = Patcher(root_content, diff_content)\n patcher.patch()\n\n root_edited_content = file_content_map[\"root_edited\"]\n self.assertEqual(patcher.patched_content, root_edited_content)\n\n diff1_content = file_content_map[\"diff1\"]\n patcher.update(diff1_content)\n patcher.patch()\n\n root_edited2_content = file_content_map[\"root_edited2\"]\n self.assertEqual(patcher.patched_content, root_edited2_content)\n","sub_path":"tests/unittests/patcher/test_patcher.py","file_name":"test_patcher.py","file_ext":"py","file_size_in_byte":1085,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"381809619","text":"from urllib.request import urlopen\nfrom bs4 import BeautifulSoup\n\nhtml=urlopen('https://movie.naver.com/movie/running/current.nhn')\n\nsoup=BeautifulSoup(html,'lxml')\n\nmovie_content=soup.find_all('div',{'id':'content'})\nmovie_li=movie_content[0].find_all('li')\n\ntitle_list=[]\nscore_list=[]\nmovie_ranking=dict()\n\nfor data in movie_li:\n title=data.find_all('dt',{'class':'tit'})\n for content in title:\n content=content('a')\n for content2 in content:\n title_list.append(content2.get_text())\n\n score=data.find_all(\"dl\",{\"class\":\"info_star\"})\n for content in score:\n content=content.find_all(\"span\",{'class':'num'})\n for content2 in content:\n score_list.append(content2.get_text())\n\nfor i in range(len(title_list)):\n movie_ranking[str(i+1)+\"위\"]=title_list[i]+\":\"+score_list[i]\n\nfor rank,info in movie_ranking.items():\n print(rank,\"-\",info)\n","sub_path":"movies.py","file_name":"movies.py","file_ext":"py","file_size_in_byte":903,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"511361686","text":"# -*- coding: utf-8 -*-\n\nimport new\nfrom django.apps import AppConfig\nfrom django.contrib.auth import get_user_model\nfrom django.contrib.auth.models import Permission, Group\nfrom django.db.models.signals import post_delete, post_save\nfrom django.utils import six\nfrom django.utils.translation import ugettext as _\n\nfrom frame.models import ManagementRole, Department, Scope, Version, UserProfile, GroupInfo\nfrom tools.utils import getLogger\n\nlogger = getLogger()\n\n\ndef _get_group_info(self):\n try:\n group_info = self.info.info\n except GroupInfo.DoesNotExist:\n group_info = _(u'暂无组信息')\n return group_info\n\n\ndef _get_app_info(self):\n apps = self.app.all()\n app_info = ''\n if len(apps) != 0:\n if len(apps) == 1:\n app_info = apps[0].name\n else:\n for app in apps:\n app_info += app.name + ', '\n app_info = app_info[:-2]\n else:\n app_info = _(u'暂未关联应用')\n return app_info\n\n\ndef _auth_group__str__(old_method, self, *args, **kwds):\n return \"%s | %s | %s\" % (\n 
six.text_type(self.name),\n        six.text_type(_get_group_info(self)),\n        six.text_type(_get_app_info(self))\n    )\n\n\ndef _auth_group__unicode__(old_method, self, *args, **kwds):\n    return \"%s | %s | %s\" % (\n        six.text_type(self.name),\n        six.text_type(_get_group_info(self)),\n        six.text_type(_get_app_info(self))\n    )\n\n\ndef _get_mmtr_info(self):\n    return self.user.username, self.scope.name, self.get_role_display()\n\n\ndef _managementrole__unicode__(old_method, self, *args, **kwds):\n    username, scope, role = _get_mmtr_info(self)\n    return \"%s | %s | %s\" % (\n        six.text_type(username),\n        six.text_type(scope),\n        six.text_type(role)\n    )\n\n\ndef _app__unicode__(old_method, self, *args, **kwds):\n    appname = self.name + 'test'\n    return \"%s | %s | %s\" % (\n        six.text_type(appname),\n        six.text_type(appname),\n        six.text_type(appname)\n    )\n\nclass FrameConfig(AppConfig):\n    name = 'frame'\n\n    def ready(self):\n        try:\n            enhance_method(Group, '__str__', _auth_group__str__)\n            enhance_method(Group, '__unicode__', _auth_group__unicode__)\n            enhance_method(ManagementRole, '__unicode__', _managementrole__unicode__)\n            register_model()\n\n        except Exception as e:\n            logger.error(\"frame config failed, info:%s\" % str(e), exc_info=True)\n\n\ndef enhance_method(klass, method_name, replacement):\n    \"\"\"\n    :param klass:\n    :param method_name:\n    :param replacement:\n    :return:\n    \"\"\"\n    method = getattr(klass, method_name)\n    setattr(klass, method_name, new.instancemethod(\n        lambda *args, **kwds: replacement(method, *args, **kwds), None, klass))\n\n\ndef register_model():\n    User = get_user_model()\n    model_list = [ManagementRole, Department, Scope, Permission, User, Group]\n    for model_item in model_list:\n        register_post_save(model_item, post_save)\n        register_post_delete(model_item, post_delete)\n\n    # UserProfile register signal\n    post_save.connect(user_opr_handler_save, UserProfile, dispatch_uid=UserProfile._meta.object_name)\n    register_post_delete(UserProfile, post_delete)\n\n\ndef register_post_save(_model, register_type):\n    register_type.connect(model_opr_handler_save, _model, dispatch_uid=_model._meta.object_name)\n\n\ndef register_post_delete(_model, register_type):\n    register_type.connect(model_opr_handler_delete, _model, dispatch_uid=_model._meta.object_name)\n\n\n'''\n:param\nsender: The model class.\ninstance: The actual instance being saved.\nupdate_fields: The set of fields to update explicitly specified in the save() method. None if this argument was not used in the save() call.\ncreated: A boolean; True if a new record was created.\n'''\n\n\ndef model_opr_handler_save(sender, instance, created, update_fields, **kwargs):\n    try:\n        if update_fields:\n            for update_field in list(update_fields):\n                if update_field == 'last_login':\n                    return\n        version = Version()\n        version.opr_detail = \"model:%s; created:%s; update_fields:%s\" % (instance._meta.verbose_name, created, update_fields)\n        version.save()\n    except Exception as e:\n        logger.error('model_opr_handler_save except. info:%s' % str(e), exc_info=True)\n\n\ndef model_opr_handler_delete(sender, **kwargs):\n    try:\n        version = Version()\n        version.opr_detail = \"%s delete\" % sender._meta.verbose_name\n        version.save()\n    except Exception as e:\n        logger.error('model_opr_handler_delete except. 
info:%s' % str(e), exc_info=True)\n\n\ndef user_opr_handler_save(sender, instance, created, update_fields, **kwargs):\n    try:\n        # Only changes to phone_number, safe_email and department on UserProfile are tracked\n        if update_fields:\n            for update_field in list(update_fields):\n                if update_field in [\"phone_number\", \"safe_email\", \"department\"]:\n                    version = Version()\n                    version.opr_detail = \"UserProfile has updated one of the fields phone_number, safe_email, department\"\n                    version.save()\n        if created:\n            version = Version()\n            version.opr_detail = \"UserProfile has been created\"\n            version.save()\n    except Exception as e:\n        logger.error(\"user_opr_handler_save except. info:%s\" % str(e), exc_info=True)\n\n\n","sub_path":"WiseEye/iam/frame/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":5420,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"37469851","text":"from tkinter import *\r\n\r\n\r\nclass BuckysButtons:\r\n\r\n    def __init__(self, master):\r\n        frame = Frame(master)\r\n        frame.pack()\r\n\r\n        self.printButton = Button(frame, text = \"Print something on the screen\", command = self.printMessage)\r\n        self.printButton.pack(side = LEFT)\r\n\r\n        self.quitButton = Button(frame, text = \"Quit\", command = quit)\r\n        self.quitButton.pack(side = LEFT)\r\n\r\n    def printMessage(self):\r\n        print(\"This is printed on the screen\")\r\n\r\n\r\n\r\nroot = Tk()\r\nb = BuckysButtons(root)\r\n\r\n\r\nroot.mainloop()","sub_path":"PycharmProjects/PythonRulz/Folder_main/PersonalProjects/Tkinter learn and others/Thinkter8.py","file_name":"Thinkter8.py","file_ext":"py","file_size_in_byte":553,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"584277965","text":"# -*- coding: utf-8 -*-\n##############################################################################\n#\n#    OpenERP, Open Source Management Solution\n#    Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).\n#\n#    This program is free software: you can redistribute it and/or modify\n#    it under the terms of the GNU Affero General Public License as\n#    published by the Free Software Foundation, either version 3 of the\n#    License, or (at your option) any later version.\n#\n#    This program is distributed in the hope that it will be useful,\n#    but WITHOUT ANY WARRANTY; without even the implied warranty of\n#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the\n#    GNU Affero General Public License for more details.\n#\n#    You should have received a copy of the GNU Affero General Public License\n#    along with this program.  
If not, see <http://www.gnu.org/licenses/>.\n#\n##############################################################################\n\nfrom openerp.osv import osv, fields\nfrom openerp.osv.expression import get_unaccent_wrapper\nfrom openerp.tools.translate import _\n\nclass res_partner(osv.osv):\n    _inherit = \"res.partner\"\n\n    _columns = {\n        'street': fields.char('Street', required=True),\n        'street2': fields.char('Street2', required=False),\n        'zip': fields.char('Zip', size=24, change_default=True, required=True),\n        'city': fields.char('City', required=True),\n        'state_id': fields.many2one(\"res.country.state\", 'State', ondelete='restrict', required=False),\n        'country_id': fields.many2one('res.country', 'Country', ondelete='restrict', required=True),\n        'email': fields.char('Email', required=True),\n        'phone': fields.char('Phone', required=True),\n        'fax': fields.char('Fax', required=False),\n        'mobile': fields.char('Mobile', required=False),\n    }\n    ","sub_path":"wk_partner_required/models/res_partner.py","file_name":"res_partner.py","file_ext":"py","file_size_in_byte":1862,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"394378826","text":"# coding: utf-8\n\n# Copyright (c) 2015, thumbor-community\n# Use of this source code is governed by the MIT license that can be\n# found in the LICENSE file.\n\nimport botocore.session\nfrom botocore.utils import fix_s3_host\nfrom tornado_botocore.base import Botocore\nfrom thumbor.utils import logger\nfrom thumbor.engines import BaseEngine\nimport functools\n\n\nclass Bucket(object):\n    _instances = {}\n\n    @staticmethod\n    def __new__(cls, bucket, region, endpoint, *args, **kwargs):\n        key = (bucket, region, endpoint) + args + functools.reduce(lambda x, y: x + y, kwargs.items(), ())\n\n        if not cls._instances.get(key):\n            cls._instances[key] = super(Bucket, cls).__new__(cls)\n\n        return cls._instances[key]\n\n    \"\"\"\n    This handles all communication with AWS API\n    \"\"\"\n    def __init__(self, bucket, region, endpoint):\n        \"\"\"\n        Constructor\n        :param string bucket: The bucket name\n        :param string region: The AWS API region to use\n        :param string endpoint: A specific endpoint to use\n        :return: The created bucket\n        \"\"\"\n        self._bucket = bucket\n        self._region = region\n        self._endpoint = endpoint\n\n        if not hasattr(self, '_session'):\n            self._session = botocore.session.get_session()\n            if endpoint is not None:\n                self._session.unregister('before-sign.s3', fix_s3_host)\n\n        if not hasattr(self, '_get_client'):\n            self._get_client = Botocore(service='s3', region_name=self._region,\n                                        operation='GetObject', session=self._session,\n                                        endpoint_url=self._endpoint)\n        if not hasattr(self, '_put_client'):\n            self._put_client = Botocore(service='s3', region_name=self._region,\n                                        operation='PutObject', session=self._session,\n                                        endpoint_url=self._endpoint)\n\n        if not hasattr(self, '_delete_client'):\n            self._delete_client = Botocore(service='s3', region_name=self._region,\n                                           operation='DeleteObject', session=self._session,\n                                           endpoint_url=self._endpoint)\n\n    async def get(self, path):\n        \"\"\"\n        Returns object at given path\n        :param string path: Path or 'key' of the AWS object to retrieve\n        \"\"\"\n        return self._get_client.call(\n            Bucket=self._bucket,\n            Key=self._clean_key(path),\n        )\n\n    async def get_url(self, path, method='GET', expiry=3600):\n        \"\"\"\n        Generates the presigned url for given key & methods\n        :param string path: Path or 'key' for requested object\n        :param string method: Method for 
requested URL\n        :param int expiry: URL validity time\n        \"\"\"\n        client = self._session.create_client('s3', region_name=self._region, endpoint_url=self._endpoint)\n\n        url = client.generate_presigned_url(\n            ClientMethod='get_object',\n            Params={\n                'Bucket': self._bucket,\n                'Key': self._clean_key(path),\n            },\n            ExpiresIn=expiry,\n            HttpMethod=method,\n        )\n\n        return url\n\n    async def put(self, path, data, metadata=None, reduced_redundancy=False, encrypt_key=False):\n        \"\"\"\n        Stores data at given path\n        :param string path: Path or 'key' for created/updated object\n        :param bytes data: Data to write\n        :param dict metadata: Metadata to store with this data\n        :param bool reduced_redundancy: Whether to use reduced-redundancy storage\n        :param bool encrypt_key: Whether to server-side encrypt the object\n        \"\"\"\n        storage_class = 'REDUCED_REDUNDANCY' if reduced_redundancy else 'STANDARD'\n        content_type = BaseEngine.get_mimetype(data) or 'application/octet-stream'\n\n        args = dict(\n            Bucket=self._bucket,\n            Key=self._clean_key(path),\n            Body=data,\n            ContentType=content_type,\n            Metadata=metadata or {},  # avoid a shared mutable default argument\n            StorageClass=storage_class,\n        )\n\n        if encrypt_key:\n            args['ServerSideEncryption'] = 'AES256'\n\n        return self._put_client.call(**args)\n\n    async def delete(self, path):\n        \"\"\"\n        Deletes key at given path\n        :param string path: Path or 'key' to delete\n        \"\"\"\n        return self._delete_client.call(\n            Bucket=self._bucket,\n            Key=self._clean_key(path),\n        )\n\n    def _clean_key(self, path):\n        logger.debug('Cleaning key: {path!r}'.format(path=path))\n        key = path\n        while '//' in key:\n            logger.debug(key)\n            key = key.replace('//', '/')\n\n        if '/' == key[0]:\n            key = key[1:]\n\n        logger.debug('Cleansed key: {key!r}'.format(key=key))\n        return key\n","sub_path":"tc_aws/aws/bucket.py","file_name":"bucket.py","file_ext":"py","file_size_in_byte":5104,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"214329667","text":"import logging\n\nfrom django.core.management.base import (\n    BaseCommand,\n)\nfrom django.db import transaction, IntegrityError\nfrom contributions.models import Contribution\nfrom contributions.services import ContributionObtainer\n\nlogger = logging.getLogger(__name__)\n\n\nclass Command(BaseCommand):\n    help = \"\"\"Loads contributions data from github.\"\"\"\n\n    def add_arguments(self, parser):\n        parser.add_argument('--username',\n                            type=str,\n                            default='MrLokans',\n                            help='Github username')\n\n    def handle(self, *args, **kwargs):\n        try:\n            with transaction.atomic():\n                obtainer = ContributionObtainer()\n                Contribution.objects.all().delete()\n                conts = obtainer.get_all_contributions(kwargs['username'])\n                conts = [Contribution.from_contribution_object(c)\n                         for c in set(conts)]\n                Contribution.objects.bulk_create(conts)\n        except IntegrityError as e:\n            logger.exception(\"Database error.\")\n","sub_path":"backend/contributions/management/commands/loadcontributions.py","file_name":"loadcontributions.py","file_ext":"py","file_size_in_byte":1091,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"71340841","text":"from flask import Flask, redirect, render_template, request, jsonify\nfrom glob import glob\nimport os\nimport json\n\nmse_folder = \"temp/\"\n\napp = Flask(__name__)\napp.config['upload_folder'] = mse_folder\n\n
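# routes: '/' serves the landing page, '/msid/<msid>' renders images for one id, '/getData/<globStr>' returns image metadata as JSON\n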
@app.route('/',methods=['GET','POST'])\ndef index(): \n    with open('static/index.html') as html_code:\n        html_text = html_code.readlines()\n    my_html = ''.join(html_text)\n    return my_html\n\n\n@app.route('/msid/<msid>')\ndef show_msid_images(msid):\n    # list the stored images for this msid\n    # generate_html is expected to be provided elsewhere; it is not defined in this file\n    msid_images = sorted(glob(\"static/images/{}-*\".format(msid)))\n    return generate_html(msid_images, msid)\n\n@app.route('/saveForm')\ndef save_form():\n    print(request.args)\n    # log the submitted form fields, then send the user back home\n    return redirect(\"/\")\n\n@app.route(\"/getData/<globStr>\", methods=['GET'])\ndef get_data(globStr):\n    image_folders = sorted(glob(\"static/images/{}*/anatomical_image.png\".format(globStr)))\n    output_urls = [\"/\"+s for s in image_folders] #static is in root\n    output_data = [{\"url\": url, \"type\": None, \"part\": None,\n                    \"pbr_name\": image_folders[i].split(\"/\")[-2]} for i, url in enumerate(output_urls)]\n    return jsonify(output_data)\n\n@app.route(\"/saveData/\", methods=['POST'])\ndef save_data():\n    data = json.loads(request.get_data().decode('utf-8'))\n    print(data)\n    #do something with the data here -- save it to a file? \n    return \"Success! This message is from the server\"\n\n@app.route(\"/saveText/\", methods=['POST'])\ndef save_text():\n    textfile = request.files['mses']\n    test = 'False'\n    if textfile.filename != \"\":\n        test = 'True'\n    print (test)\n    #if textfile == '':\n    #    return redirect(request.url)\n    #else:\n    #    textfile_name = textfile.filename\n    #    textfile.save(os.path.join(app.config['upload_folder'], textfile_name))\n    return test\n\nif __name__ == \"__main__\":\n    app.run(port=1116)\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1914,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"147137060","text":"import os \nimport csv\n#filepath\nfile_csv = os.path.join ( 'Resources',\n  'election_data.csv') \ntotal_votes = 0\notooley_votes = 0\ncorrey_votes = 0\nkhan_votes = 0\nli_votes = 0\n#format\nwith open (file_csv, 'r') as csvfile:\n    csvreader = csv.reader(csvfile, delimiter = ',')\n    header = next(csvreader)\n    #print (header)\n    for row in csvreader:\n#adding the total votes based on rows\n        total_votes += 1\n        if row[2] == \"O'Tooley\":\n            otooley_votes += 1\n        elif row[2] == \"Correy\":\n            correy_votes += 1\n        elif row[2] == \"Khan\":\n            khan_votes += 1\n        elif row[2] == \"Li\":\n            li_votes += 1\n    #candidates = [\"O'Tooley\",\"Correy\",\"Khan\",\"Li\"]\n    #votes = [otooley_votes, correy_votes, khan_votes, li_votes]\nStats = {\"O'Tooley\": otooley_votes,\"Correy\": correy_votes,\n        \"Khan\": khan_votes,\"Li\": li_votes}\n#voteCount = F.values()\n# candidate with the most votes\nStatsNew = max(Stats, key=Stats.get)\notooley_percent = (otooley_votes / total_votes) * 100\ncorrey_percent = (correy_votes / total_votes) * 100\nkhan_percent = (khan_votes / total_votes) * 100\nli_percent = (li_votes / total_votes) * 100\n#print statements\nprint(f\"Election Stats\")\nprint(f\"======================\")\nprint(f\"Total Number of Votes:{(total_votes)}\")\nprint(f\"O'Tooley:{otooley_percent:.3f}% ({otooley_votes})\")\nprint(f\"Correy:{correy_percent:.3f}% ({correy_votes})\")\nprint(f\"Khan:{khan_percent:.3f}% ({khan_votes})\")\nprint(f\"Li:{li_percent:.3f}% ({li_votes})\")\nprint(f\"Winner: {StatsNew}\")\noutput = os.path.join(\"Analysis\",\"Election_results.txt\")\nwith open(output,\"w\") as file:\n    file.write(f\"Election Stats\")\n    file.write(\"\\n\")\n    file.write(f\"======================\")\n    file.write(\"\\n\")\n    file.write (f\"Total Number of 
Votes:{(total_votes)}\")\n file.write(\"\\n\")\n file.write(f\"O'Tooley:{otooley_percent:.3f}% ({otooley_votes})\")\n file.write(\"\\n\")\n file.write(f\"Correy:{correy_percent:.3f}% ({correy_votes})\")\n file.write(\"\\n\")\n file.write(f\"Khan:{khan_percent:.3f}% ({khan_votes})\")\n file.write(\"\\n\")\n file.write(f\"Li:{li_percent:.3f}% ({li_votes})\")\n file.write(\"\\n\")\n file.write(f\"Winner: {StatsNew}\")\n","sub_path":"PyPoll/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2170,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"40815796","text":"# -----------------------------------------------------------------------------\n#\n# P A G E B O T E X A M P L E S\n#\n# Copyright (c) 2017 Thom Janssen \n# www.pagebot.io\n# Licensed under MIT conditions\n#\n# Supporting DrawBot, www.drawbot.com\n# Supporting Flat, xxyxyz.org/flat\n# -----------------------------------------------------------------------------\n#\n# 02_Contexts.py\n#\n# Make a page with a variety of elements that float into position,\n# each taking its own space.\n\nfrom pagebot import getContext\nfrom pagebot.toolbox.units import *\nfrom pagebot.toolbox.color import Color, blackColor, blueColor, greenColor\nfrom pagebot.elements import *\nfrom pagebot.document import Document\nfrom pagebot.toolbox.color import blueColor, darkGrayColor, redColor, Color, noColor, color\nfrom pagebot.conditions import *\nfrom pagebot.fonttoolbox.objects.font import findFont\nfrom pagebot.elements.paths.pagebotpath import PageBotPath\nfrom pagebot.constants import A3\n\ncontext = getContext('DrawBot')\n\n# Landscape A3.\nH, W = A3\nX0 = 100\nY0 = 100\nSQ = 150\nP = 50\nbungee = findFont('BungeeInline-Regular')\n\n# Create a new document for the current context. 
Create one automatic page.\ndoc = Document(w=W, h=H, context=context)\npage = doc[1] # Get the one and single page of the document.\npage.padding = P # Set the page padding.\n\n# Create a new blue rectangle element and align it on top-left,\n# floating to that position in case something is already there.\n# Parent of the element is the current page.\nc = (Right2Right(), Float2Top(), Float2Left())\nr = newRect(w=SQ, h=SQ, parent=page, conditions=(Left2Left(), Top2Top()), fill=(0,0,1), stroke=0)\n\n# Create a new red circle element and align it on top-left,\n# floating to that position relative to what is already there.\n# Parent of the element is the current page.\no = newOval(w=SQ, h=SQ, parent=page, conditions=c, fill=(1, 0, 0), stroke=0)\n\n# Create a new black diagonal line element and align it on top-left,\n# floating to that position relative to what is already there.\nl = newLine(parent=page, x=0, y=0, w=100, h=100, conditions=c, stroke=0, strokeWidth=10)\npoints=[(0,0), (100, 0), (150, 50), (150, 100), (100, 200)]\n# A quire is a drawboard\nq = newQuire(parent=page, conditions=c, fill=1, strokeWidth=5, stroke=0.5)\n# Make a rect with more height, so it will push down any floating from below.\nr = newRect(w=SQ, h=2*SQ, parent=page, conditions=c, fill=noColor, stroke=0, strokeWidth=1)\n\n# Create two text boxes and align it on top-left,\n# floating to that position relative to what is already there.\nnewText('Text !', parent=page, conditions=c, fontSize=60, stroke=(1, 1, 0), strokeWidth=20, textFill=0.5, font=bungee)\nnewText('Text Box', parent=page, conditions=c, stroke=0, strokeWidth=0.5, fill=(1, 1, 0), fontSize=30, font=bungee, textFill=(0, 0, 1))\n\n# A number of circles that will float the remaining space.\nfor n in range(50):\n rr = pt(40)\n o = newOval(w=rr, h=rr, parent=page, conditions=c, fill=(1, 0, 0.5), stroke=0)\n\n# Create two text boxes and align it on top-left,\n# floating to that position relative to what is already there.\nnewRect(parent=page, conditions=(Left2Left(), Fit2Width(), Float2Top(), Fit2Bottom()), fill=0.9)\n\n# Solve conditions of all placed elements on the page\npage.solve()\n\n# Set some viewing parameters.\nview = doc.view\nview.showPadding = True # Show the padding of the page, where conditions align.\n\n# Export in _export folder that does not commit in Git. 
Force to export PDF.\ndoc.export('_export/02_Contexts.pdf')\n\n\n","sub_path":"Basics/E01_Basics/E02_Contexts.py","file_name":"E02_Contexts.py","file_ext":"py","file_size_in_byte":3571,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"172294323","text":"import os\nimport numpy as np\nimport scipy.linalg as la\nimport matplotlib.pyplot as plt\n\n\ndef sort_knots(knots):\n    return knots[np.argsort(knots[:, 0])]\n\n\ndef h(xcoord):\n    return [(x1 - x0) for x0, x1 in list(zip(xcoord, xcoord[1:]))]\n\n\ndef sigma(sigmas):\n    \"\"\"[Function that builds the sigma matrix]\n\n    Args:\n        sigmas (array of floats): [contains the standard deviations]\n\n    Returns:\n        [type]: [description]\n    \"\"\"\n    # S = np.zeros((n, n))\n    # for i in range(n):\n    #     S[i][i] = sigmas[i]\n    # return S\n    return np.diag(sigmas)\n\n\ndef Q(n, g):\n    matq = np.zeros((n + 1, n - 1))\n    g0 = 1 / g[0]\n    matq[0][0] = g0\n    matq[n][n - 2] = 1 / g[-1]\n    matq[1][0] = -g0 - (1 / g[1])\n    for i in range(1, n - 1):\n        gi = 1 / g[i]\n        matq[i][i] = gi\n        matq[i + 1][i - 1] = matq[i][i]\n        matq[i + 1][i] = -gi - (1 / g[i + 1])\n    return matq\n\n\ndef D(c, h):\n    return [(c[i + 1] - c[i]) / (3 * h[i]) for i in range(0, len(c) - 1)]\n\n\ndef B(a, c, d, h):\n    return [((a[i + 1] - a[i]) / h[i] - c[i] * h[i] - d[i] * h[i] * h[i]) for i in range(0, len(a))]\n\n\ndef A(y, sigma, q, c, p):\n    \"\"\"\n    \"\"\"\n    return y - 1 / p * sigma ** 2 @ q @ y\n\n\ndef T(n, h):\n    \"\"\"[Function that builds the T matrix]\n\n    Args:\n        n (int): [size of h]\n        h (vector of floats): [h is defined by h_i = x_i+1 - x_i for 0 <= i <= n-1]\n\n    Returns:\n        [matrix] : [[n - 1][n - 1] of floats] \n    \"\"\"\n    # build the matrix T[n-1][n-1]\n    # initialise every entry to 0\n    T = np.zeros((n - 1, n - 1))\n    # fill the diagonal and the two off-diagonal bands \n    for i in range(n - 2):\n        T[i][i] = 2 * (h[i] + h[i + 1])\n        T[i][i + 1] = h[i + 1]\n        T[i + 1][i] = h[i + 1]\n    T[n - 2][n - 2] = 2 * (h[n - 2] + h[n - 1])\n    return 1 / 3 * T\n\n\ndef cholesky_2(A):\n    n = len(A)\n    # initialise the matrix L\n    # L = [[0.0] * n for i in range(n)]\n    L = np.zeros((n, n))\n\n    # compute the Cholesky decomposition\n    for i in range(n):\n        for k in range(i + 1):\n            tmp_sum = sum(L[i][j] * L[k][j] for j in range(k))\n            L[i][k] = np.sqrt(A[i][i] - tmp_sum) if i == k else (1.0 / L[k][k] * (A[i][k] - tmp_sum))\n    return L\n\n\ndef cholesky(n, A):\n    \"\"\"[Function that computes the Cholesky factorization of a matrix, returns L and L.T]\n\n    Args:\n        n (int): [dimension of the matrix]\n        A (matrix[n][n] of floats): [matrix to decompose]\n\n    Returns:\n        [type]: [description]\n    \"\"\"\n    n = len(A)\n    for j in range(n):\n        if A[j][j] <= 0:\n            return False\n        else:\n            beta = np.sqrt(A[j][j])\n            A[j][j] = beta\n            for k in range(j + 1, n):\n                A[k][j] = A[k][j] / beta\n        for l in range(j + 1, n):\n            for k in range(l, n):\n                A[k][l] = A[k][l] - A[k][j] * A[l][j]\n    # print(A)\n    # print(A.T)\n    return A, A.T\n\n\ndef resolution_systeme_cholesky(A, b):\n    \"\"\"[Function that solves Ax=b given that A is symmetric positive definite]\n\n    Args:\n        n (int): [dimension of the matrix]\n        A (matrix[n][n] of floats): [matrix]\n        b (vector of floats[n]): [right-hand side]\n\n    Returns:\n        [type]: [description]\n    \"\"\"\n    # methodology = https://i.imgur.com/ij5ZXKL.png\n    matrix_l = cholesky_2(A)\n    # Lnp = np.linalg.cholesky(A)\n    # Lsp = sp_cholesky(A)\n    # L, L_T = cholesky(n, A)\n    # print(f'L2: {L2}')\n    # print(f'L: {L}')\n    # print(f'Lnp: {Lnp}')\n    # print(f'Lsp: {Lsp}')\n    
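# solve A x = b with two triangular solves: L y = b, then L.T x = y\n    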
return la.solve(matrix_l.T, la.solve(matrix_l, b))\n\n\ndef c(Q, sigma, T, p, y):\n    \"\"\"[Function that solves (Q.T Σ^2 Q + pT )c = (p Q.T y)]\n\n    Returns:\n        [c]: [vector of floats[n]]\n    \"\"\"\n    # print(f\"Q {np.shape(Q)}\")\n    # print(f\"sigma {np.shape(sigma)}\")\n    # print(f\"T {np.shape(T)}\")\n    # print(f\"Q.T {np.shape(Q.T)}\")\n    # test = ((Q.T) @ (sigma**2)) @ Q + p * T\n    # print(f\"Q.T @ sigma**2 @ Q {test}\")\n    A = Q.T @ sigma ** 2 @ Q + p * T\n    b = p * (Q.T @ y)\n    x1 = resolution_systeme_cholesky(A, b)\n    # x2 = np.linalg.solve(A, b)\n    return x1\n\n\ndef fct(x):\n    \"\"\" test \"\"\"\n    return np.sin(x)\n\n\ndef d(c, h):\n    return [(c[i + 1] - c[i]) / (3 * h[i]) for i in range(0, len(h))]\n\n\ndef b(a, c, d, h):\n    return [((a[i + 1] - a[i]) / h[i] - c[i] * h[i] - d[i] * h[i] * h[i]) for i in range(len(h))]\n\n\ndef a(y, sigma, q, c, p):\n    # print(f\"q {np.shape(q)}\")\n    # print(f\"sigma**2 @ q {np.shape(sigma**2 @ q)}\")\n    # print(f\"c {np.shape(c)}\")\n    # print(f\"y {np.shape(y)}\")\n    return y - ((1 / p) * (sigma ** 2 @ q @ c))\n\n\ndef read_coordinate(filename):\n    \"\"\"\n    no error handling for now\n    :param filename:\n    :return:\n    \"\"\"\n    current_directory = os.path.dirname(os.path.realpath(__file__))\n    coordinates = os.path.join(current_directory, filename)\n    points = []\n    with open(coordinates, \"r\") as f:\n        for line in f:\n            row = line.split()\n            points.append((float(row[0]), float(row[1])))\n    return np.array(points)\n\n\nif __name__ == \"__main__\":\n\n    # noeuds = np.array([(3, 4), (1, 2), (0, 0), (6, 2)])\n    # sigmas = [1, 1, 1, 1]\n    # n = len(noeuds)\n    # \"\"\"[test of the function that builds the gaps h]\n    # \"\"\"\n    # noeuds = sort_knots(noeuds)\n    # #print(noeuds)\n    # h = h(noeuds)\n    # #print(h)\n    # \"\"\"[test of the function that builds the sigma matrix]\n    # \"\"\"\n    # s = sigma(n, sigmas)\n    # #print(s)\n\n    # \"\"\"[test of the Cholesky decomposition function]\n    # \"\"\"\n    # L , LT = cholesky(n, s)\n    # #print(L*LT)\n\n    # \"\"\"[test of the linear solver based on the Cholesky decomposition]\n    # \"\"\"\n    # b = [1]*n\n    # #print(b)\n    # x = resolution_systeme_cholesky(n, s, b)\n    # print(x)\n\n    # noeuds = np.array([(x * np.pi / 4, np.cos(x * np.pi / 4)) for x in range(10)])\n    # XXX: files:\n    # coordinate_{1..5}.txt -> sin, sin, cos, exp, cos\n    noeuds = read_coordinate('coordinate_1.txt')\n    nbnoeuds = len(noeuds)\n    vdefault = 1\n    n = nbnoeuds - 1\n    # sort the knots by their x coordinate\n    noeuds = sort_knots(noeuds)\n    xcoord = noeuds[:, 0]\n    ycoord = noeuds[:, 1]\n    # compute the gaps h\n    h = h(xcoord)\n    # build the sigma matrix\n    sigma = np.diag([vdefault] * nbnoeuds)\n    # build the T matrix\n    t = T(n, h)\n    # build the Q matrix \n    q = Q(n, h)\n    # find the vector c\n\n    lesp = [1e-5, 0.5, 1e5] # TODO: find an optimal p algorithmically\n    # plot the resulting spline\n    spline = lambda x, i: vecD[i] * (x - noeuds[i, 0]) ** 3 + vecC[i] * (x - noeuds[i, 0]) ** 2 + vecB[i] * (\n            x - noeuds[i, 0]) + vecA[i]\n    plt.scatter(xcoord, ycoord)\n    for p in lesp:\n        vecC = c(q, sigma, t, p, ycoord)\n        vecA = a(ycoord, sigma, q, vecC, p)\n        vecC = [0, *vecC, 0]\n        vecD = d(vecC, h)\n        vecB = b(vecA, vecC, vecD, h)\n\n        # print(f\"T = {3 * t}\")\n        # print(f\"vector C = {vecC}\")\n        # print(f\"vector A = {vecA}\")\n        # print(f\"vector D = {vecD}\")\n        # print(f\"vector B = {vecB}\")\n\n        for j in range(n):\n            xval = xcoord[j]\n            xarray = np.linspace(xval, xcoord[j + 1], n)\n            yarray = spline(xarray, j)\n            
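# spline value at the left knot; only used by the commented-out debug print below\n            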
yval = spline(xval, j)\n            # print(f'x, y = {xval:+.10f}, {yval:+.10f}')\n            plt.plot(xarray, yarray)\n    plt.show()\n","sub_path":"matrice.py","file_name":"matrice.py","file_ext":"py","file_size_in_byte":7473,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"305322251","text":"##############################################\n# The MIT License (MIT)\n# Copyright (c) 2017 Kevin Walchko\n# see LICENSE for full details\n##############################################\n\nfrom setuptools import setup\nfrom build_utils import BuildCommand\nfrom build_utils import PublishCommand\nfrom build_utils import BinaryDistribution\nfrom build_utils import get_pkg_version\n\nVERSION = get_pkg_version('ins_nav/__init__.py')\nPACKAGE_NAME = 'ins_nav'\nBuildCommand.pkg = PACKAGE_NAME\nBuildCommand.py2 = False\nPublishCommand.pkg = PACKAGE_NAME\nPublishCommand.version = VERSION\n\n\nsetup(\n    author='Kevin Walchko',\n    author_email='walchko@users.noreply.github.com',\n    name=PACKAGE_NAME,\n    version=VERSION,\n    description='A library to do inertial navigation',\n    long_description=open('readme.md').read(),\n    long_description_content_type=\"text/markdown\",\n    url='http://github.com/MomsFriendlyRobotCompany/{}'.format(PACKAGE_NAME),\n    classifiers=[\n        'Development Status :: 4 - Beta',\n        'Intended Audience :: Developers',\n        'License :: OSI Approved :: MIT License',\n        'Operating System :: OS Independent',\n        'Programming Language :: Python :: 3.7',\n        'Programming Language :: Python :: 3.6',\n        'Topic :: Software Development :: Libraries',\n        'Topic :: Software Development :: Libraries :: Python Modules',\n        'Topic :: Software Development :: Libraries :: Application Frameworks'\n    ],\n    license='MIT',\n    keywords=['library', 'robotics', 'robot', 'ins', 'inertial', 'navigation', 'ahrs', 'imu'],\n    packages=[PACKAGE_NAME],\n    install_requires=[\n        'simplejson',\n        'build_utils',\n        'squaternion',\n        'numpy'\n    ],\n    cmdclass={\n        'publish': PublishCommand,\n        'make': BuildCommand\n    },\n    # scripts=[\n    #     'bin/set_id.py',\n    #     'bin/servo_ping.py',\n    #     'bin/set_angle.py',\n    #     'bin/set_baud_rate.py',\n    #     'bin/servo_reboot.py',\n    #     'bin/servo_reset.py',\n    #     'bin/get_angle.py'\n    # ]\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":2031,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"354523554","text":"from pyspark import SparkConf,SparkContext\nimport sys\nimport operator\n\nif __name__ == \"__main__\":\n\n    conf = SparkConf().setAppName(\"FilmPriceAverage\")\n    sc = SparkContext(conf=conf)\n\n    textLoaded = sc.textFile(sys.argv[1])\n    filmList = textLoaded\n\n    # header = textLoaded.take(1)\n    # filmList = textLoaded.filter(lambda x: x != header)\n    # print(header[0])\n\n    def mergeDict(a, b):\n        z = a.copy()\n        z.update(b)\n        return z\n\n    def mergeDict2(a, b):\n        return {k: a.get(k, 0) + b.get(k, 0) for k in set(a) | set(b)}\n\n    # accumulate (price_sum, count, dict) per key; averaging running means inside\n    # reduceByKey would depend on the reduction order, so the mean is taken once at the end\n    filmSet = filmList.map(lambda x: x.split(\",\")) \\\n        .map(lambda x: ( x[3], (float(x[4]), 1, {x[6]:1} ))) \\\n        .reduceByKey(lambda l,r: (l[0]+r[0], l[1]+r[1], mergeDict2(l[2],r[2]))) \\\n        .mapValues(lambda v: (round(v[0]/v[1], 2), v[2])) \n    #  .map(lambda x: (x[0], (x[1][0], sorted(x[1][1].items(), key=operator.itemgetter(1),reverse=True) )) )\n\n    for i in sorted(filmSet.collect(), key=lambda v: v[1][0],reverse=True):\n        print(i)\n\n\n    # for i in valuesMap.take(10):\n    #     
print(i)","sub_path":"FilmPriceAverage.py","file_name":"FilmPriceAverage.py","file_ext":"py","file_size_in_byte":1025,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"517594633","text":"small = 0\r\nmiddle = 1\r\nbig = 2\r\n\r\nm = 1500 # kg\r\n\r\n# кинетическая энергия\r\ndef e_kin(v):\r\n return round(0.5 * m * v**2, 3)\r\n\r\n# нечеткое отношение для скорости\r\n# \"маленькая скорость\" - полутрапеция с правой стороной Mu([0, 10]) = 1, Mu(15) = 0\r\n# \"средняя скорость\" - треугольное распределение Mu(10) = 0, Mu(15) = 1, Mu(20) = 0 \r\n# \"большая скорость\" - полутрапеция с левой стороной Mu(15) = 0, Mu([20, +inf]) = 1 \r\ndef fuzzy_v(v):\r\n s = [10, 15]\r\n m = [10, 15, 20]\r\n b = [15, 20]\r\n \r\n # s - маленький\r\n # m - средний\r\n # b - большой\r\n # l - левая граница распределения\r\n # r - правая граница распределения\r\n # формулы расчета степени принадлежности к каждому распределению\r\n s_l = 1 if v < s[0] else 0\r\n s_r = round((s[1] - v) / (s[1] - s[0]), 3)\r\n m_l = round((v - m[0]) / (m[1] - m[0]), 3)\r\n m_r = round((m[2] - v) / (m[2] - m[1]), 3)\r\n b_l = round((v - b[0]) / (b[1] - b[0]), 3)\r\n b_r = 1 if v > b[1] else 0\r\n\r\n # если степень принадлежност больше 0 и меньше либо равно 1, то это то, что нужно :)\r\n mu = {'s_l': s_l, 's_r': s_r, 'm_l': m_l, 'm_r': m_r, 'b_l': b_l, 'b_r': b_r}\r\n mu = {key: val for key, val in mu.items() if val > 0 and val <= 1}\r\n\r\n return mu\r\n\r\n# нечеткое отношение для расстояния\r\n# \"маленькое расстояние\" - полутрапеция с правой стороной Mu([0, 20]) = 1, Mu(40) = 0\r\n# \"среднее расстояние\" - трапециевидное распределение Mu(20) = 0, Mu([40, 280]) = 1, Mu(300) = 0 \r\n# \"большое расстояние\" - полутрапеция с левой стороной Mu(280) = 0, Mu([300, +inf]) = 1 \r\ndef fuzzy_l(l):\r\n s = [20, 40]\r\n m = [20, 40, 280, 300]\r\n b = [280, 300]\r\n\r\n # s - маленький\r\n # m - средний\r\n # b - большой\r\n # l - левая граница распределения\r\n # m - средняя граница (у трапеции верхнее основание)\r\n # r - правая граница распределения\r\n # формулы расчета степени принадлежности к каждому распределению\r\n s_l = 1 if l < s[0] else 0\r\n s_r = round((s[1] - l) / (s[1] - s[0]), 3)\r\n m_l = round((l - m[0]) / (m[1] - m[0]), 3)\r\n m_m = 1 if m[1] < l < m[2] else 0\r\n m_r = round((m[3] - l) / (m[3] - m[2]), 3)\r\n b_l = round((l - b[0]) / (b[1] - b[0]), 3)\r\n b_r = 1 if l > b[1] else 0\r\n\r\n mu = {'s_l': s_l, 's_r': s_r, 'm_l': m_l, 'm_m': m_m, 'm_r': m_r, 'b_l': b_l, 'b_r': b_r}\r\n mu = {key: val for key, val in mu.items() if val > 0 and val <= 1}\r\n\r\n return mu\r\n\r\n# контроллер\r\ndef sugeno(v, l):\r\n # e = e_kin(v)\r\n # right_out = [[7*e/8, 4*e/9, 2*e/3],\r\n # [-43*e/60, 5*e/8, 3*e/4],\r\n # [-e, -e/4, e/2]]\r\n\r\n # правый вывод правил\r\n # значения не настроены, мне лень\r\n right_out = [[300, 4*10**3, 6*10**5],\r\n [3*10**3, 2*10**3, 3.5*10**3],\r\n [700, 400, 100]]\r\n \r\n # получить степени принадлежностей\r\n mu_v = fuzzy_v(v)\r\n mu_l = fuzzy_l(l)\r\n\r\n # print('v:', mu_v)\r\n # print('l:', mu_l)\r\n\r\n # считаем выход\r\n # out = sum(y_i) * w_i / sum(w_i)\r\n # где y_i - значения из таблицы, w_i - степень активации правила; t-норма - минимум\r\n y = 0\r\n w = 0\r\n for kv, vv in mu_v.items():\r\n if kv[0] == 's': i = small\r\n elif kv[0] == 'm': i = middle\r\n elif kv[0] == 'b': i = big\r\n\r\n for kl, vl in mu_l.items():\r\n if kl[0] == 's': j = small\r\n elif kl[0] == 'm': j = middle\r\n elif kl[0] == 'b': j = big\r\n \r\n # print(min(vv, 
w += min(vv, vl) # sum(w_i)\r\n            y += right_out[j][i] * min(vv, vl) # sum(y_i * w_i)\r\n            # print('ji: {} {} {}'.format(j, i, right_out[j][i]))\r\n    \r\n    y = round(y, 3)\r\n    w = round(w, 3)\r\n\r\n    return y / w\r\n\r\n\r\nl = 150 # m\r\nv = 40 # km/h\r\nv /= 3.6 # m/s\r\n\r\n# for plotting\r\nv_graphic = []\r\nl_graphic = []\r\n\r\nprint('Beg: v = {} m/s v = {} km/h l = {} m'.format(v, v*3.6, l))\r\nfor t in range(500):\r\n    v_graphic.append(v)\r\n    l_graphic.append(l)\r\n\r\n    e_brake = sugeno(v, l)\r\n    e_k = e_kin(v) - e_brake\r\n    l -= v\r\n\r\n    if l < 0:\r\n        print('crash v = {} km/h v = {} m/s'.format(v*3.6, v))\r\n        break\r\n\r\n    v = int(((2 * e_k) / m)**(1/2))\r\n\r\n    if v == 0:\r\n        print('stop l = {} m'.format(l))\r\n        break\r\n\r\nprint('End: v = {} km/h l = {} m'.format(v*3.6, l))\r\n\r\n\r\n# plot distance against speed\r\n# the plot is easier to read right to left, since we do not start from zero speed\r\nimport matplotlib.pyplot as plt\r\nplt.plot(v_graphic, l_graphic)\r\nplt.show()","sub_path":"AoFIS/sugeno/sugeno.py","file_name":"sugeno.py","file_ext":"py","file_size_in_byte":5453,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"616605248","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Jul 11 11:55:43 2018\n\n@author: danpal\n\"\"\"\n\nfrom Bio import SeqIO\nfrom Bio.Blast.Applications import NcbipsiblastCommandline\nfrom utilities import runProcess, checkDirPath\n\n\ndef execPsiblast(seq,\n                 db,\n                 num_iterations=1,\n                 num_threads=1,\n                 outfmt=0,\n                 **kwargs):\n    \"\"\"Do psi-blast search\"\"\"\n    cline = NcbipsiblastCommandline(db=db,\n                                    num_iterations=num_iterations,\n                                    num_threads=num_threads,\n                                    outfmt=outfmt,\n                                    **kwargs)\n    cmd = str(cline).split()\n    input_ = seq.format('fasta').encode('utf-8')\n    return runProcess(cmd, input_)\n\n\ndef parseFileAndPsiblast(file, out_path, db, num_threads=2, outfmt=16):\n    out_path = checkDirPath(out_path, makedirs=True)\n    seq_iter = SeqIO.parse(file, 'fasta')\n    out = []\n    for seq in seq_iter:\n        name = seq.name.split(sep='|')\n        name = f'psib_{name[1]}_{name[2]}.xml'\n        out.append(execPsiblast(seq,\n                                db,\n                                num_threads=num_threads,\n                                outfmt=outfmt,\n                                out=out_path + name))\n    return out\n\n\nif __name__ == '__main__':\n    path = '/home/danpal/Unidad/04_Genomica/'\n    file = path + 'knotoid_nr_db/knotoids/fasta/clustered_knotoids.fasta'\n    out_path = path + 'out_dir'\n    db = path + 'swissprot2018june/swissprot2018june'\n    parseFileAndPsiblast(file, out_path, db)\n","sub_path":"knot_genomic/psiblast_search.py","file_name":"psiblast_search.py","file_ext":"py","file_size_in_byte":1634,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"465459460","text":"#!/usr/bin/env python3\n\nimport sys\nimport os\nimport time\nimport numpy as np\nimport theano\nimport theano.tensor as T\nimport lasagne\nfrom six.moves import cPickle\n\n#print images from numpy array\nimport matplotlib\nimport matplotlib.pyplot as plt\n\ndef display(input_array, filename, title, prediction):\n\tif not os.path.isdir('./Std/WrongTests'):\n\t\tos.mkdir('./Std/WrongTests')\n\tfig=plt.figure(1)\n\tax=plt.subplot(111)\n\tplot=plt.imshow(input_array, cmap=matplotlib.cm.Greys)\n\tplt.title('actual: ' + title + ' predicted: '+prediction)\n\tfig.savefig('./Std/WrongTests/' + filename)\n\n#Loading data from MNIST\n\nimport urllib.request\nimport gzip\n\ndef load_dataset():\n\tdef download(filename, 
source='http://yann.lecun.com/exdb/mnist/'):\n\t\tprint('Downloading {}'.format(filename))\n\t\turllib.request.urlretrieve(source + filename, filename)\n\n\tdef load_mnist_images(filename):\n\t\tif not os.path.exists(filename):\n\t\t\tdownload(filename)\n\t\twith gzip.open(filename, 'rb') as f:\n\t\t\tdata = np.frombuffer(f.read(), np.uint8, offset=16)\n\t\tdata = data.reshape(-1, 1, 28, 28)\n#normalize the data\n\t\treturn data / np.float32(256)\n\n\tdef load_mnist_labels(filename):\n\t\tif not os.path.exists(filename):\n\t\t\tdownload(filename)\n\t\twith gzip.open(filename, 'rb') as f:\n\t\t\tdata = np.frombuffer(f.read(), np.uint8, offset=8)\n\t\treturn data\n\n\tX_train = load_mnist_images('train-images-idx3-ubyte.gz')\n\ty_train = load_mnist_labels('train-labels-idx1-ubyte.gz')\n\tX_test = load_mnist_images('t10k-images-idx3-ubyte.gz')\n\ty_test = load_mnist_labels('t10k-labels-idx1-ubyte.gz')\n\n#Obtain the validation set\n\tX_train, X_val = X_train[:-1000], X_train[-1000:]\n\ty_train, y_val = y_train[:-1000], y_train[-1000:]\n\n\treturn X_train, y_train, X_val, y_val, X_test, y_test\n\ndef build_cnn(input_var=None):\n\tnetwork = lasagne.layers.InputLayer(shape=(None, 1, 28, 28), input_var=input_var)\n\n\tnetwork = lasagne.layers.Conv2DLayer(\n\t\t\tnetwork, num_filters=32, filter_size=(3,3),\n\t\t\tnonlinearity=lasagne.nonlinearities.rectify,\n\t\t\tW=lasagne.init.GlorotUniform())\n\tnetwork = lasagne.layers.Conv2DLayer(\n\t\t\tnetwork, num_filters=32, filter_size=(3,3),\n\t\t\tnonlinearity=lasagne.nonlinearities.rectify,\n\t\t\tW=lasagne.init.GlorotUniform())\n\tnetwork = lasagne.layers.MaxPool2DLayer(network, pool_size=(2,2))\n\tnetwork = lasagne.layers.Conv2DLayer(\n\t\t\tnetwork, num_filters=64, filter_size=(3,3),\n\t\t\tnonlinearity=lasagne.nonlinearities.rectify,\n\t\t\tW=lasagne.init.GlorotUniform())\n\tnetwork = lasagne.layers.Conv2DLayer(\n\t\t\tnetwork, num_filters=64, filter_size=(3,3),\n\t\t\tnonlinearity=lasagne.nonlinearities.rectify,\n\t\t\tW=lasagne.init.GlorotUniform())\n\tnetwork = lasagne.layers.MaxPool2DLayer(network, pool_size=(2,2))\n\n\tnetwork = lasagne.layers.DropoutLayer(\n\t\t\tnetwork, p=0.5)\n\tnetwork = lasagne.layers.DenseLayer(\n\t\t\tnetwork, num_units=200,\n\t\t\tnonlinearity=lasagne.nonlinearities.rectify)\n\tnetwork = lasagne.layers.DropoutLayer(\n\t\t\tnetwork, p=0.5)\n\tnetwork = lasagne.layers.DenseLayer(\n\t\t\tnetwork, num_units=200,\n\t\t\tnonlinearity=lasagne.nonlinearities.rectify)\n\tnetwork = lasagne.layers.DropoutLayer(\n\t\t\tnetwork, p=0.5)\n\n\tnetwork = lasagne.layers.DenseLayer(\n\t\t\tnetwork, num_units=10,\n\t\t\tnonlinearity=lasagne.nonlinearities.softmax)\n\n\treturn network\n\n#Batch generator\ndef gen_batches(inputs, targets, batchsize, shuffle=False):\n\tassert len(inputs) == len(targets)\n\tif shuffle:\n\t\tindices = np.arange(len(inputs))\n\t\tnp.random.shuffle(indices)\n\tfor start in range(0, len(inputs) - batchsize + 1, batchsize):\n\t\tif shuffle:\n\t\t\texcerpt = indices[start:start + batchsize]\n\t\telse:\n\t\t\texcerpt = slice(start, start + batchsize)\n\t\tyield inputs[excerpt], targets[excerpt]\n\n#training\ndef main(num_epochs=50, save_num=0):\n\t#load the dataset\n\tprint(\"Loading the dataset\")\n\tX_train, y_train, X_val, y_val, X_test, y_test = load_dataset()\n#define Theano variables\n\tinput_var = T.tensor4('input_var')\n\ttarget_var = T.ivector('target_var')\n#create CNN\n\tprint(\"building the model\")\n\tnetwork = build_cnn(input_var)\n#cost function \n\tprediction = 
lasagne.layers.get_output(network)\n\tpenalty = lasagne.regularization.regularize_layer_params(\n\t\t\tnetwork, lasagne.regularization.l2)\n\tloss = lasagne.objectives.categorical_crossentropy(prediction, target_var)\n\tloss = loss.mean() + penalty*0.01\n#training\n\tparams = lasagne.layers.get_all_params(network, trainable=True)\n\tupdates = lasagne.updates.nesterov_momentum(\n\t\t\tloss, params, learning_rate=0.1, momentum=0.5)\n\t#test_loss\n\ttest_prediction = lasagne.layers.get_output(network, deterministic=True)\n\ttest_loss_raw = lasagne.objectives.categorical_crossentropy(test_prediction, target_var)\n\ttest_loss = test_loss_raw.mean()\n\tgradient = T.grad(test_loss, input_var)\n\ttest_loss = test_loss + penalty*0.01\n#test accuracy\n\ttest_acc = T.mean(T.eq(T.argmax(test_prediction, axis=1), target_var),\n\t\t\tdtype=theano.config.floatX)\n\t#compile functions\n\ttrain_fn = theano.function([input_var, target_var], loss, updates=updates)\n\tval_fn = theano.function([input_var, target_var], [test_loss, test_acc])\n#helper prediction function\n\tsimple_prediction = theano.function([input_var], test_prediction)\n#gradient\n\tgradient_f = theano.function([input_var, target_var], gradient)\n\tloss_f = theano.function([input_var, target_var], test_loss_raw)\n\n\t\n\n\t#Run the training\n\tprint(\"Training starts\")\n\tfor epoch in range(num_epochs): #training\n\t\ttrain_err=0\n\t\ttrain_batches=0\n\t\tstart_time=time.time()\n\t\tfor batch in gen_batches(X_train, y_train, 128, shuffle=True):\n\t\t\tinputs, targets = batch\n\t\t\ttrain_err += train_fn(inputs, targets)\n\t\t\ttrain_batches += 1\n#validation\n\t\tval_err = 0\n\t\tval_acc = 0\n\t\tval_batches = 0\n\t\tfor batch in gen_batches(X_val, y_val, 128):\n\t\t\tinputs, targets = batch\n\t\t\terr, acc = val_fn(inputs, targets)\n\t\t\tval_err += err\n\t\t\tval_acc += acc\n\t\t\tval_batches += 1\n#print the results\n\t\tprint(\"Epoch {} of {} took {:.5f}s\".format(\n\t\t\tepoch + 1, num_epochs, time.time() - start_time))\n\t\tprint(\" training loss:\\t{:.10f}\".format(train_err / train_batches))\n\t\tprint(\" validation loss:\\t{:.10f}\".format(val_err / val_batches))\n\t\tprint(\" validation accuracy:\\t{:.5f} %\".format(\n\t\t\tval_acc / val_batches * 100))\n\n\n\t#Test\n\ttest_err = 0\n\ttest_acc = 0\n\ttest_batches = 0\n\ti = 0\n\tfor batch in gen_batches(X_test, y_test, 128):\n\t\tinputs, targets = batch\n\t\terr, acc = val_fn(inputs, targets)\n\t\ttest_err += err\n\t\ttest_acc += acc\n\t\ttest_batches += 1\n\t\tpre_list = simple_prediction(inputs)\n\t\tpre_list = np.argmax(pre_list, axis=1)\n\t\terr_indices = np.not_equal(pre_list, targets)\n\t\tif save_num:\n\t\t\tprint(\"Saving the wrong pictures of batch\", i)\n\t\t\tsave_num -= 1\n\t\t\tfor index, num in enumerate(err_indices):\n\t\t\t\tif num == 1:\n\t\t\t\t\tdisplay(inputs[index][0], \n\t\t\t\t\t'actual_' + str(targets[index]) + '_' + \n\t\t\t\t\t'predict_' + str(pre_list[index]) + '_' +\n\t\t\t\t\t'_batch' + str(i) + '_' + str(index) + '.png', \n\t\t\t\t\tstr(targets[index]), str(pre_list[index]))\n\t\ti += 1\n\n\n\tprint (\"Testing results:\")\n\tprint (\" test loss:\\t\\t{:.10f}\".format(test_err / test_batches))\n\tprint (\" test accuracy:\\t{:.5f} %\".format(\n\t\ttest_acc / test_batches * 100))\n\tprint (\"Saving the network\")\n\tf=open('./Std/std_f', 'wb')\n\tcPickle.dump(simple_prediction,f,cPickle.HIGHEST_PROTOCOL)\n\tf.close()\n\tf=open('./Std/std_grad_f', 'wb')\n\tcPickle.dump(gradient_f,f,cPickle.HIGHEST_PROTOCOL)\n\tf.close()\n\tf=open('./Std/std_loss_f', 
'wb')\n\tcPickle.dump(loss_f,f,cPickle.HIGHEST_PROTOCOL)\n\tf.close()\n\t'''\n\tf=open('std_network.cnn', 'wb')\n\tcPickle.dump(network, f, cPickle.HIGHEST_PROTOCOL)\n\tf.close()\n\tf=open('std_network.var', 'wb')\n\tcPickle.dump(lasagne.layers.get_all_param_values(network), f, cPickle.HIGHEST_PROTOCOL)\n\tf.close()\n\t'''\n\nif __name__ == '__main__':\n\tif not os.path.isdir('./Std'):\n\t\tos.mkdir('./Std')\n\tnum_epochs = 50\n\tsave_num = 0\n\tif len(sys.argv) > 1:\n\t\tnum_epochs = int(sys.argv[1])\n\tif len(sys.argv) > 2:\n\t\tsave_num = int(sys.argv[2])\n\tmain(num_epochs, save_num)\n\n","sub_path":"std_CNN.py","file_name":"std_CNN.py","file_ext":"py","file_size_in_byte":7773,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"409180482","text":"def menu(opcion,x,y):\n    switch = {\n        1: x+y,\n        2: x-y,\n        3: x*y,\n        4: x/y,\n    }\n    return switch.get(opcion, \"Option not available\")\ni=0\nwhile i<1:\n    if i!=1:\n        print('Menu\\n 1: Add \\n 2: Subtract \\n 3: Multiply \\n 4: Divide')\n        a=int(input('Menu option: '))\n        x=int(input('Value 1: '))\n        y=int(input('Value 2: '))\n        print(menu(a,x,y))\n        i=int(input('Perform another operation? Yes = 0 | No = 1 : '))\n","sub_path":"first/first/menu.py","file_name":"menu.py","file_ext":"py","file_size_in_byte":465,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"620607546","text":"#! /usr/bin/env python2\n# -*- coding: utf-8 -*-\n\nimport argparse\nimport django\nimport requests\nimport sys\n\n\ndjango.setup()\n\nfrom django.conf import settings\nfrom django.core.urlresolvers import reverse\n\nfrom twilio.rest import Client\n\n# set up command-line options\ndesc = \"\"\"\n    Send an SMS from a messaging service SID\n\"\"\"\n\n# RawTextHelpFormatter method allows for new lines in help text\nparser = argparse.ArgumentParser(\n    description=desc, formatter_class=argparse.RawTextHelpFormatter\n)\n\nparser.add_argument(\n    '-p',\n    '--phone',\n    required=True,\n    help=\"Phone number to which we are sending the SMS\",\n    dest='phone',\n)\nparser.add_argument(\n    '--test',\n    action='store_true',\n    help=\"Dry run?\",\n    dest='test',\n)\n\n\ndef main():\n    \"\"\"Send an SMS from a messaging service SID.\"\"\"\n    client = Client(settings.TWILIO_ACCOUNT_SID, settings.TWILIO_AUTH_TOKEN)\n    status_callback_url = 'https://{0}{1}{2}'.format(\n        settings.SERVER_URL,\n        settings.ROOT_URL,\n        reverse('sms_status_callback', args=[666]),\n    )\n    if test:\n        #print(reverse('sms_status_callback', args=[666]))\n        #print(request.build_absolute_uri(reverse('sms_status_callback', args=(666, )))\n        #request = requests.get(reverse('sms_status_callback', args=(666, )))\n        #print(request.text)\n        print(status_callback_url)\n        print(phone)\n    else:\n        print(status_callback_url)\n        message = client.messages.create(\n            from_=settings.TWILIO_TEST_MESSAGING_SERVICE_SID,\n            to=phone,\n            body='who does your taxes?',\n            status_callback=status_callback_url,\n        )\n\n        print(message.__dict__)\n        print(message.sid)\n\n\nif __name__ == '__main__':\n\n    args = parser.parse_args()\n    phone = args.phone\n    test = args.test\n\n    if test:\n        print(args)\n\n    sys.exit(main())\n","sub_path":"djtwilio/bin/service.py","file_name":"service.py","file_ext":"py","file_size_in_byte":1863,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"295616733","text":"from MPS_Class import MpsOpenBoundaryClass as Mob\r\nfrom Parameters import parameter_dmrg\r\nimport Hamiltonian_Module as Hm\r\nimport 
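A note on the dispatch pattern in the menu.py record above: building the dict from computed values evaluates every branch eagerly, so `4: x/y` raises ZeroDivisionError whenever y is 0, even if the user picked addition. A minimal lazy variant stores callables instead (illustrative sketch, not part of the original file):

```python
# Lazy dispatch table: only the selected branch is ever evaluated.
def menu(opcion, x, y):
    switch = {
        1: lambda: x + y,
        2: lambda: x - y,
        3: lambda: x * y,
        4: lambda: x / y,  # evaluated only when option 4 is chosen
    }
    op = switch.get(opcion)
    return op() if op else "Option not available"

print(menu(1, 2, 0))  # prints 2; no ZeroDivisionError
```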
numpy as np\r\nimport time\r\n\r\nis_debug = False\r\nis_parallel = True\r\nis_save_op = False\r\n\r\n\r\ndef get_bond_energies(eb_full, positions, index2):\r\n    nl = positions.shape[0]\r\n    nh = eb_full.size\r\n    eb = np.zeros((nl, 1))\r\n    for i in range(0, nh):\r\n        p = (index2[i, 0] == positions[:, 0]) * (index2[i, 1] == positions[:, 1])\r\n        p = np.nonzero(p)\r\n        eb[p] += eb_full[i]\r\n    return eb\r\n\r\n\r\ndef dmrg_finite_size(para=None):\r\n    t_start = time.time()\r\n    info = dict()\r\n    print('Preparing the parameters and MPS')\r\n    if para is None:\r\n        para = parameter_dmrg()\r\n    # obtain spin operators\r\n    # define interaction index\r\n    # index1[n, 1]-th operator is at the index[n, 0]-th site\r\n    index1 = np.mat(np.arange(0, para['l']))\r\n    index1 = np.vstack((index1, 6 * np.ones((1, para['l'])))).T.astype(int)\r\n\r\n    # index2[n, 2]-th operator is at the index[n, 0]-th site\r\n    # index2[n, 3]-th operator is at the index[n, 1]-th site\r\n    if para['lattice'] == 'chain':\r\n        para['positions_h2'] = Hm.positions_nearest_neighbor_1d(para['l'], para['bound_cond'])\r\n    elif para['lattice'] == 'square':\r\n        para['positions_h2'] = Hm.positions_nearest_neighbor_square(\r\n            para['square_width'], para['square_height'], para['bound_cond'])\r\n    index2 = Hm.interactions_position2full_index_heisenberg_two_body(para['positions_h2'])\r\n    para['nh'] = index2.shape[0]  # number of two-body interactions\r\n    # Initialize MPS\r\n    A = Mob(length=para['l'], d=para['d'], chi=para['chi'], way='qr', ini_way='r', debug=is_debug,\r\n            is_parallel=is_parallel, is_save_op=is_save_op)\r\n    # define the coefficients for one-body terms\r\n    op_half = Hm.spin_operators(para['spin'])\r\n    A.append_operators([-para['hx']*op_half['sx'] - para['hz']*op_half['sz']])  # the 6th operator for magnetic fields\r\n    coeff1 = np.ones((index1.shape[0], 1))\r\n    coeff2 = np.ones((index2.shape[0], 1))\r\n    for i in range(0, index2.shape[0]):\r\n        if (i % 3) == 0:\r\n            coeff2[i, 0] = para['jxy'] / 2\r\n            coeff2[i + 1, 0] = para['jxy'] / 2\r\n            coeff2[i + 2, 0] = para['jz']\r\n\r\n    A.correct_orthogonal_center(para['ob_position'])\r\n    print('Starting to sweep ...')\r\n    e0_total = 0\r\n    info['convergence'] = 1\r\n    ob = dict()\r\n    for t in range(1, para['sweep_time']+1):\r\n        if_ob = ((t % para['dt_ob']) == 0) or t == (para['sweep_time'] - 1)\r\n        if if_ob:\r\n            print('In the %d-th round of sweep ...' % t)\r\n        for n in range(para['ob_position']+1, para['l']):\r\n            if para['if_print_detail']:\r\n                print('update the %d-th tensor from left to right...' % n)\r\n            A.update_tensor_eigs(n, index1, index2, coeff1, coeff2, para['tau'], para['is_real'],\r\n                                 tol=para['eigs_tol'])\r\n        for n in range(para['l']-2, -1, -1):\r\n            if para['if_print_detail']:\r\n                print('update the %d-th tensor from right to left...' % n)\r\n            A.update_tensor_eigs(n, index1, index2, coeff1, coeff2, para['tau'], para['is_real'],\r\n                                 tol=para['eigs_tol'])\r\n        for n in range(1, para['ob_position']):\r\n            if para['if_print_detail']:\r\n                print('update the %d-th tensor from left to right...' 
% n)\r\n            A.update_tensor_eigs(n, index1, index2, coeff1, coeff2, para['tau'], para['is_real'],\r\n                                 tol=para['eigs_tol'])\r\n\r\n        if if_ob:\r\n            ob['eb_full'] = A.observe_bond_energy(index2, coeff2)\r\n            ob['mx'] = A.observe_magnetization(1)\r\n            ob['mz'] = A.observe_magnetization(3)\r\n            ob['e_per_site'] = (sum(ob['eb_full']) - para['hx']*sum(ob['mx']) - para['hz']*sum(ob['mz']))/A.length\r\n            info['convergence'] = abs(ob['e_per_site'] - e0_total)\r\n            if info['convergence'] < para['break_tol']:\r\n                print('Converged at the %d-th sweep with error = %g of energy per site.' % (t, info['convergence']))\r\n                break\r\n            else:\r\n                print('Convergence error of energy per site = %g' % info['convergence'])\r\n            e0_total = ob['e_per_site']\r\n        if t == para['sweep_time'] - 1 and info['convergence'] > para['break_tol']:\r\n            print('Not converged with error = %g of energy per site' % info['convergence'])\r\n            print('Consider increasing para[\'sweep_time\']')\r\n    ob['eb'] = get_bond_energies(ob['eb_full'], para['positions_h2'], index2)\r\n    A.calculate_entanglement_spectrum()\r\n    A.calculate_entanglement_entropy()\r\n    info['t_cost'] = time.time() - t_start\r\n    print('Simulation finished in %g seconds' % info['t_cost'])\r\n    # A.report_yourself()\r\n    # A.check_orthogonality_by_tensors(tol=1e-14)\r\n    # time.sleep(0.05)\r\n    # print(ob['eb_full'])\r\n    # print(index2)\r\n    # print(coeff2)\r\n    # print(operators[4])\r\n    # print(operators[5])\r\n    # print(para['jxy'])\r\n    # print(A.effect_s.keys())\r\n    # print('The length of effective_s: %d' % len(A.effect_s))\r\n    A.clean_to_save()\r\n    return ob, A, info, para\r\n","sub_path":"MPS/DMRG_anyH.py","file_name":"DMRG_anyH.py","file_ext":"py","file_size_in_byte":5224,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"24001686","text":"from Helpers import normalizeSparse\nfrom Ramp import rungeKuttaRampNew, rungeKuttaStep\nfrom MatrixMethods import makeStateAFM, phaseCorrectAFM, makeState\nfrom sympy import *\nimport scipy.sparse\nimport numpy as np\n\n\ndef initialiseSimulation(N, magneticorder, rampdir, k=6):\n    if magneticorder == \"AFM\":\n        AFMState = makeStateAFM(N - 1, kopt=k)\n        AFMState = phaseCorrectAFM(N - 1, AFMState)\n        targetState = 0\n        initialState_fs = scipy.sparse.kron([0, 1], AFMState).transpose()\n\n        # Loading the sparse matrices V\\dagger and F\n        V = scipy.sparse.load_npz(\n            \"MatrixGeneration/V_\" + str(N) + \"_allJ_Sz_1subspace.npz\"\n        )\n        F = scipy.sparse.load_npz(\n            \"MatrixGeneration/F_\" + str(N) + \"_allJ_Sz_1subspace.npz\"\n        )\n\n    elif magneticorder == \"FM\":\n\n        config = [0 for i in range(N - 1)]\n        config.insert(0, 1)\n\n        configtarget = [0 for i in range(N - 1)]\n        configtarget.insert(len(configtarget), 1)\n\n        initialState_fs = makeState(config).transpose()\n        targetState_fs = makeState(configtarget).transpose()\n\n        V = scipy.sparse.load_npz(\n            \"MatrixGeneration/V_\"\n            + str(N)\n            + \"_allJ_Sz_\"\n            + str(-(N - 2))\n            + \"subspace.npz\"\n        )\n        F = scipy.sparse.load_npz(\n            \"MatrixGeneration/F_\"\n            + str(N)\n            + \"_allJ_Sz_\"\n            + str(-(N - 2))\n            + \"subspace.npz\"\n        )\n        targetState = V.transpose() * targetState_fs\n\n    # Transforming initial state into contracted space\n    initialState = V.transpose() * initialState_fs\n\n    H = scipy.sparse.load_npz(\n        \"MatrixGeneration/Hinitial_\" + str(N) + rampdir + magneticorder + \".npz\"\n    )\n    Htar = scipy.sparse.load_npz(\n        \"MatrixGeneration/Htarget_\" + str(N) + rampdir + magneticorder + \".npz\"\n    )\n\n    return initialState, targetState, H, Htar, V, F\n\n\ndef 
maxFidelityCostFunction(\n grad, magneticorder=None, simulationparameters=None, dt=0.01, p=0.01\n):\n initialState, targetState, H, Htar, V, F = simulationparameters\n\n # Calculating the simulation time\n T = -2 * grad * atanh(2 * p - 1)\n\n # Setting up the simulation\n currentState = initialState\n f = []\n t_curr = 0\n\n while t_curr < T:\n # Computing the proportions of the Hamiltonian at each timestep, along with the values needed to compute RK step\n\n ramp = rungeKuttaRampNew(t_curr, dt, grad, p)\n\n # Updating the Hamiltonian\n\n Hcurr = (1 - ramp[0]) * H + ramp[0] * Htar\n H_dt2 = (1 - ramp[1]) * H + ramp[1] * Htar\n H_dt = (1 - ramp[2]) * H + ramp[2] * Htar\n\n # Performing the Runge-Kutta step\n currentState = rungeKuttaStep(currentState, Hcurr, H_dt2, H_dt, dt)\n\n # Renormalising the state\n currentState = normalizeSparse(currentState)\n if magneticorder == \"AFM\":\n # Transforming the state into the space to calculate fidelity\n currentState_f = F.transpose() * V * currentState\n\n # Appending current fidelity to array\n f.append(abs(currentState_f).power(2).sum())\n\n elif magneticorder == \"FM\":\n f.append(\n np.abs(\n np.dot(\n flatten(targetState.toarray()), flatten(currentState.toarray())\n )\n )\n )\n\n t_curr += dt\n\n return -np.max(f)\n\n\ndef maxFidelityNoisyChain(\n grad, magneticorder=None, simulationparameters=None, ramp=None, dt=0.01, p=0.01\n):\n initialState, targetState, H, Htar, V, F = simulationparameters\n\n # Calculating the simulation time\n T = -2 * grad * atanh(2 * p - 1)\n\n # Setting up the simulation\n currentState = initialState\n f = []\n ramps = []\n t_curr = 0\n rampcounter = 0\n\n while t_curr < 5 and rampcounter + 2 < len(ramp):\n # Updating the Hamiltonian\n\n Hcurr = (1 - ramp[rampcounter]) * H + ramp[rampcounter] * Htar\n H_dt2 = (1 - ramp[rampcounter + 1]) * H + ramp[rampcounter + 1] * Htar\n H_dt = (1 - ramp[rampcounter + 2]) * H + ramp[rampcounter + 2] * Htar\n\n rampcounter = rampcounter + 2\n\n # Performing the Runge-Kutta step\n currentState = rungeKuttaStep(currentState, Hcurr, H_dt2, H_dt, dt)\n\n # Renormalising the state\n currentState = normalizeSparse(currentState)\n if magneticorder == \"AFM\":\n # Transforming the state into the space to calculate fidelity\n currentState_f = F.transpose() * V * currentState\n\n # Appending current fidelity to array\n f.append(abs(currentState_f).power(2).sum())\n\n elif magneticorder == \"FM\":\n f.append(\n np.abs(\n np.dot(\n flatten(targetState.toarray()), flatten(currentState.toarray())\n )\n )\n )\n\n ramps.append(ramp[rampcounter])\n t_curr += dt\n\n # plt.figure(figsize=(8,6), dpi=70)\n # plt.plot([i*dt for i in range(len(f))],f, color='blue')\n # plt.plot([i*dt for i in range(len(ramps))],ramps, '--', color='red')\n # plt.ylabel(\"Fidelity\")\n # plt.xlabel(\"$Jt$\")\n # plt.show()\n\n return np.max(f)\n\n\ndef initialiseSimulationThermal(N, magneticorder, rampdir, AFMState):\n AFMState = phaseCorrectAFM(N - 1, AFMState)\n targetState = 0\n initialState_fs = scipy.sparse.kron([0, 1], AFMState).transpose()\n\n # Loading the sparse matrix V\\dagger and\n V = scipy.sparse.load_npz(\"V_\" + str(N) + \"_allJ_Sz_1subspace.npz\")\n F = scipy.sparse.load_npz(\"F_\" + str(N) + \"_allJ_Sz_1subspace.npz\")\n\n # Transforming initial state into contracted space\n initialState = V.transpose() * initialState_fs\n\n H = scipy.sparse.load_npz(\"Hinitial_\" + str(N) + rampdir + magneticorder + \".npz\")\n Htar = scipy.sparse.load_npz(\"Htarget_\" + str(N) + rampdir + magneticorder + \".npz\")\n\n 
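The quantity appended to `f` at every Runge-Kutta step in the cost functions above is the state fidelity |<target|psi>|^2. A toy dense-vector illustration of that inner product (the two-level states below are made up for the example, not taken from the original file):

```python
import numpy as np

# Fidelity between two normalized state vectors; np.vdot conjugates
# its first argument, as required for a quantum inner product.
psi = np.array([1.0, 1.0j]) / np.sqrt(2.0)
target = np.array([1.0, 0.0])
print(abs(np.vdot(target, psi)) ** 2)  # 0.5
```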
return initialState, targetState, H, Htar, V, F\n","sub_path":"CostFunctions.py","file_name":"CostFunctions.py","file_ext":"py","file_size_in_byte":5990,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"393956893","text":"#!/usr/bin/env python\n\nimport sys\nimport time\nimport unittest\nimport numpy as np\nimport pytest\n\n# The python module that wraps the matrix code.\nimport _matrix\n\n\nclass GradingTest(unittest.TestCase):\n\n    def make_matrices(self, size):\n\n        mat1 = _matrix.Matrix(size,size)\n        mat2 = _matrix.Matrix(size,size)\n        mat3 = _matrix.Matrix(size,size)\n\n        for it in range(size):\n            for jt in range(size):\n                mat1[it, jt] = it * size + jt + 1\n                mat2[it, jt] = it * size + jt + 1\n                mat3[it, jt] = 0\n\n        return mat1, mat2, mat3\n\n    def test_basic(self):\n\n        size = 100\n        mat1, mat2, mat3, *_ = self.make_matrices(size)\n\n        self.assertEqual(size, mat1.nrow)\n        self.assertEqual(size, mat1.ncol)\n        self.assertEqual(size, mat2.nrow)\n        self.assertEqual(size, mat2.ncol)\n        self.assertEqual(size, mat3.nrow)\n        self.assertEqual(size, mat3.ncol)\n\n        self.assertEqual(2, mat1[0,1])\n        self.assertEqual(size+2, mat1[1,1])\n        self.assertEqual(size*2, mat1[1,size-1])\n        self.assertEqual(size*size, mat1[size-1,size-1])\n\n        for i in range(mat1.nrow):\n            for j in range(mat1.ncol):\n                self.assertNotEqual(0, mat1[i,j])\n                self.assertEqual(mat1[i,j], mat2[i,j])\n                self.assertEqual(0, mat3[i,j])\n\n        self.assertEqual(mat1, mat2)\n        self.assertTrue(mat1 is not mat2)\n    \n    def test_calculation(self):\n        size = 1000\n        np_mat1 = np.random.random(size * size)\n        np_mat2 = np.random.random(size * size)\n        mat1 = _matrix.Matrix(size, size, np_mat1.tolist())\n        mat2 = _matrix.Matrix(size, size, np_mat2.tolist())\n\n        start = time.time()\n        ret_naive = _matrix.multiply_naive(mat1, mat2)\n        end = time.time()\n        naive_time = end - start\n        print('multiply_naive runtime = {0:2.4f} seconds'.format(end - start))\n\n        start = time.time()\n        ret_tile = _matrix.multiply_tile(mat1, mat2, 100)\n        end = time.time()\n        tile_time = end - start\n        print('multiply_tile runtime = {0:2.4f} seconds'.format(end - start))\n\n        start = time.time()\n        ret_mkl = _matrix.multiply_mkl(mat1, mat2)\n        end = time.time()\n        mkl_time = end - start\n        print('multiply_mkl runtime = {0:2.4f} seconds'.format(end - start))\n\n        self.assertEqual(size, ret_naive.nrow)\n        self.assertEqual(size, ret_naive.ncol)\n        self.assertEqual(size, ret_mkl.nrow)\n        self.assertEqual(size, ret_mkl.ncol)\n\n\n        for i in range(ret_naive.nrow):\n            for j in range(ret_naive.ncol):\n                self.assertNotEqual(mat1[i,j], ret_mkl[i,j])\n                self.assertEqual(ret_naive[i,j], pytest.approx(ret_mkl[i,j], abs=1e-05))\n                self.assertEqual(ret_tile[i,j], pytest.approx(ret_mkl[i,j], abs=1e-05))\n\n        fp = open(\"performance.txt\", \"w\")\n        fp.write('multiply_naive runtime = {0:2.4f} seconds\\n'.format(naive_time))\n        fp.write('multiply_tile runtime = {0:2.4f} seconds\\n'.format(tile_time))\n        fp.write('multiply_mkl runtime = {0:2.4f} seconds\\n'.format(mkl_time))\n        fp.close()\n    ","sub_path":"hw3/ycchan5566/test_matrix.py","file_name":"test_matrix.py","file_ext":"py","file_size_in_byte":3099,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"329522976","text":"import os\nimport glob\nimport json\nfrom termcolor import *\nfrom telethon import *\nimport getch\n\nimport logo\nimport chat\n\ndef cls():\n    os.system('cls' if os.name=='nt' else 'clear')\n\n\n\ndef listDisplay(usersList):\n    text = colored(\"Here's the list of all registered users:\\n\", 'green', attrs=['underline'])\n    print(text)\n    for i in range (0, len(usersList)):\n        
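The test_matrix.py record above benchmarks multiply_naive, multiply_tile and multiply_mkl from a compiled `_matrix` extension. A pure-Python sketch of the loop-tiling idea it measures (a hypothetical stand-in; the real speedup comes from cache reuse in the compiled code, not from Python itself):

```python
def multiply_tile(A, B, n, tile=32):
    # Blocked matrix multiply: work on tile x tile sub-blocks so the
    # active parts of A, B and C stay resident in cache.
    C = [[0.0] * n for _ in range(n)]
    for ii in range(0, n, tile):
        for kk in range(0, n, tile):
            for jj in range(0, n, tile):
                for i in range(ii, min(ii + tile, n)):
                    for k in range(kk, min(kk + tile, n)):
                        a = A[i][k]
                        for j in range(jj, min(jj + tile, n)):
                            C[i][j] += a * B[k][j]
    return C
```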
print(i+1, \")\", usersList[i][2:len(usersList[i])-8]) #Extracts username from the .session file\n    print(\"\")\n\n\n\ndef splashScreen(api): #Main menu\n    while True:\n        cls()\n        usersList = glob.glob(\"./*.session\") #Selects all session files\n        if len(usersList) == 0: #No users found\n            createUser(api)\n            usersList = glob.glob(\"./*.session\") #Refresh the list\n\n        logo.display()\n        listDisplay(usersList)\n        text = colored(\"C: Connect to an existing profile N: New profile D: Delete a profile\", 'blue', attrs=['underline'])\n        print(text)\n        text = colored(\"Q: Quit\\n\", 'yellow', attrs=['underline'])\n        print(text)\n\n        key = getch.getch().lower()\n        if key == 'c':\n            #TODO Enter chat and stuff\n            login(api, usersList)\n        elif key == 'n':\n            createUser(api)\n            usersList = glob.glob(\"./*.session\") #Refresh the list\n        elif key == 'd':\n            removeUser(usersList)\n        elif key == 'q':\n            break\n\n\n    ###Sign in###\ndef login(api, usersList):\n    quit = False\n    login = False\n\n    while True:\n        cls()\n        logo.display()\n        listDisplay(usersList)\n\n        userId = input(\"\\n Choose an account (enter number) or enter Q to quit:\")\n        try: #Verifies input validity\n            intId = int(userId)\n            if intId>0 and intId<=len(usersList):\n                login = True\n                break\n        except ValueError:\n            userId = userId.lower()\n            if userId == 'q':\n                break\n\n    if login:\n        username = usersList[intId-1][2:len(usersList[intId-1])-8]\n        client = TelegramClient(username, api.id, api.hash)\n        client.connect()\n\n        if not client.is_user_authorized():\n            with open(\"phones.json\", 'r') as f:\n                phones = json.load(f)\n            phone = phones[username]\n\n            cls()\n            print(\"One moment please :-)...\")\n            client.send_code_request(phone)\n            cls()\n            client.sign_in(phone, input(\"Enter the code Telegram just sent you: \"))\n\n        chat.chatSelection(client)\n\n\n    ###Sign up###\ndef createUser(api):\n    connected = False\n    firstTry = True\n    correctPhone = False\n\n    while not connected and not correctPhone:\n        correctPhone = False\n\n        cls()\n        signupMessage(firstTry)\n        phone = input(\"Enter your phone number (international format):\")\n        cls()\n        signupMessage(firstTry)\n        username = input(\"Enter your username:\")\n\n        if phone[0] == '+' and len(phone) == 12:\n            correctPhone = True\n            client = TelegramClient(username, api.id, api.hash)\n            if client.connect():\n                connected = True\n        else:\n            firstTry = False\n\n    newPhone = {username: phone} #Saving phone number for authentication\n    with open('phones.json', 'r') as outfile:\n        phones = json.load(outfile)\n    phones[username] = phone\n    with open('phones.json', 'w') as outfile:\n        json.dump(phones, outfile)\n\n\n\ndef signupMessage(bool):\n    logo.display()\n    if bool:\n        text = colored(\"It seems you're not logged in yet...\\n\", 'green', attrs=[])\n        print(text)\n    else:\n        text = colored(\"Oops! Something went wrong =\\\\! 
Let's try again...\\n\", 'yellow', attrs=[])\n        print(text)\n\n\n    ###Remove user###\ndef removeUser(usersList):\n    while True:\n        cls()\n        logo.display()\n        listDisplay(usersList)\n        text = colored(\"Choose the user you want to delete or enter Q to quit:\", \"red\", attrs=['underline'])\n\n        key = input(text).lower()\n        try: #User has entered a number\n            intId = int(key)\n\n            if intId>0 and intId<=len(usersList): #Remove entry from JSON\n                username = usersList[intId-1][2:len(usersList[intId-1])-8]\n                with open('phones.json', 'r') as f:\n                    phones = json.load(f)\n\n                phones.pop(username)\n                with open('phones.json', 'w') as outfile:\n                    phones = json.dump(phones, outfile)\n\n                os.remove(username+\".session\")\n                break\n\n        except:\n            if key == 'q':\n                break\n","sub_path":"connection.py","file_name":"connection.py","file_ext":"py","file_size_in_byte":4662,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"421822404","text":"#!/usr/bin\n# -*- coding: utf-8 -*-\n\"\"\" Finite Element method \n    JC Passieux, INSA Toulouse, 2019 \"\"\"\n\n#%%\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport scipy.sparse.linalg as splalg\nimport scipy.sparse as spsp\nimport pyxel as px\nimport scipy as sp\n\n'''curved beam'''\n(Draf,Dl,n,e)=np.load('poutrecourbe.npy',allow_pickle=True)\n(Dl,n,e)=np.load('poutrecourbeQ9.npy',allow_pickle=True)\n\n\nn=n.T\nm=px.Mesh(e,n)\n\nm.Plot(facecolor='#888888')\n#%%\n#nr=np.array([1132,1131,1130,1129,1128,1094,1054,1053,1052,1019,978,977,976,944,902,870,828,796,754,753,752,721,678,647,604,605,606,607,608,575,534,535,536,576,610,650,684,685,686,725,760,761,762,800,836,874,910,948,984,1022,1058,1096])\n#nr2=nr[np.arange(0,52,2)]\n#m2.Plot()\n#plt.plot(m2.n[nr2,0],m2.n[nr2,1],'r.')\n#for i in range(len(nr2)):\n#    print('Point(%2d) = {%6.10e, %6.10e, 0, .1};' % (i+9,m.n[nr2[i],0],m.n[nr2[i],1]))\n\nmf=px.ReadMeshGMSH('fish.msh')\nmf.Plot(facecolor='r')\n\n#m.PlotElemLabels()\n\n#mf.PlotNodeLabels()\n\n#%%\n# list of the elements covered by the patch\ner=np.array([322,321,320,296,272,248,247,223,222,198,199,200,176,224,225,249,273,274,298,297,250,295,271,345,346])\n\n\n### Build the mesh of the complementary model and the mesh of the local model\nm2=m.Copy()\nm3=m.Copy()\ne2=dict()\ne3=dict()\nne2=0\nne3=0\n\n# %%\n### This is dirty ^^\n#for i in range(len(e)):\n#    if len(np.where(er==i)[0])==0:\n#        e2[ne2]=m.e[i]\n#        ne2+=1\n#    else:\n#        e3[ne3]=m.e[i]\n#        ne3+=1\n\n\nsetEr = set(er) # Convert to a set for the membership test\nfor ii in range(len(e)):\n    if not(ii in setEr): # If the element does not cover the local patch\n        e2[ne2] = m.e[ii]\n        ne2 +=1\n    else: # If the element covers the local patch\n        e3[ne3] = m.e[ii]\n        ne3 +=1\n    \n# %%\nm2.e=e2\nm2.Plot(edgecolor='b')\nm3.e=e3\nm3.Plot(edgecolor='r')\nm3.VTKMesh('marie_mask')\n\n#%%\n#m2.PlotNodeLabels()\n\n\n# move the edge-midpoint nodes of mf to the same position as the\n# Q9 mesh m, for the interface nodes only.\n# node_iga=np.array([1131,1129,1053,977,753,605,607,535,685,761]) # Q8\nnode_iga=np.array([1491,1489,1389,1289,993,797,799,703,901,1001]) # Q9\nnode_efa=np.array([60,61,63,65,69,72,73,75,78,80])\nfor i in range(len(node_iga)):\n    mf.n[node_efa[i],:]=m.n[node_iga[i],:]\n\n\n#%% Connectivity\nm2.Connectivity()\nmf.Connectivity()\nm.Connectivity()\n\nm.Plot(edgecolor='b')\n\n\n\n### What is this hard-coded 110 ??? 
We should retrieve the number of\n### interface degrees of freedom instead\nrow1=np.zeros(110)\ncol1=np.zeros(110)\nval1=np.zeros(110)\nrow2=np.zeros(110)\ncol2=np.zeros(110)\nval2=np.zeros(110)\nrow3=np.zeros(110)\ncol3=np.zeros(110)\nval3=np.zeros(110)\nnv=0\nfor jn in range(m2.n.shape[0]):\n    xn=m2.n[jn,:]\n    a=np.where(np.linalg.norm(mf.n-xn,axis=1)<1e-5)[0]\n    if len(a)>0:\n        plt.plot(m2.n[jn,0],m2.n[jn,1],'k.')\n        plt.plot(mf.n[a,0],mf.n[a,1],'r+')\n        row1[nv]=nv\n        col1[nv]=m2.conn[jn,0]\n        val1[nv]=1\n        row3[nv]=nv\n        col3[nv]=m.conn[jn,0]\n        val3[nv]=1\n        row2[nv]=nv\n        col2[nv]=mf.conn[a,0]\n        val2[nv]=-1\n        row1[nv+1]=nv+1\n        col1[nv+1]=m2.conn[jn,1]\n        val1[nv+1]=1\n        row3[nv+1]=nv+1\n        col3[nv+1]=m.conn[jn,1]\n        val3[nv+1]=1\n        row2[nv+1]=nv+1\n        col2[nv+1]=mf.conn[a,1]\n        val2[nv+1]=-1\n        nv+=2\n        \n# %%\n### Build the coupling operators\n    \nCGEF=sp.sparse.csc_matrix((val1, (row1, col1)), shape=(nv,m2.ndof))\nCLEF=sp.sparse.csc_matrix((val2, (row2, col2)), shape=(nv,mf.ndof))\nCGEFtot=sp.sparse.csc_matrix((val3, (row3, col3)), shape=(nv,m.ndof))\n\n#%% \n\n### Integrate the FE shape functions\nm2.GaussIntegration()\nmf.GaussIntegration()\n\n### Define the Hooke (plane stress) tensor\nE=100e+3 ; v=0.3\nhooke=E/(1-v**2)*np.array([[1,v,0],[v,1,0],[0,0,(1-v)/2]])\n\n### Build the FE stiffness matrices\nKGEF=m2.Stiffness(hooke) # FE stiffness of the complementary model\nKLEF=mf.Stiffness(hooke) # FE stiffness of the local model\n\n### Assemble the coupled-problem matrix\nKK=sp.sparse.bmat([[KGEF, None, CGEF.T ],\n                   [None, KLEF, CLEF.T ],\n                   [CGEF, CLEF, None   ]])\nKK=KK.tocsc()\n#%%\n\n### Initialize the displacement\nndof=KK.shape[0]\nUU=np.zeros(ndof)\n\n\n\n### Define the fixed DOFs\n#rep1=px.SelectMeshLine(m2)\n# rep1=np.array([ 0, 49, 74, 123, 148, 197, 222, 271, 296, 345, 370, \\\n#        419, 444, 493, 518, 567, 592, 641, 666, 715, 740, 789, \\\n#        814, 863, 888, 937, 962, 1011, 1036, 1085, 1110, 1159, 1184])\nrep1=np.array([ 0, 49, 98, 147, 196, 245, 294, 343, 392, 441, 490,\n        539, 588, 637, 686, 735, 784, 833, 882, 931, 980, 1029,\n       1078, 1127, 1176, 1225, 1274, 1323, 1372, 1421, 1470, 1519, 1568])\nrep11=m2.conn[rep1]\nrep=np.arange(ndof)\nrep=np.delete(rep,rep11)\nrepk=np.ix_(rep,rep)\n\n# select the top nodes for the applied force\n#rep2=px.SelectMeshLine(m2)\nrep2=np.array([ 48, 73, 122, 147, 196, 221, 270, 295, 344, 369, 418, \\\n        443, 492, 517, 566, 591, 640, 665, 714, 739, 788, 813, \\\n        862, 887, 936, 961, 1010, 1035, 1084, 1109, 1158, 1183, 1232])\nrep22=m2.conn[rep2]\nFF=np.zeros(ndof)\nFF[rep22[:,1]]=100\n\n### Factorize the stiffness operator\nKLU=splalg.splu(KK[repk])\n### Solve the problem\nUU[rep]=KLU.solve(FF[rep])\n### Extract the displacements on the complementary and the local models\nrepG=np.arange(m2.ndof)\nrepL=np.arange(m2.ndof,m2.ndof+mf.ndof)\nUGEF=UU[repG]\nULEF=UU[repL]\n\nm2.Plot(edgecolor='y')\nmf.Plot(edgecolor='y')\n\nm2.Plot(UGEF,2,edgecolor='k')\nmf.Plot(ULEF,2,edgecolor='k')\n\nm2.VTKSol('marie_glob',UGEF)\nmf.VTKSol('marie_loc',ULEF)\n\n#%%\n\n### Why is the Hooke tensor redefined here?\nE=100e+3 ; v=0.3\nhooke=E/(1-v**2)*np.array([[1,v,0],[v,1,0],[0,0,(1-v)/2]])\n\n\n\n#(val,row,col,siz)=np.load('poutrecourbeKIGA.npy')\n#KIGA=spsp.csc_matrix((val, row, col), shape=siz)\n#(val,row,col,siz)=np.load('poutrecourbeKBIGA.npy')\n#KBIGA=spsp.csc_matrix((val, row, col), shape=siz)\n\n\n### Solve the global full-IGA problem\n\nimport Geometries as g\nfrom pyNURBS import Mechanalysis\nnspanu = 6 ## Note: for the plate with a hole, we have 2*nspanu\nnspanv = 4\ncspanu = 4 # 
nspanu_analysis = cspanu*nspanu\ncspanv = 4\nppu = 2\nppv = 2\na=5;b=10\n#m = g.geo_poutre2D_ct_curve(a,b,nspanu,nspanv,ppu,ppv)  ### curved beam\nm_analysis = g.geo_poutre2D_ct_curve(a,b,nspanu*cspanu,nspanv*cspanv,ppu,ppv) ### fine curved beam\n\nanalysis = Mechanalysis(m_analysis,hooke)\nanalysis.Stiffness(np.arange(384))\nKIGA=analysis.K\nanalysis.Stiffness(er)\nKBIGA=analysis.K\n\nBtot = m_analysis.Get_Btot()\nplt.plot(Btot[0,:],Btot[1,:],'ko')\nplt.axis('equal')\n\n#m_analysis.SelectMeshNodes()\nrep1=np.array([[ 25, 51, 77, 103, 129, 155, 181, 207, 233, 259, 285, 311, 337, 363, 389, 415, 441, 467], \\\n               [493, 519, 545, 571, 597, 623, 649, 675, 701, 727, 753, 779, 805, 831, 857, 883, 909, 935]])\nrep2=np.array([[ 0, 26, 52, 78, 104, 130, 156, 182, 208, 234, 260, 286, 312, 338, 364, 390, 416, 442], \\\n               [468, 494, 520, 546, 572, 598, 624, 650, 676, 702, 728, 754, 780, 806, 832, 858, 884, 910]])\nndofiga=KIGA.shape[0]\nU=np.zeros(ndofiga)\nF=np.zeros(ndofiga)\nF[rep1[1]]=10\n\nF[rep1[1]]= np.array([3.3333333,6.6667,10.0000,10.0000,10.0000, 10.0000,10.0000, 10.0000,10.0000, 10.0000,10.0000, 10.0000,10.0000, 10.0000,10.0000, 10.0000, 6.6667, 3.3333])\n\n\nrep=np.arange(ndofiga)\nrep=np.delete(rep,rep2.ravel())\nrepk=np.ix_(rep,rep)\n\nIGALU=splalg.splu(KIGA[repk])\nU[rep]=IGALU.solve(F[rep])\n\nm_analysis.VTKFull(1e-7,hooke,30,U,'toto')\n\n\n#%% Build and factorize the local model with Dirichlet conditions\n\nmf.GaussIntegration()\nKLEF=mf.Stiffness(hooke)\nKL=sp.sparse.bmat([[KLEF, CLEF.T ],\n                   [CLEF, None  ]])\nEFLU=splalg.splu(KL)\n\n\n\n#%%\n\nndof_interf=CLEF.shape[0]\nUG  =np.zeros(ndofiga)\nUGold=np.zeros(ndofiga)\nLAM=np.zeros(ndof_interf)\n(Dl,n,e)=np.load('poutrecourbeQ9.npy',allow_pickle=True)\nDl=Dl.toarray()\nAA=np.zeros((m.ndof//2,m.ndof//2))\nfor i in range(m.ndof//2):\n    AA[i,m.conn[i,0]]=1\nDl=Dl.dot(AA)\n\nDl=spsp.csc_matrix(Dl)\nDDLL=sp.sparse.bmat([[Dl  , None], \\\n                     [None, Dl  ]])\n\nCG = CGEFtot.dot(DDLL.T)\n\nfor ifp in range(30):\n    print('iter #'+str(ifp))\n    #% PB1\n    FG=F - CG.T.dot(LAM) + KBIGA.dot(UGold)\n    UG[rep]=IGALU.solve(FG[rep])\n    #m_analysis.VTKFull(1e-7,hooke,30,UG,'toto')\n    #print(np.linalg.norm(UG))\n    #UEF=DDLL.T.dot(UG)\n    #m.Plot(color='y')\n    #m.Plot(UEF)\n    if ifp==0:\n        err=np.array([1])\n    else:\n        err=np.r_[err,np.linalg.norm(UG-UGold)/np.linalg.norm(UG)]\n\n    #% PB2\n    FL=np.zeros(mf.ndof+ndof_interf)\n    FL[mf.ndof:]=-CG.dot(UG)\n    UL=EFLU.solve(FL)\n    LAM=UL[mf.ndof:]\n    #print(np.linalg.norm(UL[:mf.ndof]))\n    #mf.VTKSol('marie_loc',UL[:mf.ndof])\n    UGold=UG.copy()\n\nplt.semilogy(err,'k.-')\nplt.grid('on')\nplt.xlabel('iteration number',fontsize=14)\nplt.ylabel('Stagnation',fontsize=14)\nplt.gca().xaxis.grid(True, which='minor')\n\nm_analysis.VTKFull(1e-7,hooke,30,UG,'toto')\nmf.VTKSol('marie_loc',UL[:mf.ndof])\n    \nUEF=DDLL.T.dot(UG)\nm.Plot(color='y')\n#mf.Plot(color='r')\nm.Plot(UEF,10)\nmf.Plot(UL[:mf.ndof],10,edgecolor='r')\n\n#%%\n\n\n\n\"\"\"\n#%%\n\nroi=np.array([[0,0],[400,40]])\nm=px.StructuredMeshT3(roi,10)\nm.Plot()\nm.PlotElemLabels()\nm.PlotNodeLabels()\n\n\n#%%\n\np=50\nl=10\nx=np.linspace(0,l,100)\nn1=x/l\nn2=1-x/l\nplt.plot(x,n1)\nplt.plot(x,n2)\n\nf=n1*p*l*0.5+n2*p*l*0.5\n\nplt.plot(x,f)\n#%% test with a stiffer interior (unfinished)\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport scipy.sparse.linalg as splalg\nimport scipy as sp\nimport pyxel as px\n\nmg=px.ReadMeshINP('./data/dic_composite/olfa3.inp')\nml=mg.Copy()\nmg.Plot()\nmg.Connectivity()\nmg.GaussIntegration()\nE=100e+3 ; 
v=0.3\nhooke=E/(1-v**2)*np.array([[1,v,0],[v,1,0],[0,0,(1-v)/2]])\nKG=mg.Stiffness(hooke)\n\nc=np.array([0.015,0.025])\nr=0.08\n\n\n\n\nKL=sp.sparse.bmat([[KLEF, CLEF.T ],\n                   [CLEF, None  ]])\n\"\"\"","sub_path":"Code/cas_marie-vPaul.py","file_name":"cas_marie-vPaul.py","file_ext":"py","file_size_in_byte":9935,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"382125471","text":"class Solution:\n    def findPoisonedDuration(self, timeSeries: List[int], duration: int) -> int:\n        if not timeSeries or duration == 0:\n            return 0\n        stack = [timeSeries[0] + duration]\n        count = duration\n        for time in range(1,len(timeSeries)):\n            if stack and stack[-1] > timeSeries[time]:\n                diff = stack[-1] - timeSeries[time]\n                add_time = duration - diff\n                if diff <= duration:\n                    count += add_time\n                # extend the end of the current poison window, otherwise the\n                # next overlap is computed against a stale expiry time\n                stack[-1] = timeSeries[time] + duration\n\n            elif stack and stack[-1] <= timeSeries[time]:\n                count += duration\n                poison_time = timeSeries[time] + duration\n                stack.append(poison_time)\n\n        return count\n","sub_path":"495.TeemoAttackingStack/teemoattackingstack.py","file_name":"teemoattackingstack.py","file_ext":"py","file_size_in_byte":708,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"406799137","text":"\"\"\"\nGiven a linked list, return the node where the cycle begins. If there is no cycle, return null.\n\nNote: Do not modify the linked list.\n\nhttp://www.tuicool.com/articles/3EZJbm\n\nStep 1: As in Linked List Cycle, run fast and slow pointers (the fast one moves two steps at a time, the slow one moves one step); if they meet, a cycle exists and the meeting point lies on the cycle.\nStep 2: If the cycle exists, let:\nc be the distance from head to the start of the cycle;\ns be the distance from the start of the cycle to the meeting point;\ncycle be the length of the cycle;\ndistance(pointer) be the distance a pointer has traveled.\nProperties:\na) The fast pointer travels exactly twice the distance of the slow pointer.\nb) The two pointers must meet, because the fast pointer laps the slow one (and they meet during the first lap: when the fast pointer is about to catch up, the gap is either 1 or 2; a gap of 1 closes after one more move, a gap of 2 after two moves).\nHence:\ndistance(slow) = c + s, distance(fast) = 2(c + s)\nProperties a and b -> distance(fast) - distance(slow) = cycle = 2(c + s) - (c + s) = c + s\n-> c = cycle - s\nMoreover: the cycle has length cycle and both pointers sit at distance s from the cycle start, so walking cycle - s more steps returns to the start; and c is the distance from head to the cycle start, with c = cycle - s. Therefore, move two pointers one step at a time, one from the meeting point and one from head; after cycle - s steps they meet exactly at the start of the cycle.\n\"\"\"\n\n# Definition for singly-linked list.\n# class ListNode(object):\n#     def __init__(self, x):\n#         self.val = x\n#         self.next = None\n\nclass Solution(object):\n    def detectCycle(self, head):\n        \"\"\"\n        :type head: ListNode\n        :rtype: ListNode\n        \"\"\"\n        fast = head\n        slow = head\n        while fast!=None and fast.next!=None:\n            fast = fast.next.next\n            slow = slow.next\n            if fast == slow:\n                break\n        if fast == None or fast.next == None:\n            return None\n        else:\n            while head!=fast:\n                head = head.next\n                fast = fast.next\n            return fast\n","sub_path":"142_Linked_List Cycle_II.py","file_name":"142_Linked_List Cycle_II.py","file_ext":"py","file_size_in_byte":2097,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"229667534","text":"from typing import Optional\n\nfrom qgis.PyQt.QtCore import (pyqtSignal)\nfrom qgis.PyQt.QtWidgets import (QWidget, QFormLayout)\nfrom qgis.gui import (QgsDoubleSpinBox)\nfrom qgis.core import (QgsUnitTypes)\nfrom los_tools.gui import DistanceWidget\n\n\nclass LoSNoTargetInputWidget(QWidget):\n\n    valuesChanged = pyqtSignal()\n\n    def __init__(self, parent: Optional[QWidget] = None) -> None:\n        super().__init__(parent)\n        layout = QFormLayout()\n        layout.setContentsMargins(0, 0, 0, 0)\n        self.setLayout(layout)\n\n        self._min_angle = QgsDoubleSpinBox(self)\n        self._min_angle.setMinimum(-359.999)\n        self._min_angle.setMaximum(359.999)\n        self._min_angle.setValue(0)\n        self._min_angle.setClearValue(0)\n        self._min_angle.setDecimals(3)\n        
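The two-phase argument translated in the 142_Linked_List Cycle_II record above can be checked end to end on a small list. A self-contained sketch (this local ListNode mirrors the commented-out definition in that file):

```python
class ListNode(object):
    def __init__(self, x):
        self.val = x
        self.next = None

# Build 0 -> 1 -> 2 -> 3 -> 4 -> back to 2, so the cycle entry is node 2.
nodes = [ListNode(i) for i in range(5)]
for a, b in zip(nodes, nodes[1:]):
    a.next = b
nodes[-1].next = nodes[2]

# Phase 1: fast/slow pointers meet somewhere on the cycle.
slow = fast = nodes[0]
while fast is not None and fast.next is not None:
    slow, fast = slow.next, fast.next.next
    if slow is fast:
        break

# Phase 2: walk from head and from the meeting point at the same speed;
# by c = cycle - s they meet exactly at the cycle entry.
ptr = nodes[0]
while ptr is not slow:
    ptr, slow = ptr.next, slow.next
print(ptr.val)  # 2
```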
self._min_angle.valueChanged.connect(self._on_minimum_changed)\n        self._min_angle.valueChanged.connect(self.emit_values_changed)\n\n        self._max_angle = QgsDoubleSpinBox(self)\n        self._max_angle.setMinimum(-359.999)\n        self._max_angle.setMaximum(359.999)\n        self._max_angle.setValue(359.999)\n        self._max_angle.setClearValue(359)\n        self._max_angle.setDecimals(3)\n        self._max_angle.valueChanged.connect(self._on_maximum_changed)\n        self._max_angle.valueChanged.connect(self.emit_values_changed)\n\n        self._angle_step = QgsDoubleSpinBox(self)\n        self._angle_step.setMinimum(0.001)\n        self._angle_step.setMaximum(90)\n        self._angle_step.setValue(1)\n        self._angle_step.setClearValue(1)\n        self._angle_step.setDecimals(3)\n        self._angle_step.valueChanged.connect(self.emit_values_changed)\n\n        self._length = DistanceWidget(self)\n        self._length.setMinimum(1)\n        self._length.setMaximum(999999999)\n        self._length.setValue(100)\n        self._length.setClearValue(100)\n        self._length.setDecimals(2)\n        self._length.valueChanged.connect(self.emit_values_changed)\n\n        layout.addRow(\"Minimum Azimuth\", self._min_angle)\n        layout.addRow(\"Maximum Azimuth\", self._max_angle)\n        layout.addRow(\"Angle Step\", self._angle_step)\n        layout.addRow(\"LoS Length\", self._length)\n\n        self._unit = QgsUnitTypes.DistanceUnit.DistanceMeters\n\n    def _on_minimum_changed(self) -> None:\n        if self._max_angle.value() < self._min_angle.value():\n            self._max_angle.setValue(self._min_angle.value())\n\n    def _on_maximum_changed(self) -> None:\n        if self._min_angle.value() > self._max_angle.value():\n            self._min_angle.setValue(self._max_angle.value())\n\n    def emit_values_changed(self) -> None:\n        self.valuesChanged.emit()\n\n    @property\n    def min_angle(self) -> float:\n        return self._min_angle.value()\n\n    @property\n    def max_angle(self) -> float:\n        return self._max_angle.value()\n\n    @property\n    def angle_step(self) -> float:\n        if self._angle_step.value() == 0:\n            return 0.01\n        return self._angle_step.value()\n\n    def setUnit(self, unit: QgsUnitTypes.DistanceUnit) -> None:\n        self._unit = unit\n\n    @property\n    def length(self) -> float:\n        return self._length.distance().inUnits(self._unit)\n","sub_path":"los_tools/gui/los_without_target_visualization/los_without_target_widget.py","file_name":"los_without_target_widget.py","file_ext":"py","file_size_in_byte":3197,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"240915946","text":"'''\nGiven n as input,\ncount every time from 00:00:00 up to n:59:59\nthat contains at least one digit 3, and print the count\n\n'''\n\nn = int(input())\ncnt = 0\n\nfor i in range(n+1):\n    for j in range(60):\n        for k in range(60):\n            if '3' in str(i) + str(j) + str(k):\n                cnt += 1\n\nprint(cnt)\n","sub_path":"example/book1/유형별 실전문제/구현/시각.py","file_name":"시각.py","file_ext":"py","file_size_in_byte":337,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"188923471","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\n\ndef sigmoid(x):\n    return 1 / (1 + np.exp(-x))\n\n\ndef Relu(x):\n    return np.maximum(0, x)\n\n\ninput_data = np.random.randn(1000, 100)\nnode_num = 100\nhidden_layer_size = 5\nactivations = {}\n\nx = input_data\n\nfunc = sigmoid\n# func = Relu\n# func = np.tanh\n\nfor i in range(hidden_layer_size):\n    if i != 0:\n        x = activations[i - 1]\n\n    # w = np.random.randn(node_num, node_num) * 1\n    # w = np.random.randn(node_num, node_num) * 0.01\n    w = np.random.randn(node_num, node_num) / np.sqrt(node_num)  # Xavier Initialization\n    # w = np.random.randn(node_num, node_num) * 
np.sqrt(2 / node_num) # He Initialization\n\n a = np.dot(x, w)\n z = func(a)\n activations[i] = z\n\n\nfor i, a in activations.items():\n plt.subplot(1, len(activations), i + 1)\n plt.title(str(i + 1) + \"-layer\")\n plt.hist(a.flatten(), 30, range=(0, 1))\n\nplt.savefig(\"fig.png\")\n\n","sub_path":"ch06/weight_init_activation_histogram.py","file_name":"weight_init_activation_histogram.py","file_ext":"py","file_size_in_byte":915,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"364545173","text":"#!/usr/bin/env python3\n\nimport random\nfrom plugin import BasicPlugin\n\nclass Plugin(BasicPlugin):\n\t\"\"\"Slap Plugin\"\"\"\n\tdef __init__(self, bot):\n\t\tself.bot = bot\n\t\tself.name = \"slap\"\n\t\tself.priority = 0\n\t\tself.load_priority = 10\n\n\tdef finish(self):\n\t\tpass\n\n\tdef hook(self):\n\t\tself.bot.config.set_safe(\"plugins.\"+self.name, False, \"Slap!\")\n\t\tself.bot.config.set_safe(\"plugins.\"+self.name+\".fishfile\", \"Plugins/Slap/fish.txt\", \"(str) Fish to slap with\")\n\t\twith open(self.bot.config.get(\"plugins.\"+self.name+\".fishfile\"), 'r') as fish:\n\t\t\tself.fish = fish.read().splitlines()\n\t\treturn self.bot.config.get(\"plugins.\"+self.name)\n\n\tdef call(self, message):\n\t\tif message.command != \"PRIVMSG\":\n\t\t\treturn None\n\n\t\torigin = message.params[0] if message.params[0] != self.bot.ircsock.getnick() else message.origin()[1:]\n\t\tperson = message.origin()[1:]\n\n\t\tif message.params[1] == self.bot.config.get(\"command_trigger\")+\"slap\":\n\t\t\tif len(message.params) > 3:\n\t\t\t\treason = \" \".join(message.params[3:])\n\t\t\telse:\n\t\t\t\treason = random.choice(self.fish)\n\n\t\t\tif len(message.params) > 2:\n\t\t\t\ttarget = message.params[2]\n\t\t\telse:\n\t\t\t\ttarget = person\n\n\t\t\tself.bot.ircsock.action(origin, \"slaps {0} around a bit with {1}.\".format(target, reason))\n","sub_path":"Plugins/Slap/Slap.py","file_name":"Slap.py","file_ext":"py","file_size_in_byte":1218,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"354905525","text":"# -*- coding: utf-8 -*-\n\nimport pygmsh\n\nfrom helpers import compute_volume\n\n\ndef test(lcar=0.05):\n geom = pygmsh.built_in.Geometry()\n\n # Draw a cross with a circular hole\n circ = geom.add_circle([0.0, 0.0, 0.0], 0.1, lcar=lcar, make_surface=False)\n poly = geom.add_polygon(\n [\n [+0.0, +0.5, 0.0],\n [-0.1, +0.1, 0.0],\n [-0.5, +0.0, 0.0],\n [-0.1, -0.1, 0.0],\n [+0.0, -0.5, 0.0],\n [+0.1, -0.1, 0.0],\n [+0.5, +0.0, 0.0],\n [+0.1, +0.1, 0.0],\n ],\n lcar=lcar,\n holes=[circ],\n )\n\n axis = [0, 0, 1.0]\n\n geom.extrude(poly, translation_axis=axis, num_layers=1)\n\n ref = 0.16951514066385628\n points, cells, _, _, _ = pygmsh.generate_mesh(geom)\n assert abs(compute_volume(points, cells) - ref) < 1.0e-2 * ref\n return points, cells\n\n\nif __name__ == \"__main__\":\n import meshio\n\n meshio.write_points_cells(\"layers.vtu\", *test())\n","sub_path":"test/test_layers.py","file_name":"test_layers.py","file_ext":"py","file_size_in_byte":967,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"643215010","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('autoadmin', '0002_auto_20160902_1512'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='server_history',\n fields=[\n ('id', 
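The weight_init_activation_histogram.py record above encodes the scaling rules in its comments: Xavier divides by sqrt(n), He multiplies by sqrt(2/n). A quick numeric check of the variance argument behind those factors (illustrative only, not part of the original script):

```python
import numpy as np

# With w ~ N(0, 1/n), a pre-activation summing n independent terms keeps
# roughly unit standard deviation; He scaling doubles the variance to
# compensate for ReLU zeroing about half of its inputs.
n = 100
x = np.random.randn(10000, n)
w_xavier = np.random.randn(n, n) / np.sqrt(n)
w_he = np.random.randn(n, n) * np.sqrt(2.0 / n)
print("xavier:", np.dot(x, w_xavier).std())  # ~1.0
print("he:    ", np.dot(x, w_he).std())      # ~1.41, i.e. sqrt(2)
```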
models.IntegerField(serialize=False, primary_key=True, db_column=b'ID')),\n                ('history_id', models.IntegerField()),\n                ('history_user', models.CharField(max_length=15)),\n                ('history_datetime', models.DateTimeField(auto_now=True)),\n                ('db_datetime', models.DateTimeField(auto_now_add=True)),\n                ('history_command', models.CharField(max_length=255)),\n                ('history_ip', models.ForeignKey(to='autoadmin.server_list', to_field=b'server_lip')),\n            ],\n        ),\n    ]\n","sub_path":"omaudit/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":907,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"302881779","text":"import tensorflow.compat.v1 as tf\ntf.disable_v2_behavior() \nfrom tensorflow.compat.v1 import keras\nfrom tensorflow.keras.layers import GRU, Dense, Input\nfrom tensorflow.keras.models import Model, Sequential\nfrom tensorflow.keras.metrics import AUC\nfrom sklearn.preprocessing import normalize, MinMaxScaler\nfrom sklearn.model_selection import StratifiedKFold, train_test_split\nfrom sklearn.metrics import auc, roc_curve, accuracy_score, precision_score, recall_score\nfrom matplotlib import pyplot\nimport pandas as pd\nimport numpy as np\nimport os\nfrom DataManager import *\n\n\nclass MildInt(object):\n    def __init__(self, modals):\n        self.dm = DataManager()\n        self.X = {}\n        self.y = self.dm.get_labels()\n        self.initialize_data(modals)\n\n    \n    def initialize_data(self, modals):\n        \"\"\"\n        Load the data matrix for each requested modality.\n        \"\"\"\n        for modal in modals:\n            X = self.dm.get_modal_data(modal)\n            self.X[modal] = X\n\n\n    def split_data(self, X):\n        \"\"\"\n        Train/test split.\n        \"\"\"\n        X_train_data = []\n        X_test_data = []\n        for modal in X.keys():\n            X_train, X_test = train_test_split(X[modal], test_size=0.3, random_state=0)\n            X_train_data.append(X_train)\n            X_test_data.append(X_test)\n            # print(f\"{modal} X_train: {X_train.shape}\")\n            # print(f\"{modal} X_test: {X_test.shape}\")\n\n        y_train, y_test = train_test_split(self.y, test_size=0.3, random_state=0)\n        # print(f\"train_y: {y_train.shape}\")\n        # print(f\"test_y: {y_test.shape}\")\n        return X_train_data, X_test_data, y_train, y_test\n\n    \n    def normalize_data(self, X, norm_option):\n        if norm_option == \"all\":\n            for modal in X.keys():\n                scaler = MinMaxScaler()\n                X[modal] = scaler.fit_transform(X[modal].reshape(-1, X[modal].shape[-1])).reshape(X[modal].shape)\n            return X\n        elif norm_option == \"training_only\":\n            train_X_data = []\n            for modal_data in X:\n                scalers = {}\n                for i in range(modal_data.shape[1]):\n                    scalers[i] = MinMaxScaler()\n                    modal_data[:, i, :] = scalers[i].fit_transform(modal_data[:, i, :]) \n                train_X_data.append(modal_data)\n            return train_X_data\n\n\n    def run_integrated_model(self, norm_option):\n        \"\"\"\n        Builds and runs a Keras functional API model that takes in multi-modal data. 
\n        \"\"\"\n        print(\"[INFO] processing data...\")\n        if norm_option == \"all\": \n            normalized_data = self.normalize_data(self.X, norm_option)\n            train_X, test_X, train_y, test_y = self.split_data(normalized_data)\n        elif norm_option == \"training_only\":\n            train_X, test_X, train_y, test_y = self.split_data(self.X)\n            train_X = self.normalize_data(train_X, norm_option)\n\n\n        print(\"[INFO] creating model...\")\n        # input tensors\n        cog_input = Input(shape=(self.X['cog'].shape[1], self.X['cog'].shape[2]))\n        csf_input = Input(shape=(self.X['csf'].shape[1], self.X['csf'].shape[2]))\n        mri_input = Input(shape=(self.X['mri'].shape[1]))\n        demo_input = Input(shape=(self.X['demo'].shape[1]))\n\n        # latent tensors\n        cog_z = GRU(2, return_sequences=False, activation='linear')(cog_input)\n        csf_z = GRU(5, return_sequences=False, activation='linear')(csf_input)\n        mri_z = Dense(3, activation='relu')(mri_input) # dense layer, mri is not longitudinal\n        demo_z = Dense(2, activation='relu')(demo_input)\n\n        # concatenate latent tensors\n        z = keras.layers.concatenate([cog_z, csf_z, mri_z, demo_z])\n\n        # logistic regression\n        output = Dense(1, activation='sigmoid')(z)\n\n        # create and compile model\n        model = Model(\n            inputs=[cog_input, csf_input, mri_input, demo_input], \n            outputs=output\n        )\n\n        model.compile(\n            optimizer='adam',\n            loss='binary_crossentropy'\n        )\n\n        print(\"[INFO] model summary...\")\n        model.summary()\n\n        print(\"[INFO] training model...\")\n        model.fit(train_X, train_y, epochs=5, batch_size=16, verbose=1) # try batch size 32, 64, 128\n\n        print(\"[INFO] predicting MCI to AD conversion...\")\n        pred_y = model.predict(test_X)\n        return pred_y, test_y\n\n    \n    def Find_Optimal_Cutoff(self, fpr, tpr, threshold):\n        \"\"\" Find the optimal probability cutoff point for a classification model related to event rate\n        Parameters\n        ----------\n        target : Matrix with dependent or target data, where rows are observations\n\n        predicted : Matrix with predicted data, where rows are observations\n\n        Returns\n        ------- \n        list type, with optimal cutoff value\n        \n        \"\"\"\n        i = np.arange(len(tpr)) \n        roc = pd.DataFrame({'tf' : pd.Series(tpr-(1-fpr), index=i), 'threshold' : pd.Series(threshold, index=i)})\n        roc_t = roc.iloc[(roc.tf-0).abs().argsort()[:1]]\n\n        return list(roc_t['threshold']) \n\n    \n    def evaluate_model(self, y_predictions, y_test):\n        eval_metrics = {}\n        y_pred = y_predictions.ravel() # flatten (n, 1) predictions to a 1-D array\n        fpr, tpr, thresholds = roc_curve(y_test, y_pred)\n        eval_metrics['threshold'] = self.Find_Optimal_Cutoff(fpr, tpr, thresholds)\n        eval_metrics['FPR'] = fpr\n        eval_metrics['TPR'] = tpr\n        eval_metrics['AUC'] = auc(fpr, tpr)\n        preds = np.where(y_pred > eval_metrics['threshold'], 1, 0)\n        eval_metrics['ACC'] = accuracy_score(y_test, preds)\n        eval_metrics['SEN'] = recall_score(y_test, preds)\n        eval_metrics['SPE'] = precision_score(y_test, preds)\n        return eval_metrics\n\n\n    def run_single_model(self, X, y):\n        model = Sequential()\n\n        train_X, test_X, train_y, test_y = train_test_split(X, y, test_size=0.3, random_state=0)\n\n        model.add(GRU(4, return_sequences=False, input_shape=(train_X.shape[1], train_X.shape[2])))\n        # GRU for mri\n        # GRU for csf\n        # model.add(Dense(units=2, activation='sigmoid')) # demographic\n        model.add(Dense(units=1, activation='sigmoid')) # logistic regression\n\n        print(train_X.shape, train_y.shape, test_X.shape, test_y.shape)\n\n        model.compile(\n            loss='mse', \n            optimizer='adam',\n            metrics=[AUC()]\n        )\n        # model.summary()\n\n        # fit network\n        gru_history = model.fit(\n            train_X, train_y, \n            epochs=100, # try 
different values\n            batch_size=64, \n            validation_data=(test_X, test_y), \n            shuffle=False\n        )\n\n        predictions = model.predict(test_X).ravel()\n        fpr, tpr, thresholds = roc_curve(test_y, predictions)\n        auc_val = auc(fpr, tpr)\n        return auc_val\n\n    \n\n\n    \n\n","sub_path":"MildInt.py","file_name":"MildInt.py","file_ext":"py","file_size_in_byte":6819,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"42112738","text":"from collections import deque\n\n\ndef neighbours(r, c, R, C):\n    for rows, cols in ((r-1, c), (r, c-1), (r+1, c), (r, c+1)):\n        if 0 <= rows < R and 0 <= cols < C:\n            yield rows, cols\n\ndef protect_the_sheep(matrix, R, C):\n    \"\"\"\n    Note: Wolves can't move diagonally.\n    Sheep and dogs stay in place;\n    no wolf can enter a cell with a dog.\n    Approach: find every wolf and fence off its empty neighbours.\n\n    We do not need to minimize the number of dogs, which keeps things simple:\n    a dog can go in every empty cell adjacent to a wolf.\n\n    Base case: if a wolf is already next to a sheep, that sheep cannot be\n    saved, so we inspect the neighbours of every wolf first.\n    :param matrix:\n    :param r:\n    :param c:\n    :return:\n    \"\"\"\n    q = deque()\n    for i, row in enumerate(matrix):\n        for c, val in enumerate(row):\n            if val == 'W':\n                q.append((i, c))\n    #print(\"The wolf locations are:\", q)\n\n    while q:\n        i, j = q.popleft()\n        for nei in neighbours(i, j, R, C):\n            #print(\"The neighbours are:\", nei)\n            nr, nc = nei\n            if matrix[nr][nc] == 'S':\n                print('No')\n                exit()\n            elif matrix[nr][nc] == '.':\n                matrix[nr][nc] = 'D'\n\n    print('Yes')\n    for row in matrix:\n        print(''.join(row))\n\n\n\n\nif __name__ == '__main__':\n    r, c = map(int, input().split())\n    i = 0\n    matrix = []\n    while i < r:\n        rows = [c for c in input()]\n        matrix.append(rows)\n        i += 1\n    #print(matrix)\n    protect_the_sheep(matrix, r, c)","sub_path":"contests/948/948A-protect-sheep-dfs.py","file_name":"948A-protect-sheep-dfs.py","file_ext":"py","file_size_in_byte":1513,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"460035147","text":"import uuid\nfrom typing import Optional\n\nimport strawberry\nfrom django.utils import timezone\nfrom strawberry.field import UNSET\nfrom strawberry.types import Info\nfrom .tools import get_user_id\n\nfrom . 
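On MildInt.Find_Optimal_Cutoff above: the pandas detour amounts to maximizing Youden's J statistic, tpr - fpr (the code's tpr - (1 - fpr) differs from it only by a constant). A compact equivalent with toy labels and scores (illustrative, assuming the usual sklearn roc_curve outputs):

```python
import numpy as np
from sklearn.metrics import roc_curve

y_true = np.array([0, 0, 1, 1])
y_score = np.array([0.1, 0.4, 0.35, 0.8])
fpr, tpr, thresholds = roc_curve(y_true, y_score)
# The threshold maximizing tpr - fpr is the Youden-optimal cutoff.
print(thresholds[np.argmax(tpr - fpr)])
```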
import models, types\n\n\n@strawberry.type\nclass TodoMutationDeleteResponse:\n deleted_id: uuid.UUID\n\n\n@strawberry.type\nclass TodoMutationError:\n code: int\n key: str\n message: str\n\n\ndef create_todo(info: \"Info\", name: str) -> types.Todo | TodoMutationError:\n user_id = get_user_id(info)\n if user_id is None:\n return TodoMutationError(code=403, key=\"user\", message=\"not logged in\")\n\n todo = models.Todo.objects.create(account_id=user_id, name=name)\n return types.Todo.from_(todo)\n\n\ndef update_todo(\n info: \"Info\", id: uuid.UUID, name: str = UNSET, description: str = UNSET\n) -> types.Todo | TodoMutationError:\n try:\n todo = models.Todo.objects.get(id=id, account_id=get_user_id(info))\n except models.Todo.DoesNotExist:\n return TodoMutationError(code=404, key=\"id\", message=\"not found\")\n\n if name is not UNSET:\n todo.name = name\n\n if description is not UNSET:\n todo.description = description\n\n todo.save()\n return types.Todo.from_(todo)\n\n\ndef delete_todo(info: \"Info\", id: uuid.UUID) -> TodoMutationDeleteResponse | TodoMutationError:\n try:\n todo = models.Todo.objects.get(id=id, account_id=get_user_id(info))\n except models.Todo.DoesNotExist:\n return TodoMutationError(code=404, key=\"id\", message=\"not found\")\n todo.delete()\n\n return TodoMutationDeleteResponse(deleted_id=id)\n\n\ndef add_todo_item(\n info: \"Info\", todo_id: uuid.UUID, title: str, description: Optional[str] = UNSET\n) -> types.TodoItem | TodoMutationError:\n try:\n todo = models.Todo.objects.get(id=todo_id, account_id=get_user_id(info))\n except models.Todo.DoesNotExist:\n return TodoMutationError(code=404, key=\"id\", message=\"not found\")\n\n item = models.TodoItem.objects.create(\n todo=todo, title=title, description=description if description != UNSET else \"\"\n )\n return types.TodoItem.from_(item)\n\n\ndef update_todo_item(\n info: \"Info\", id: uuid.UUID, title: Optional[str] = UNSET, description: Optional[str] = UNSET\n) -> types.TodoItem | TodoMutationError:\n try:\n item = models.TodoItem.objects.select_for_update().get(id=id, todo__account_id=get_user_id(info))\n except models.TodoItem.DoesNotExist:\n return TodoMutationError(code=404, key=\"id\", message=\"not found\")\n\n if title is not UNSET:\n item.title = title\n\n if description is not UNSET:\n item.description = description\n\n item.save()\n\n return types.TodoItem.from_(item)\n\n\ndef complete_todo_item(\n info: \"Info\", id: uuid.UUID, completed: bool = True\n) -> types.TodoItem | TodoMutationError:\n try:\n item = models.TodoItem.objects.get(id=id, todo__account_id=get_user_id(info))\n except models.TodoItem.DoesNotExist:\n return TodoMutationError(code=404, key=\"id\", message=\"not found\")\n\n if completed:\n item.completed_at = timezone.now()\n else:\n item.completed_at = None\n\n item.save(update_fields=[\"completed_at\"])\n\n return types.TodoItem.from_(item)\n\n\ndef delete_todo_item(info: \"Info\", id: uuid.UUID) -> TodoMutationDeleteResponse | TodoMutationError:\n try:\n item = models.TodoItem.objects.get(id=id, todo__account_id=get_user_id(info))\n except models.TodoItem.DoesNotExist:\n return TodoMutationError(code=404, key=\"id\", message=\"not found\")\n item.delete()\n return TodoMutationDeleteResponse(deleted_id=id)\n","sub_path":"backendpy/service/mutations.py","file_name":"mutations.py","file_ext":"py","file_size_in_byte":3587,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"420919135","text":"\"\"\"\"\ninterface.py\n\tThis program will 
create a user interface with the capability to choose an\n\taddress and gain various information based on that address.\n\nlibraries(sudo apt-get install...)\n\tpython3-qt5(PyQt5)\n\nAuthors:\nTommy Slota\nMathew Willig\nNicholas Miller\n\"\"\"\n#-----imports-----\nimport sys\nfrom PyQt5.QtWidgets import *\nfrom PyQt5 import *\nimport os\nfrom threading import Thread\n\n####To Matt: COLLECT AND APPEND ADDRESS HERE FROM PARSER####\n####There are static addresses below. Just Comment out####\nadr = []\n\n\n\"\"\"\n-----IPWindow-----\n- Makes a second window with IP as title\n\"\"\"\nclass IPWindow(QWidget):\n    def __init__(self, address):\n        super().__init__()\n        self.IPWIN(address)\n    \n    #=====Creates the IPWindow=====\n    #address-Button with address clicked\n    def IPWIN(self, address):\n        self.setFixedSize(400,300)\n        self.setGeometry(265, 1, 0, 0)\n        self.setWindowTitle(address) \n        self.show()\t\n\n\"\"\"\n-----MainWindow-----\n- Makes a small taskbar window\n- Adds IP addresses from parser as clickable buttons\n- Will open an \"IP Window\" if button is clicked(One at a time, but will be changed)\n\"\"\" \nclass MainWindow(QWidget):\n    def __init__(self):\n        super().__init__()\n        self.initUI()\n\n    #====Creates Main Taskbar====\n    def initUI(self):\n        \n        #-----Initialize the taskbar layout-----\n        box = QVBoxLayout(self)\n        self.setLayout(box)\n        scroll = QScrollArea(self)\n        box.addWidget(scroll)\n        scrollContent = QWidget(scroll)\n        scrollLayout = QVBoxLayout(scrollContent)\n        scrollContent.setLayout(scrollLayout)\n        \n        #-----Make \"adr\" static array of addresses-----\n        global adr\n        adr = [\"172.16.177.2\",\"255.255.255.255\",\"1.1.1.1\",\"111.111.111.111\", \"1.1.1.\",\"13241234\",\"1234123414\",\"123412341234\"] \n        self.setFixedSize(200, 300)\n        \n\t\n        #-----Makes Buttons and makes button a scrollable object-----\n        buttons = {}\n        size = 40 \n        for count in range(0,len(adr)):\n            buttons[count] = QPushButton(adr[count], self)\n            buttons[count].setFixedSize(140,30)\n#            buttons[count].move(0, 0+ (count * size))\n            buttons[count].clicked[bool].connect(self.action)\n            scrollLayout.addWidget(buttons[count])\n        scroll.setWidget(scrollContent)\n        \n        #-----Sets the title window-----\n        self.setGeometry(1, 1,0,0)\n        self.setWindowTitle('IP Addresses')\n        self.show()\n    \n    \"\"\"\n    -----Action-----\n    - When button is clicked, it opens a window based on the button\n    - Uses the IP address array\n    \"\"\"\n    def action(self):\n        source = self.sender()\n        global adr\n        for i in range(0,len(adr)):\n            if source.text() == adr[i]:\n                self.IPwin = IPWindow(adr[i])\n                self.IPwin.show()\n\n\"\"\"\n-----MAIN-----\n- MainWindow is executed and is also terminated when closed\n\"\"\"\nif __name__ == '__main__':\n\n    app = QApplication(sys.argv)\n    window = MainWindow()\n    sys.exit(app.exec_())\n","sub_path":"Interface/interface.py","file_name":"interface.py","file_ext":"py","file_size_in_byte":3060,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"539110283","text":"# -*- coding: utf-8 -*-\n\nimport os\n\n\nfrom fr.tagc.uorf.core.model import *\n\nfrom fr.tagc.uorf.core.util import Constants\nfrom fr.tagc.uorf.core.util import LogCodes\nfrom fr.tagc.uorf.core.util.sql.SQLManagerDS import SQLManagerDS\nfrom fr.tagc.uorf.core.util.sql.SQLManagerPRO import SQLManagerPRO\nfrom fr.tagc.uorf.core.util.sql.SQLManagerFILT import SQLManagerFILT\nfrom fr.tagc.uorf.core.util.sql import SQLConstants\nfrom fr.tagc.uorf.core.util.option.OptionManager import OptionManager\nfrom fr.tagc.uorf.core.util.option import 
OptionConstants\nfrom fr.tagc.uorf.core.util.general.FileHandlerUtil import FileHandlerUtil\nfrom fr.tagc.uorf.core.util.graphics.ProgressionBar import ProgressionBar\nfrom fr.tagc.uorf.core.util.exception import *\nfrom fr.tagc.uorf.core.util.log.Logger import Logger\n \n\n## RestoreStrategy\n# ===============\n#\n# This class is a strategy aiming to restore a database using \n# the files created with the Backup strategy.\n#\nclass RestoreStrategy( object ):\n \n ## Class variables\n # ---------------\n #\n # Orders of insertion of tables in the databases\n DS_ORDER_OF_INSERTION = [ 'SpeciesCatalog', 'Metadata', \n 'Gene', 'GeneAlias', 'UTGeneFromAlias',\n 'DataSource', 'DSORF', 'DSTranscript', 'DSORFTranscriptAsso' ]\n \n PRO_ORDER_OF_INSERTION = [ 'PROSpeciesCatalog', 'PROMetadata', \n 'PROGene', 'PROGeneAlias',\n 'UTRNABiotypeCatalog',\n 'ORF', 'ORFDSAsso', 'Transcript', 'TranscriptDSAsso',\n 'ORFTranscriptAsso', 'ORFTranscriptAssoDSAsso',\n 'CellContextCatalog', 'CellContext', \n 'ProvidedCategoryCatalog', 'ProvidedCategory',\n 'FLOSSClassCatalog', 'FLOSSClass',\n 'ORFCategoryCatalog', 'ORFCategory' ]\n \n \n ## Constructor of RestoreStrategy\n # ------------------------------\n #\n # Instance variables:\n # - db_settings: Dictionary - A dictionary of settings. This may include:\n # - The database name.\n # - The database type (SQLite / MySQL).\n # - For SQLite databases: the folder of SQLite file.\n # - For MySQL databases: the MySQL user, password, host IP and port.\n # - db_model: String - The name of the database model to use (PRO / DS).\n # - input_folder: String - The path of the folder where to find the data previously saved.\n # - file_prefix: String - The eventual prefix added to the file names. \n #\n # @throw DenCellORFException: When the provided database model is not one of those known.\n #\n def __init__( self ):\n \n # Get the options necessary to establish the connection to the database\n self.db_settings = {}\n self.db_settings[ Constants.DB_SETTINGS_DB_TYPE ] = OptionManager.get_instance().get_option( OptionConstants.OPTION_DB_TYPE )\n self.db_settings[ Constants.DB_SETTINGS_DB_NAME ] = OptionManager.get_instance().get_option( OptionConstants.OPTION_DB_NAME, \n not_none = True )\n \n if self.db_settings[ Constants.DB_SETTINGS_DB_TYPE ] == SQLConstants.DB_TYPE_MYSQL:\n self.db_settings[ Constants.DB_SETTINGS_MYSQL_USER ] = OptionManager.get_instance().get_option( OptionConstants.OPTION_DB_MYSQL_USER )\n self.db_settings[ Constants.DB_SETTINGS_MYSQL_PASSWD ] = OptionManager.get_instance().get_option( OptionConstants.OPTION_DB_MYSQL_PASSWD )\n self.db_settings[ Constants.DB_SETTINGS_MYSQL_HOST ] = OptionManager.get_instance().get_option( OptionConstants.OPTION_DB_MYSQL_HOST_IP )\n self.db_settings[ Constants.DB_SETTINGS_MYSQL_PORT ] = OptionManager.get_instance().get_option( OptionConstants.OPTION_DB_MYSQL_PORT )\n \n elif self.db_settings[ Constants.DB_SETTINGS_DB_TYPE ] == SQLConstants.DB_TYPE_SQLITE:\n self.db_settings[ Constants.DB_SETTINGS_DB_FOLDER ] = OptionManager.get_instance().get_option( OptionConstants.OPTION_DB_FOLDER )\n \n # Get the force overwrite option\n self.force_overwrite = OptionManager.get_instance().get_option( OptionConstants.OPTION_FORCE_OVERWRITE, \n not_none = False )\n \n # Get the model of database\n self.db_model = OptionManager.get_instance().get_option( OptionConstants.OPTION_DATABASE_MODEL, \n not_none = True )\n if ( self.db_model not in OptionConstants.AVAILABLE_DATABASE_MODELS ):\n raise DenCellORFException( 'The database model 
provided has to be in the following list: ' + \n                                       ', '.join( OptionConstants.AVAILABLE_DATABASE_MODELS ) + '.' )\n        \n        # Get the input folder\n        self.input_folder = OptionManager.get_instance().get_option( OptionConstants.OPTION_INPUT_FOLDER, not_none = False )\n        if ( not self.input_folder ):\n            # By default, the files are saved in a PRO / DS subfolder of the backup default folder\n            self.input_folder = os.path.join( Constants.BACKUP_DATA_FOLDER, self.db_model )\n        \n        # Get the eventual prefix added to the file names\n        self.file_prefix = OptionManager.get_instance().get_option( OptionConstants.OPTION_FILE_PREFIX, \n                                                                    not_none = False )\n    \n    \n    ## execute\n    # -------\n    #\n    # Execute the strategy to restore the database.\n    # \n    # @throw DenCellORFException: When a session to the database cannot be created.\n    # @throw DenCellORFException: When an exception has been raised trying to load the\n    #                             content of a file (generated by the Backup strategy).\n    # @throw DenCellORFException: When an exception has been raised trying to insert\n    #                             an entry into a table of the database.\n    # @throw DenCellORFException: When an exception has been raised trying to commit\n    #                             the session.\n    # \n    def execute( self ):\n        \n        # Set the connection to the database\n        self.get_sqlmanager_instance().set_db_settings( self.db_settings )\n        try:\n            self.get_sqlmanager_instance().get_instance().get_session()\n            self.get_sqlmanager_instance().get_instance().close_session()\n        except DenCellORFException as e:\n            raise DenCellORFException( 'RestoreStrategy.execute(): An error occurred while trying to' +\n                                       ' create a session to the database.' +\n                                       '\\n Error code: ' + LogCodes.ERR_SQL_SESSION + '.', e)\n        \n        # Check if the database already exists. \n        # If it exists, then ask the user to confirm the deletion of the database.\n        if ( ( not self.force_overwrite ) and ( self.get_sqlmanager_instance().db_exists() ) ):\n\n            confirm_deletion = None\n            Logger.get_instance().info( 'A database already exists at the provided connection settings.' +\n                                        ' Hence, any existing data needs to be removed prior to the insertion' +\n                                        ' of the data to restore.' )\n            while ( confirm_deletion not in [ 'Y', 'N' ] ):\n                print( 'Do you want to confirm the deletion of the database? (Y/N)' )\n                confirm_deletion = raw_input().upper()\n            \n            if ( confirm_deletion == 'N' ):\n                Logger.get_instance().critical( 'As a database already exists at the provided connection' +\n                                                ' settings and as the deletion of existing data has been' +\n                                                ' canceled by the user, the program will be stopped.' +\n                                                ' Please see the documentation for more information.' )\n        \n        # (Re-)create the empty database\n        self.get_sqlmanager_instance().build_database( db_settings = self.db_settings,\n                                                       species = None, \n                                                       sp_mandatory = False,\n                                                       force_overwrite = True )\n        \n        # Get the appropriate order in which the tables need to be filled in\n        order_of_insertion = eval( 'self.' + self.db_model + '_ORDER_OF_INSERTION' )
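\n        # Note (illustrative, not from the original code): the same lookup could be done\n        # without eval(), e.g. getattr( self, self.db_model + '_ORDER_OF_INSERTION' ),\n        # which avoids evaluating a dynamically built string.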
\n        \n        # For each table of the list, get the corresponding file, \n        # upload the content and insert the data in the database\n        for tablename in order_of_insertion:\n            \n            Logger.get_instance().debug( 'Starting to load and insert the data saved from the table ' + \n                                         tablename + '.' )\n            \n            # Get the name of the file (without its extension)\n            if self.file_prefix:\n                filename = self.file_prefix + tablename\n            else:\n                filename = tablename\n            \n            # Get the content of the file\n            try:\n                objects_to_insert = FileHandlerUtil.get_obj_from_file( input_folder = self.input_folder, \n                                                                       filename = filename )\n            except Exception as e:\n                raise DenCellORFException( 'An error occurred trying to import the objects to insert in the ' +\n                                           tablename + ' table.', e )\n            \n            Logger.get_instance().debug( str( len( objects_to_insert ) ) + ' entries are expected' +\n                                         ' to be inserted into the ' + tablename + ' table.' )\n            \n            # Insert the data \n            # NB: Using the add_all() method of the session does not work (probably because\n            #     the objects saved in the file were mapped to the session). Hence, it is \n            #     necessary to add the objects one at a time using the merge method.\n            \n            # Get the total number of elements expected to be treated and\n            # reset the ProgressionBar instance to follow the progression\n            ProgressionBar.get_instance().reset_instance( total = len( objects_to_insert ) )\n            \n            for entry in objects_to_insert:\n                \n                # Update and display the progression bar on the console\n                ProgressionBar.get_instance().increase_and_display()\n\n                try:\n                    self.get_sqlmanager_instance().get_session().merge( entry )\n                except Exception as e:\n                    raise DenCellORFException( 'An error occurred trying to insert the data into the ' +\n                                               tablename + ' table. Please make sure the backup occurred' +\n                                               ' successfully.', e )\n            \n            # Commit the session\n            try:\n                self.get_sqlmanager_instance().commit()\n            except Exception as e:\n                raise DenCellORFException( 'An error occurred trying to commit changes after insertion' +\n                                           ' of data in the ' + tablename + ' table.' +\n                                           '\\n Error code: ' + LogCodes.ERR_SQL_SESSION + '.', e )\n            \n            entry_count = self.get_sqlmanager_instance().get_session().query( eval( tablename ) ).count()\n            Logger.get_instance().debug( str( entry_count ) + ' entries have been successfully added' +\n                                         ' to the ' + tablename + ' table.' )\n            self.get_sqlmanager_instance().close_session()\n        \n        # Log the end of the restoration\n        Logger.get_instance().info( 'Restoration of the database has finished.' 
)\n \n \n \n ## get_sqlmanager_instance\n # -----------------------\n #\n # Return the appropriate SQLManager instance.\n #\n # @return SQLManager instance corresponding to the database model used.\n # \n def get_sqlmanager_instance( self ):\n \n sqlmanager_class = eval( 'SQLManager' + self.db_model )\n \n return sqlmanager_class.get_instance()\n ","sub_path":"06_src/fr/tagc/uorf/core/execution/RestoreStrategy.py","file_name":"RestoreStrategy.py","file_ext":"py","file_size_in_byte":12699,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"418588414","text":"from class_exam.exam_functions import list_from_file, read_list, rearrange_list\nfrom mock import call, patch\nimport unittest\n\n\nclass SetupList(unittest.TestCase):\n def setUp(self):\n self.user_list = ['user_1', 'user_2', 'user_3', 'user_4']\n\n\nclass TestListFromFile(SetupList):\n @patch('__builtin__.open')\n def test_read_file(self, open_mock):\n file_list = ['user_1\\n', 'user_2\\n', 'user_3\\n', 'user_4\\n']\n open_mock.return_value = file_list\n output = list_from_file(open_mock)\n self.assertEqual(output, self.user_list)\n\n @patch('__builtin__.open')\n def test_read_file_error(self, open_mock):\n open_mock.side_effect = IOError\n output = list_from_file(open_mock)\n self.assertEqual(output, \"You're in the wrong directory.\")\n\n\nclass TestReadList(SetupList):\n @patch('__builtin__.raw_input')\n @patch('sys.stdout.write')\n def test_user_found(self, print_mock, raw_mock):\n raw_mock.return_value = 'user_2'\n output = read_list(self.user_list)\n print_mock.assert_has_calls([call(\"You're on the list.\"), call('\\n')])\n self.assertEqual(output, ('user_2', True))\n self.assertEqual(print_mock.called, True)\n self.assertEqual(raw_mock.call_count, 1)\n self.assertEqual(print_mock.call_count, 2)\n\n @patch('__builtin__.raw_input')\n @patch('sys.stdout.write')\n def test_read_user_not_found(self, print_mock, raw_mock):\n raw_mock.return_value = 'user_5'\n output = read_list(self.user_list)\n print_mock.assert_has_calls([call(\"You're not on the list, user_5\"),\n call('\\n')])\n self.assertEqual(output, ('user_5', False))\n self.assertEqual(print_mock.called, True)\n self.assertEqual(raw_mock.call_count, 1)\n self.assertEqual(print_mock.call_count, 2)\n\n\nclass TestRearrangeList(SetupList):\n def test_rearrange_list(self):\n new_list = ['user_3', 'user_1', 'user_2', 'user_4']\n output = rearrange_list('user_3', self.user_list)\n self.assertEqual(output, new_list)","sub_path":"tests/test_exam_functions.py","file_name":"test_exam_functions.py","file_ext":"py","file_size_in_byte":2085,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"70336556","text":"\"\"\"\nLab 2.5 - Modified NIST (MNIST) Multi-Class Classification Network\n\nAuthor:\n- Rodrigo Jorge Ribeiro (rj.ribeiro@campus.fct.unl.pt)\n- Ruben Andre Barreiro (r.barreiro@campus.fct.unl.pt)\n\n\"\"\"\n\n# Import the Libraries and Packages\n\n# Import the Operative System Library as operative_system\nimport os as operative_system\n\n# Disable all the Debugging Logs from TensorFlow Library\noperative_system.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'\n\n# Import the TensorFlow Library as tensorflow alias\nimport tensorflow as tensorflow\n\n# Import Keras Module from TensorFlow Library as keras alias\nfrom tensorflow import keras as keras\n\n# Import the Numpy Library as numpy\nimport numpy as numpy\n\n# Import the DateTime Module from the DateTime Library\nfrom datetime import 
datetime as date_time\n\n# Retrieve the current DateTime, as custom format\nnow_date_time = date_time.utcnow().strftime(\"%Y%m%d%H%M%S\")\n\n# Set the Root Directory for the Logs\nroot_log_directory = \"logs\"\n\n# Set the specific Log Directory, according to the current Date and Time (timestamp)\nlog_directory = \"{}/model-{}/\".format(root_log_directory, now_date_time)\n\n\n# Retrieve the Dataset of the Modified NIST (MNIST)\nmnist_dataset = keras.datasets.mnist\n\n# Load the xs (Features/Images) and ys (Labels), from the Dataset, for Training and Testing\n# The Training Set contains 60,000 examples (Images) and the Testing Set contains 10,000 examples (Images)\n(xs_training_images, ys_training_labels), (xs_testing_images, ys_testing_labels) = mnist_dataset.load_data()\n\n# Select the first 50,000 xs (Features/Images) for Training from the Training Set\nxs_data_training_images_normalized = (xs_training_images[:50000].reshape(-1, 28*28) / 255.0)\n\n# Select the first 50,000 ys (Labels) for Training from the Training Set\nys_data_training_labels = keras.utils.to_categorical(ys_training_labels[:50000])\n\n# Select the last 10,000 xs (Features/Images) for Validation from the Training Set\nxs_data_validation_images_normalized = (xs_training_images[50000:].reshape(-1, 28*28) / 255.0)\n\n# Select the last 10,000 ys (Labels) for Validation from the Training Set\nys_data_validation_labels = keras.utils.to_categorical(ys_training_labels[50000:])\n\n\n# Function to generate the Variables of a Layer for the Artificial Neural Network (ANN),\n# i.e., Weights of the Neurons and the Bias, given the input xs (Features) of\n# the Data of the Modified NIST (MNIST) or of the Weights of the previous Layer\ndef generate_artificial_neural_network_layer(inputs_xs_data, num_neurons):\n\n # Create the Weights of the Neurons for the Layer of Neurons\n layer_neurons_weights = tensorflow.Variable(tensorflow.random.normal((inputs_xs_data.shape[1], num_neurons),\n stddev=(1 / num_neurons)))\n\n # Create the Bias for the Layer of Neurons\n layer_bias = tensorflow.Variable(tensorflow.zeros([num_neurons]))\n\n # Return the Weights of the Neurons and the Bias for the Layer of Neurons\n return layer_neurons_weights, layer_bias\n\n\n# Function to create the Artificial Neural Network (ANN), given the initial input xs (Features) of\n# the Data of the Modified NIST (MNIST)\ndef create_artificial_neural_network(inputs_xs_data, num_neurons_layer):\n\n # Initialise/Create the Artificial Neural Network\n artificial_neural_network = []\n\n # Initialise/Create the Variables,\n # i.e., the Weights of the Neurons and Bias for each Layer of the Artificial Neural Network (ANN)\n layers_variables = []\n\n # Initialise/Create the previous xs (Features) of\n # the current Layer of the Artificial Neural Network (ANN),\n # with the initial xs (Features) of the Data of the Modified NIST (MNIST)\n previous_inputs_xs_data = inputs_xs_data\n\n # For each Layer's Index and Number of Neurons on it\n for layer_index, layer_num_neurons in enumerate(num_neurons_layer):\n\n # Generate the Variables of a Layer for the Artificial Neural Network (ANN),\n # i.e., Weights of the Neurons and the Bias, given the input xs (Features) of\n # the Data of the Modified NIST (MNIST) or of the Weights of the previous Layer\n layer_neurons_weights, layer_bias = \\\n generate_artificial_neural_network_layer(previous_inputs_xs_data, layer_num_neurons)\n\n # Append the computed Variables (Weights of the Neurons and the Bias) for\n # the current Layer of the Artificial 
Neural Network (ANN)\n        artificial_neural_network.append((layer_neurons_weights, layer_bias))\n\n        # Extend the computed Variables (Weights of the Neurons and the Bias) for\n        # the current Layer of the Artificial Neural Network (ANN)\n        layers_variables.extend((layer_neurons_weights, layer_bias))\n\n        # Set the previous xs (Features) of\n        # the current Layer of the Artificial Neural Network (ANN)\n        previous_inputs_xs_data = layer_neurons_weights\n\n    # Return the Artificial Neural Network (ANN) and the Variables\n    # (Weights of the Neurons and the Bias) for each Layer of it\n    return artificial_neural_network, layers_variables\n\n\n# Set the number of Neurons for each Layer of the Artificial Neural Network (ANN)\nnum_neurons_for_each_layer = [28, 784, 10]\n\n# Create the Artificial Neural Network (ANN) and the Variables for each Layer\n# (i.e., the Weights of the Neurons and the Bias); the Images are flattened to\n# (28 * 28) Features, since the forward pass below consumes flattened vectors\nneural_network, variables_layers = \\\n    create_artificial_neural_network(xs_training_images.reshape(-1, 28*28), num_neurons_for_each_layer)\n\n\n# Function to predict the ys (Labels) of the Data of the Modified NIST (MNIST)\ndef neural_network_prediction(inputs_xs_data):\n\n    # Initialise the Artificial Neural Network (ANN), with the xs (Features) of\n    # the Data of the Modified NIST (MNIST)\n    artificial_neural_network = inputs_xs_data\n\n    # Initialise/Create the number of the current Layer of the Artificial Neural Network (ANN)\n    num_artificial_neural_network_layer = 1\n\n    # For the Weights of Neurons and Bias of each Layer of the Artificial Neural Network (ANN)\n    for artificial_neural_network_layer_neurons_weights, \\\n        artificial_neural_network_layer_bias in neural_network[:-1]:\n\n        # Initialise the Context Manager for the Names of Scopes of the Hidden Layers,\n        # to name groups of operations, in order to make the Graph clearer\n        with tensorflow.name_scope(f\"Layer_{num_artificial_neural_network_layer}\"):\n\n            # Update the Artificial Neural Network (ANN) with\n            # the Sum of the Multiplication between the input xs (Features) of\n            # the current Layer of the Artificial Neural Network (ANN) and the Weights of the Neurons,\n            # and the Bias for the current Layer of the Artificial Neural Network (ANN)\n            artificial_neural_network = \\\n                tensorflow.add(tensorflow.matmul(artificial_neural_network,\n                                                 artificial_neural_network_layer_neurons_weights),\n                               artificial_neural_network_layer_bias, name=\"net\")\n\n            # Compute the SoftMax as Activation Function of\n            # the Artificial Neural Network (ANN)\n            artificial_neural_network = tensorflow.nn.softmax(artificial_neural_network, name=\"softmax\")\n\n        # Increment the number of the current Layer of the Artificial Neural Network (ANN)\n        num_artificial_neural_network_layer += 1\n\n    # Retrieve the Weights of Neurons and Bias of the last/output Layer of\n    # the Artificial Neural Network (ANN)\n    last_artificial_neural_network_layer_neurons_weights, \\\n    last_artificial_neural_network_layer_bias = neural_network[-1]\n\n    # Initialise the Context Manager for the Name of Scope of the Output Layer,\n    # to name groups of operations, in order to make the Graph clearer\n    with tensorflow.name_scope(\"Output\"):\n\n        # Update the Artificial Neural Network (ANN) with\n        # the last Sum of the Multiplication between the input xs (Features) of\n        # the last/output Layer of the Artificial Neural Network (ANN) and the Weights of the Neurons,\n        # and the Bias for the last/output Layer of the Artificial Neural Network (ANN)\n        artificial_neural_network = \\\n            tensorflow.add(tensorflow.matmul(artificial_neural_network,\n                                             last_artificial_neural_network_layer_neurons_weights),\n                           last_artificial_neural_network_layer_bias)\n\n    # Return the output (the Logits) of the Neural Network\n    return artificial_neural_network
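\n\n\n# Illustrative sanity check (an assumption of this sketch, not part of the original lab\n# script): a single forward pass on a dummy batch should yield one logit per digit class.\ndummy_batch = tensorflow.zeros((4, 28 * 28))\nprint(neural_network_prediction(dummy_batch).shape)  # expected: (4, 10)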
\n\n\n# The function to compute the Cost of the Logistic Error Loss,\n# between the predicted ys (Labels) of the Data of the Modified NIST (MNIST),\n# through the Artificial Neural Network (ANN) and the real ys (Labels) of the Data of the Modified NIST (MNIST)\ndef compute_logistic_error_loss_softmax_activation(xs_data_features_to_predict, ys_real_data_labels):\n\n    # Compute the Logits for the xs (Features) of the Data of the Modified NIST (MNIST)\n    tensorflow_network = neural_network_prediction(xs_data_features_to_predict)\n\n    # Compute the Cost of the Logistic Error Loss, with the Logits as argument,\n    # which are the activation values before applying the Softmax Activation Function\n    logistic_error_loss_cost = tensorflow.reduce_mean(\n        tensorflow.nn.softmax_cross_entropy_with_logits(ys_real_data_labels, tensorflow_network)\n    )\n\n    # Return the Cost of the Logistic Error Loss\n    return logistic_error_loss_cost\n\n\n# The function to compute the Gradient of the Logistic Error Loss function\ndef compute_gradient(xs_data_features_to_predict, ys_real_data_labels, layer_variables):\n\n    # Create the Gradient Tape to trace all the Computations and\n    # to compute the Derivatives\n    with tensorflow.GradientTape() as tape:\n\n        # Compute the Cost of the Logistic Error Loss directly from the xs (Features) of\n        # the Data of the Modified NIST (MNIST), so the Artificial Neural Network (ANN)\n        # is applied exactly once inside the Gradient Tape\n        loss_cost_value = compute_logistic_error_loss_softmax_activation(xs_data_features_to_predict,\n                                                                         ys_real_data_labels)\n\n    # Return the Gradient Tape with all the traced Computations and\n    # computed Derivatives, as also, the Weights of Neurons and Bias\n    return tape.gradient(loss_cost_value, layer_variables), layer_variables
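\n\n\n# Aside (illustrative only, not from the original lab): softmax_cross_entropy_with_logits\n# is a numerically stable shorthand for applying the softmax and a negative log-likelihood\n# by hand, roughly:\n#     probabilities = tensorflow.nn.softmax(logits)\n#     loss = tensorflow.reduce_mean(\n#         -tensorflow.reduce_sum(labels * tensorflow.math.log(probabilities + 1e-9), axis=1))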
\n\n\n# The function to create a graph for Tensorboard\n# (for the computation of the Predictions, in this case)\n@tensorflow.function\ndef create_graph_tensorboard(xs_data_features_to_predict):\n\n    # Predict the ys (Labels) of the Data of the Modified NIST (MNIST)\n    neural_network_prediction(xs_data_features_to_predict)\n\n\n# The function to write the graph for Tensorboard\ndef write_graph_tensorboard(xs_data_features_to_predict, file_writer_logs):\n\n    # Starts a trace to record computation graphs and profiling information\n    tensorflow.summary.trace_on(graph=True)\n\n    # Create a graph for Tensorboard\n    # (for the computation of the Predictions, in this case)\n    create_graph_tensorboard(tensorflow.constant(xs_data_features_to_predict.astype(numpy.float32)))\n\n    # With the given File Writer by parameter, as default\n    with file_writer_logs.as_default():\n\n        # Stops and exports the active trace as a Summary and/or profile file,\n        # and exports all metadata collected during the trace to the default SummaryWriter,\n        # if one has been set (the File Writer, in this case)\n        tensorflow.summary.trace_export(name=\"trace\", step=0)\n\n\n# Create a summary File Writer for the given Log Directory, specified before\nfile_writer = tensorflow.summary.create_file_writer(log_directory)\n\n# Write the graph for Tensorboard\nwrite_graph_tensorboard(xs_training_images.reshape(-1, 28*28), file_writer)\n\n# Configure the TensorFlow Optimizer for the Stochastic Gradient Descent (SGD),\n# with a Learning Rate of 0.005 and a Momentum of 0.9\nstochastic_gradient_descent_optimizer = tensorflow.optimizers.SGD(learning_rate=0.005, momentum=0.9)\n\n# Set the Batch Size (i.e., the number of Samples/Examples) to\n# work through before updating the Internal Model Parameters of the Learning Algorithm;\n# for Mini-Batch Stochastic Gradient Descent (SGD), a small power of two such as 32\n# is a common choice for this hyper-parameter\nbatch_size = 32\n\n# Set the number of Batches, per Epoch, over the 50,000 examples of the Training Set\nnum_batches_per_epoch = (xs_data_training_images_normalized.shape[0] // batch_size)\n\n# Set the number of Epochs (i.e., the number of times) that the Learning Algorithm\n# (the Stochastic Gradient Descent (SGD), in this case) will work through the entire Dataset\nnum_epochs = 1000\n\n# Initialize the sum of the Logistic Error Loss, for the Training Set\ntraining_logistic_loss_sum = 0\n\n# Initialize the sum of the Logistic Error Loss, for the Validation Set\nvalidation_logistic_loss_sum = 0\n\n\n# Execute the Artificial Neural Network (ANN), for the Prediction of the Data of the Modified NIST (MNIST)\ndef execute_artificial_neural_network(file_writer_logs):\n\n    # Define the scope for the sum of the Logistic Error Loss, for the Training Set\n    global training_logistic_loss_sum\n\n    # Define the scope for the sum of the Logistic Error Loss, for the Validation Set\n    global validation_logistic_loss_sum\n\n    # For each Epoch (i.e., each step of the Learning Algorithm)\n    for current_epoch in range(num_epochs):\n\n        # Shuffle the indices of the Training examples of the Data of the Modified NIST (MNIST)\n        ys_data_labels_shuffled = numpy.arange(len(ys_data_training_labels))\n        numpy.random.shuffle(ys_data_labels_shuffled)\n\n        # For each Batch (set of Samples), defined for a single Epoch\n        for current_num_batch in range(num_batches_per_epoch):\n\n            # Define the start index of the Samples\n            start_num_sample = (current_num_batch * batch_size)\n\n            # Retrieve the chosen Samples from the xs (Features) of the Data of the Modified NIST (MNIST),\n            # from the Training Set\n            batch_xs_data_training_images_normalized = \\\n                tensorflow.constant(xs_data_training_images_normalized[ys_data_labels_shuffled[start_num_sample:\n                                    (start_num_sample + batch_size)], :]\n                                    .astype(numpy.float32))\n\n            # Retrieve the chosen Samples from the ys (Labels) of the Data of the Modified NIST (MNIST),\n            # from the Training Set\n            batch_ys_data_training_labels = \\\n                tensorflow.constant(ys_data_training_labels[ys_data_labels_shuffled[start_num_sample:\n                                    (start_num_sample + batch_size)]]\n                                    .astype(numpy.float32))\n\n            # Compute the Gradient of the Logistic Error Loss function for\n            # the chosen Samples from the xs (Features) and ys (Labels) of the Data of the Modified NIST (MNIST)\n            gradients, variables = compute_gradient(batch_xs_data_training_images_normalized,\n                                                    batch_ys_data_training_labels,\n                                                    variables_layers)\n\n            # Apply the Gradients previously computed to the Learning Algorithm (Stochastic Gradient Descent (SGD))\n            stochastic_gradient_descent_optimizer.apply_gradients(zip(gradients, variables))
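\n\n        # Illustrative extension (not in the original lab): classification accuracy could be\n        # tracked per Epoch alongside the loss, e.g.:\n        #     logits = neural_network_prediction(tensorflow.constant(\n        #         xs_data_validation_images_normalized.astype(numpy.float32)))\n        #     accuracy = tensorflow.reduce_mean(tensorflow.cast(\n        #         tensorflow.equal(tensorflow.argmax(logits, axis=1),\n        #                          tensorflow.argmax(ys_data_validation_labels, axis=1)),\n        #         tensorflow.float32))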
\n\n        # Compute the Cost of the Logistic Error Loss on the Training Set, applying\n        # the Artificial Neural Network (ANN) to the xs (Features) and comparing the\n        # predicted ys (Labels) with the real ys (Labels) of the Data of the Modified NIST (MNIST)\n        training_logistic_loss = \\\n            compute_logistic_error_loss_softmax_activation(\n                tensorflow.constant(xs_data_training_images_normalized.astype(numpy.float32)),\n                tensorflow.constant(ys_data_training_labels.astype(numpy.float32)))\n\n        # Compute the Cost of the Logistic Error Loss on the Validation Set, applying\n        # the Artificial Neural Network (ANN) to the xs (Features) and comparing the\n        # predicted ys (Labels) with the real ys (Labels) of the Data of the Modified NIST (MNIST)\n        validation_logistic_loss = \\\n            compute_logistic_error_loss_softmax_activation(\n                tensorflow.constant(xs_data_validation_images_normalized.astype(numpy.float32)),\n                tensorflow.constant(ys_data_validation_labels.astype(numpy.float32)))\n\n        # Print the Logistic Error Loss for the current Epoch of\n        # the execution of the Artificial Neural Network (ANN), for the Training and Validation Sets\n        print(f\"Current Epoch: {current_epoch}, \"\n              f\"Training Logistic Error Loss: {training_logistic_loss}, \"\n              f\"Validation Logistic Error Loss: {validation_logistic_loss}...\")\n\n        # With the given File Writer by parameter, as default\n        with file_writer_logs.as_default():\n\n            # Write a scalar summary, regarding the Training Loss\n            tensorflow.summary.scalar(\"Training Loss: \", training_logistic_loss, step=current_epoch)\n\n            # Write a scalar summary, regarding the Validation Loss\n            tensorflow.summary.scalar(\"Validation Loss: \", validation_logistic_loss, step=current_epoch)\n\n        # Sum the current Logistic Error Loss to its accumulator, for the Training Set\n        training_logistic_loss_sum = (training_logistic_loss_sum + training_logistic_loss)\n\n        # Sum the current Logistic Error Loss to its accumulator, for the Validation Set\n        validation_logistic_loss_sum = (validation_logistic_loss_sum + validation_logistic_loss)\n\n    # Close the File Writer, for the Logs\n    file_writer_logs.close()\n\n    # Compute the average of the Logistic Error Loss, for the Training Set\n    training_logistic_loss_average = (training_logistic_loss_sum / num_epochs)\n\n    # Compute the average of the Logistic Error Loss, for the Validation Set\n    validation_logistic_loss_average = (validation_logistic_loss_sum / num_epochs)\n\n    # Print the information about the average of the Logistic Error Loss, for the Training Set\n    print(\"\\nThe average Logistic Error Loss for {} Epochs, in the Training Set, is: {}\\n\"\n          .format(num_epochs, training_logistic_loss_average))\n\n    # Print the information about the average of the Logistic Error Loss, for the Validation Set\n    print(\"\\nThe average Logistic Error Loss for {} Epochs, in the Validation Set, is: {}\\n\"\n          .format(num_epochs, validation_logistic_loss_average))
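\n\n\n# Usage note (illustrative): once a run has written its logs under the \"logs\" folder,\n# the recorded graph and the loss curves can be inspected with the standard TensorBoard CLI:\n#     tensorboard --logdir logs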
\n\n\n# Print the configuration of the Artificial Neural Network (ANN) being used\nprint(\"\\n\\nStart the execution of the Artificial Neural Network (ANN), with {} Epochs, Batch Size of {},\\n\"\n      \"with the Learning Algorithm of Stochastic Gradient Descent (SGD), \"\n      \"for the Dataset of the Modified NIST (MNIST)...\\n\".format(num_epochs, batch_size))\n\n# Start the execution of the Artificial Neural Network (ANN), for the Prediction of\n# the Data of the Modified NIST (MNIST)\nexecute_artificial_neural_network(file_writer)\n","sub_path":"labs/lab-2/exercises/lab-2.5-modified-nist-multi-class-classification-network/2.5-modified-nist-multi-class-classification-network.py","file_name":"2.5-modified-nist-multi-class-classification-network.py","file_ext":"py","file_size_in_byte":20328,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"616839351","text":"from wrapt import ObjectProxy\nimport pymongo\nimport six, time, os\n\n\nfrom signoz import Singleton\nstatsd = Singleton.getStatsd()\n\n_MongoClient = pymongo.MongoClient\n\n\nMONGO_ADDRESS = getattr(_MongoClient, 'HOST') + \":\" + str(getattr(_MongoClient, 'PORT'))\n\nREQUEST_COUNT_METRIC_NAME = \"mongo_request_count\"\nREQUEST_LATENCY_METRIC_NAME = 'mongo_request_latency_seconds'\n\nclass Command(object):\n    \"\"\" Command stores information about a pymongo network command. \"\"\"\n\n    __slots__ = ['name', 'coll', 'db', 'tags', 'metrics', 'query']\n\n    def __init__(self, name, db, coll):\n        self.name = name\n        self.coll = coll\n        self.db = db\n        self.tags = {}\n        self.metrics = {}\n        self.query = None\n\n    def __repr__(self):\n        return (\n            'Command('\n            'name=%s,'\n            'db=%s,'\n            'coll=%s)'\n        ) % (self.name, self.db, self.coll)\n\ndef parse_query(query):\n    \"\"\" Return a command parsed from the given mongo db query. \"\"\"\n    db, coll = None, None\n    ns = getattr(query, 'ns', None)\n    if ns:\n        # version < 3.1 stores the full namespace\n        db, coll = _split_namespace(ns)\n    else:\n        # version >= 3.1 stores the db and coll seperately\n        coll = getattr(query, 'coll', None)\n        db = getattr(query, 'db', None)\n\n    # pymongo < 3.1 _Query does not have a name field, so default to 'query'\n    cmd = Command(getattr(query, 'name', 'query'), db, coll)\n    cmd.query = query.spec\n    return cmd\n\n\n\n# DEV: There is `six.u()` which does something similar, but doesn't have the guard around `hasattr(s, 'decode')`\ndef to_unicode(s):\n    \"\"\" Return a unicode string for the given bytes or string instance. \"\"\"\n    # No reason to decode if we already have the unicode compatible object we expect\n    # DEV: `six.text_type` will be a `str` for python 3 and `unicode` for python 2\n    # DEV: Double decoding a `unicode` can cause a `UnicodeEncodeError`\n    # e.g. `'\\xc3\\xbf'.decode('utf-8').decode('utf-8')`\n    if isinstance(s, six.text_type):\n        return s\n\n    # If the object has a `decode` method, then decode into `utf-8`\n    # e.g. Python 2 `str`, Python 2/3 `bytearray`, etc\n    if hasattr(s, 'decode'):\n        return s.decode('utf-8')\n\n    # Always try to coerce the object into the `six.text_type` object we expect\n    # e.g. `to_unicode(1)`, `to_unicode(dict(key='value'))`\n    return six.text_type(s)
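\n\n# Illustrative behaviour of to_unicode (assumed examples, not from the original module):\n#     to_unicode(b'db.coll')   # -> u'db.coll'\n#     to_unicode(u'db.coll')   # -> u'db.coll' (returned unchanged)\n#     to_unicode(12)           # -> u'12' (coerced via six.text_type)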
\"\"\"\n if ns:\n # NOTE[matt] ns is unicode or bytes depending on the client version\n # so force cast to unicode\n split = to_unicode(ns).split('.', 1)\n if len(split) == 1:\n raise Exception(\"namespace doesn't contain period: %s\" % ns)\n return split\n return (None, None)\n\nclass TracedMongoClient(ObjectProxy):\n\n def __init__(self, client=None, *args, **kwargs):\n # To support the former trace_mongo_client interface, we have to keep this old interface\n # TODO(Benjamin): drop it in a later version\n if not isinstance(client, _MongoClient):\n # Patched interface, instantiate the client\n\n # client is just the first arg which could be the host if it is\n # None, then it could be that the caller:\n\n # if client is None then __init__ was:\n # 1) invoked with host=None\n # 2) not given a first argument (client defaults to None)\n # we cannot tell which case it is, but it should not matter since\n # the default value for host is None, in either case we can simply\n # not provide it as an argument\n if client is None:\n client = _MongoClient(*args, **kwargs)\n # else client is a value for host so just pass it along\n else:\n client = _MongoClient(client, *args, **kwargs)\n\n super(TracedMongoClient, self).__init__(client)\n # NOTE[matt] the TracedMongoClient attempts to trace all of the network\n # calls in the trace library. This is good because it measures the\n # actual network time. It's bad because it uses a private API which\n # could change. We'll see how this goes.\n client._topology = TracedTopology(client._topology)\n\n\n\nclass TracedTopology(ObjectProxy):\n\n def __init__(self, topology):\n super(TracedTopology, self).__init__(topology)\n\n def select_server(self, *args, **kwargs):\n s = self.__wrapped__.select_server(*args, **kwargs)\n if not isinstance(s, TracedServer):\n s = TracedServer(s)\n\n return s\n\nclass TracedServer(ObjectProxy):\n\n def __init__(self, server):\n super(TracedServer, self).__init__(server)\n\n def _signoz_trace_operation(self, operation):\n cmd = None\n # Only try to parse something we think is a query.\n if self._is_query(operation):\n try:\n cmd = parse_query(operation)\n except Exception:\n print ('error parsing query')\n\n # print (\"DB: \", cmd.db)\n # print (\"Collection: \", cmd.coll)\n # print (\"Tags: \", cmd.tags)\n\n return (cmd.db, cmd.coll, cmd.tags)\n\n\n\n # Pymongo >= 3.9\n def run_operation_with_response(self, sock_info, operation, *args, **kwargs):\n\n\n (db, collection, tags) = self._signoz_trace_operation(operation)\n\n statsd.increment(REQUEST_COUNT_METRIC_NAME,\n tags=[\n 'app_name:%s' % os.environ['APP_NAME'],\n 'kubernetes_namespace:%s' % os.environ['POD_NAMESPACE'],\n 'kubernetes_pod_name:%s' % os.environ['POD_NAME'],\n 'command:%s' % getattr(operation, 'name'), \n 'db:%s' % db, \n 'collection:%s' % collection,\n 'address:%s' % MONGO_ADDRESS, \n ]\n )\n\n start_time = time.time()\n result = self.__wrapped__.run_operation_with_response(\n sock_info,\n operation,\n *args,\n **kwargs\n )\n execution_time = (time.time() - start_time) * 1000\n\n statsd.histogram(REQUEST_LATENCY_METRIC_NAME,\n execution_time,\n tags=[\n 'app_name:%s' % os.environ['APP_NAME'],\n 'command:%s' % getattr(operation, 'name'),\n 'address:%s' % MONGO_ADDRESS,\n ]\n )\n # print (\"-> Mongo (Time Taken): \", execution_time)\n # print (\"Pymongo - Response: \", result)\n # print (\"Pymongo - Response Address: \", result.address)\n \n return result\n\n\n\n # Pymongo < 3.9\n \n def send_message_with_response(self, operation, *args, **kwargs):\n \n (db, 
\n\n\n\n    # Pymongo < 3.9\n    \n    def send_message_with_response(self, operation, *args, **kwargs):\n        \n        (db, collection, tags) = self._signoz_trace_operation(operation)\n\n        statsd.increment(REQUEST_COUNT_METRIC_NAME,\n            tags=[\n                'app_name:%s' % os.environ['APP_NAME'],\n                # 'kubernetes_namespace:%s' % os.environ['POD_NAMESPACE'],\n                # 'kubernetes_pod_name:%s' % os.environ['POD_NAME'],\n                'command:%s' % getattr(operation, 'name'), \n                'db:%s' % db, \n                'collection:%s' % collection,\n                'address:%s' % MONGO_ADDRESS, \n            ]\n        )\n\n        start_time = time.time()\n\n        result = self.__wrapped__.send_message_with_response(\n            operation,\n            *args,\n            **kwargs\n        )\n\n        execution_time = (time.time() - start_time)*1000\n\n        statsd.histogram(REQUEST_LATENCY_METRIC_NAME,\n            execution_time,\n            tags=[\n                'app_name:%s' % os.environ['APP_NAME'],\n                'command:%s' % getattr(operation, 'name'),\n                'address:%s' % MONGO_ADDRESS,\n            ]\n        )\n\n        # print (\"Pymongo - Response: \", result)\n        # print (\"Pymongo - Response Address: \", result.address)\n\n        return result\n\n\n    @staticmethod\n    def _is_query(op):\n        # NOTE: _Query should always have a spec field\n        return hasattr(op, 'spec')\n\n\nsetattr(pymongo, 'MongoClient', TracedMongoClient)\n","sub_path":"build/lib/signoz/instrumentation/hook_pymongo.py","file_name":"hook_pymongo.py","file_ext":"py","file_size_in_byte":8209,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"140305633","text":"import MySQLConfig as Config\r\nimport MediaSQL as LocalConfig\r\nimport pandas as pd\r\nimport numpy as np\r\nimport datetime, pygsheets, httplib2, GAPI\r\nimport MediaLogger\r\nimport GoogleDriveUpload as GDU\r\n\r\nclass LastLaunch():\r\n\tdef __init__(self):\r\n\t\tself.Loadpygsheets()\r\n\t\tself.GeoList = ['US', 'UK', 'CA', 'AU']\r\n\t\t\r\n\tdef Loadpygsheets(self):\r\n\t\tself.gc = GAPI.gc\r\n\t\r\n\tdef LoadGeos(self):\r\n\t\tsheet = self.gc.open_by_key(\"15FFGXlrO1ItnS_5NxrApwW_CGCkuConuXNK8RBfUR7M\")\r\n\t\t\r\n\t\tself.geos = sheet.worksheet_by_title('channel_geos').get_as_df()\r\n\t\t\r\n\t\tself.geos = self.geos[self.geos['geos'].isin(self.GeoList)]\r\n\t\tself.geos.rename(columns={'geos':'geo'}, inplace=True)\r\n\t\r\n\tdef LoadDevices(self):\r\n\t\tsheet = self.gc.open_by_key(\"15FFGXlrO1ItnS_5NxrApwW_CGCkuConuXNK8RBfUR7M\")\r\n\t\t\r\n\t\tself.device = sheet.worksheet_by_title('channel_devices').get_as_df()\r\n\t\tself.device.rename(columns={'devices':'device'}, inplace=True)\r\n\t\r\n\tdef LoadData(self):\r\n\t\tself.LoadAssetURI()\r\n\t\tself.LoadASParam()\r\n\t\tself.GetValidOutbrain()\r\n\t\tself.GetValidTaboola()\r\n\t\tself.GetValidYahoo()\r\n\t\tself.LoadGeos()\r\n\t\tself.LoadDevices()\r\n\t\t\r\n\tdef LoadAssetURI(self):\r\n\t\tSQL = '''\r\n\t\tSELECT\r\n\t\t\tURI,\r\n\t\t\tURI_Source,\r\n\t\t\tURL,\r\n\t\t\tURL_Source,\r\n\t\t\tsite,\r\n\t\t\tBatch,\r\n\t\t\tStatus,\r\n\t\t\tOutbrain,\r\n\t\t\tTaboola,\r\n\t\t\tYahoo\r\n\t\tFROM\r\n\t\t\tAssetURI\r\n\t\t'''\r\n\t\t\r\n\t\tself.AssetURI = pd.read_sql_query(SQL, LocalConfig.Connection)\r\n\t\tself.AssetURI = self.AssetURI[self.AssetURI['Status'] == 'Ready For Launch']\r\n\t\t\r\n\tdef LoadASParam(self):\r\n\t\tSQL = '''\r\n\t\tSELECT\r\n\t\t\tchannel,\r\n\t\t\t#campaign,\r\n\t\t\t#SUBSTRING_INDEX(asparam, '=', -1) as as_parameter,\r\n\t\t\tSUBSTRING_INDEX(campaign, ' ', 1) as URI,\r\n\t\t\tSUBSTRING_INDEX(SUBSTRING_INDEX(campaign, ' ', 2), ' ', -1) as site,\r\n\t\t\tSUBSTRING_INDEX(SUBSTRING_INDEX(campaign, ' ', 3), ' ', -1) as geo,\r\n\t\t\tSUBSTRING_INDEX(SUBSTRING_INDEX(campaign, ' ', 5), ' ', -1) as device,\r\n            DATE_FORMAT(MAX(timestamp), '%%Y-%%m-%%d %%H:%%i:%%s') as 
timestamp\r\n\t\tFROM\r\n\t\t\tas_parameters\r\n\t\tWHERE\r\n\t\t\ttimestamp > DATE('2018-08-01')\r\n\t\tGROUP BY\r\n\t\t\tchannel,\r\n URI,\r\n site,\r\n geo,\r\n device\r\n\t\t'''\r\n\t\tself.ASParam = pd.read_sql_query(SQL, Config.Connection)\r\n\t\tself.ASParam['geo'] = self.ASParam['geo'].str.lower()\r\n\t\tself.ASParam['device'] = self.ASParam['device'].str.lower()\r\n\t\tself.ASParam['site'] = self.ASParam['site'].str.lower()\r\n\t\t\r\n\tdef GetValidOutbrain(self):\r\n\t\tself.Outbrain = self.AssetURI[self.AssetURI['Outbrain'].str.lower() == 'x'][['URI', 'URI_Source', 'URL', 'URL_Source', 'site', 'Batch']]\r\n\t\tself.Outbrain['channel'] = 'Outbrain'\r\n\t\t\r\n\tdef GetValidTaboola(self):\r\n\t\tself.Taboola = self.AssetURI[self.AssetURI['Taboola'].str.lower() == 'x'][['URI', 'URI_Source', 'URL', 'URL_Source', 'site', 'Batch']]\r\n\t\tself.Taboola['channel'] = 'Taboola'\r\n\t\t\r\n\tdef GetValidYahoo(self):\r\n\t\tself.Yahoo = self.AssetURI[self.AssetURI['Yahoo'].str.lower() == 'x'][['URI', 'URI_Source', 'URL', 'URL_Source', 'site', 'Batch']]\r\n\t\tself.Yahoo['channel'] = 'Yahoo'\r\n\t\t\r\n\tdef JoinValidCampaigns(self):\r\n\t\tself.Output = self.Outbrain\r\n\t\tself.Output = self.Output.append(self.Taboola)\r\n\t\tself.Output = self.Output.append(self.Yahoo)\r\n\t\r\n\tdef ExpandValidGeos(self):\r\n\t\tself.Output = self.Output.merge(self.geos, on=['channel'])\r\n\t\t\r\n\tdef ExpandValidDevices(self):\r\n\t\tself.Output = self.Output.merge(self.device, on=['channel'])\r\n\t\t\r\n\tdef MergeLastLaunch(self):\r\n\t\tself.Output['geo'] = self.Output['geo'].str.lower()\r\n\t\tself.Output['device'] = self.Output['device'].str.lower()\r\n\t\tself.Output['site'] = self.Output['site'].str.lower()\r\n\t\r\n\t\tself.Output = self.Output.merge(self.ASParam, how='left', on=['channel', 'site', 'geo', 'device', 'URI'])\r\n\t\t\r\n\t\tNAValues = {}\r\n\t\tNAValues['timestamp'] = '1970-01-01 00:00:00'\r\n\t\t\r\n\t\tself.Output.fillna(value=NAValues, inplace=True)\r\n\t\r\n\tdef PrepareOutput(self):\r\n\t\tself.Output['geo'] = self.Output['geo'].str.upper()\r\n\t\tself.Output['device'] = self.Output['device'].str.upper()\r\n\t\tself.Output['site'] = self.Output['site'].str.upper()\r\n\t\t\r\n\t\tself.Output.columns = ['URI', 'URI_Source', 'URL', 'URL_Source', 'Site', 'Batch', 'Channel', 'Geo', 'Device', 'LatestLaunch']\r\n\t\tself.Output['timestamp'] = datetime.datetime.now().strftime('%Y-%m-%d %H-%M-%S')\r\n\t\r\n\tdef LoadIntoDB(self):\r\n\t\tself.Output.to_sql(\"LastLaunch\", LocalConfig.Connection, if_exists='replace', index=False)\r\n\t\t\r\nLL = LastLaunch()\r\nLL.LoadData()\r\nLL.JoinValidCampaigns()\r\nLL.ExpandValidGeos()\r\nLL.ExpandValidDevices()\r\nLL.MergeLastLaunch()\r\nLL.PrepareOutput()\r\nLL.LoadIntoDB()\r\n\r\nSQL = '''\r\nSELECT\r\n\tChannel,\r\n URL,\r\n Site,\r\n Geo,\r\n Device,\r\n \"English\" as Language,\r\n \"\" as ImageID,\r\n \"\" as TextID,\r\n DATE_FORMAT(CURDATE(), '%%Y%%m%%d') as Date,\r\n IF(Channel = \"Yahoo\", \"1\", \"\") as LNum,\r\n IF(Channel = \"Yahoo\", \"0\", \"\") as RJNum,\r\n CONCAT(\"FRL\", DATE_FORMAT(CURDATE(), '%%m%%d')) as Tag,\r\n LatestLaunch,\r\n timestamp\r\nFROM\r\n\tLastLaunch\r\nORDER BY LatestLaunch ASC\r\n''';\r\n\r\nLaunchMe = pd.read_sql_query(SQL, LocalConfig.Connection)\r\n\r\n#Upload to Google Drive\r\ntry:\r\n GoogleDriveURL = GDU.UploadAndGetShareLink(\"Last URI Launch.csv\", LaunchMe.to_csv(index=False), '1FP4IdlalRMMq2KS2LXlGTyV6KOa6XUZ3')\r\nexcept:\r\n GoogleDriveURL = ''\r\n\r\n\r\n#LaunchMe.to_csv(r'C:\\Users\\JEd\\Google 
Drive\\Gemini Scraping\\Last URI Launch.csv')","sub_path":"airflow/PythonCode/URILastLaunch.py","file_name":"URILastLaunch.py","file_ext":"py","file_size_in_byte":5274,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"223489359","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('web01', '0001_initial'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='deliver',\n name='express_company',\n field=models.TextField(max_length=100, null=True, verbose_name=b'\\xe5\\xbf\\xab\\xe9\\x80\\x92\\xe5\\x85\\xac\\xe5\\x8f\\xb8'),\n preserve_default=True,\n ),\n migrations.AlterField(\n model_name='deliver',\n name='identifier',\n field=models.CharField(max_length=100, null=True, verbose_name=b'\\xe5\\xbf\\xab\\xe9\\x80\\x92\\xe5\\x8d\\x95\\xe5\\x8f\\xb7'),\n preserve_default=True,\n ),\n ]\n","sub_path":"web01/migrations/0002_auto_20160926_0017.py","file_name":"0002_auto_20160926_0017.py","file_ext":"py","file_size_in_byte":783,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"598596574","text":"import asyncio \n\nasync def FirstWorker():\n while True:\n await asyncio.sleep(0.01)\n print(\"First worker working progress...\")\n\n\nasync def SecondWorker():\n while True:\n await asyncio.sleep(0.01)\n print(\"Second worker working progress...\")\n\nloop = asyncio.get_event_loop()\ntry:\n # When using Multiple Coroutines \n asyncio.ensure_future(FirstWorker())\n asyncio.ensure_future(SecondWorker())\n\n #Event loop running endless\n loop.run_forever()\nexcept KeyboardInterrupt:\n print(\"Sorry!....\")\nfinally:\n print(\"Closing the loop\")\n loop.close()\n","sub_path":"Asyncio/1-SimpleAsyncEx.py","file_name":"1-SimpleAsyncEx.py","file_ext":"py","file_size_in_byte":592,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"283075749","text":"# Projeto 04 - Simulador de votação:\n\n# Crie um programa que simule um sistema de votação, ele deve receber votos até\n# que o usuário diga que não tem mais ninguém para votar, esse programa precisa ter\n# duas funções:\n\n# A 1° Função precisa ser chamada autoriza_voto() ela vai receber como parâmetro o\n# ano de nascimento de uma pessoa que será digitado pelo usuário, retornando um\n# valor literal indicando se uma pessoa tem voto NEGADO, OPCIONAL e\n# OBRIGATÓRIO nas eleições.\n\n# A 2° Função será a votacao(), ela vai receber dois parâmetros, autorização (que virá\n# da função autoriza_voto()) e o voto que é o número que a pessoa votou.\n# Se ela não puder votar, a 2° função terá que retornar “Você não pode votar”, caso o\n# contrário a 2° função deve validar o número que a pessoa escolheu, ela pode\n# escolher de 1 a 5 (crie 3 candidatos para a votação):\n\n# ● 1, 2 ou 3 - Votos para os respectivos candidatos\n# ● 4- Voto Nulo\n# ● 5 - Voto em Branco\n# Sua função votacao() tem que calcular e mostrar:\n# ● O total de votos para cada candidato;\n# ● O total de votos nulos;\n# ● O total de votos em branco;\n# ● Qual candidato venceu a votação\n\nimport os\nfrom datetime import datetime\n\n\nos.system('cls')\n\n# Lista aninhada criada para guardar a infomação dos candidatos , votos nulos e em branco\nvotação = [['candidato1', 0], ['candidato2', 0], [\n 'candidato3', 0], ['Votos nulos', 0], ['Votos Brancos', 0]]\n\n\nclass Votacao: # Criação da Class Votacao\n # 
\n    # The first method runs automatically, collecting the person's name and age\n    def __init__(self, nome, idade):\n        self.nomePessoa = nome\n        self.idadeIdade = idade\n\n    # The second method checks whether the person is allowed to vote\n    def autoriza_voto(self):\n        if self.idadeIdade == 16 or self.idadeIdade == 17 or self.idadeIdade > 65:\n            return f'''{self.nomePessoa} você tem {self.idadeIdade} anos \n            Você tem direito ao voto opcional!!!!\n            '''\n        elif self.idadeIdade <= 15:\n            return f'''{self.nomePessoa} você tem {self.idadeIdade} anos\n            Você não tem direito ao voto!!!\n            '''\n\n        else:\n            return f'''{self.nomePessoa} você tem {self.idadeIdade} anos \n            Você é obrigado a votar!!!!\n            '''\n\n    def votacao(self, voto):  # The third method records the chosen vote, as long as the person is older than 15\n        if self.idadeIdade <= 15:\n\n            return f'''{self.nomePessoa} você tem {self.idadeIdade} anos \n            Você não tem direito ao voto!!!'''\n\n        else:\n            if voto == 1:\n                votação[0][1] += 1\n            elif voto == 2:\n                votação[1][1] += 1\n            elif voto == 3:\n                votação[2][1] += 1\n            elif voto == 5:\n                votação[4][1] += 1\n            else:\n                votação[3][1] += 1\n\n\nwhile True:  # Loop that keeps asking whether the user wants to continue voting\n    pessoa = Votacao(\n\n        nome=str(input(\"Qual o seu nome?: \")),\n        idade=datetime.now().year -\n        int(input(\"Qual o ano do seu nascimento e apenas o ano do nascimento: \"))\n\n    )\n    print(pessoa.autoriza_voto())\n\n    if pessoa.idadeIdade > 15:\n        pessoa.votacao(int(input(\n            ''' Digite o numero correspondente ao seu candidato\n            [1] Candidato 1\n            [2] Candidato 2\n            [3] Candidato 3\n            [4] Voto nulo\n            [5] Voto em Branco\n            '''\n        )))\n\n    parar = str(input(\"Você deseja continuar [S/N]: \")).upper()\n    if parar.startswith('N'):\n        os.system('cls')\n        break\n\nwhile True:\n    os.system('cls')\n    # max() finds the entry with the most votes; it is restricted to the first three\n    # entries so that the null/blank tallies cannot \"win\", and on a tie it still\n    # returns only the first candidate found\n    Candidatoeleito, votoEleito = max(votação[:3], key=lambda item: item[1])\n    print(f'''\n    Após as votações, \n    \n    O candidato 1 recebeu {votação[0][1]} Votos\n    O candidato 2 recebeu {votação[1][1]} Votos\n    O candidato 3 recebeu {votação[2][1]} Votos\n    Votos nulos recebeu {votação[3][1]} Votos\n    Votos brancos recebeu {votação[4][1]} Votos\n    \n    O candidato eleito foi {Candidatoeleito} \n    Com {votoEleito} Votos.\n    \n    \n    ''')\n    break\n","sub_path":"SimuladorDeVotação.py","file_name":"SimuladorDeVotação.py","file_ext":"py","file_size_in_byte":4715,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"463531235","text":"from django.views.generic import DetailView, ListView\nfrom bubble.models import Issue, Note\nfrom bubble.forms import UpdateStatusForm\nfrom django.shortcuts import get_object_or_404, render_to_response\nfrom django.http import HttpResponseRedirect, HttpResponse\nfrom django.core.urlresolvers import reverse\nfrom django.contrib import messages\nfrom django.core.mail import EmailMultiAlternatives\nfrom django.core import mail\nfrom django.template import loader, Context\n\nclass UserIssueListView(ListView):\n    model = Issue\n    context_object_name = 'issue'\n    template_name = 'bubble/issues_list.html'\n    paginate_by = 30\n\n    def get_queryset(self):\n        return Issue.objects.select_related().filter(\n            assignee=self.request.user.id).order_by('priority')
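\n\n# Aside (illustrative, not part of the original views): select_related() joins related\n# rows into the same SQL query, avoiding one extra query per Issue when a template\n# follows a foreign key, e.g.:\n#     Issue.objects.select_related('assignee').order_by('priority')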
\n\n\nclass IssueDetailView(DetailView):\n    queryset = Issue.objects.select_related()\n    template_name = 'bubble/issues_detail.html'\n\n    def get_context_data(self, **kwargs):\n        context = super(IssueDetailView, self).get_context_data(**kwargs)\n        context['notes'] = Note.objects.filter(issue=self.object.pk).select_related('user')\n        if hasattr(self, 'form'):\n            context['note_form'] = self.form\n        else:\n            context['note_form'] = UpdateStatusForm(initial={'status': self.object.status})\n        return context\n\n    def post(self, request, pk):\n        form = UpdateStatusForm(request.POST)\n        if form.is_valid():\n            # the issue id\n            issue = Issue.objects.get(pk=pk)\n            comment = form.cleaned_data['comment']\n            user = request.user\n\n            note = Note(issue=issue, comment=comment, user=user)\n            note.save()\n\n            issue.status = form.cleaned_data['status']\n            issue.save()\n\n            messages.add_message(request, messages.SUCCESS, 'Your comment was added')\n\n            # needs splitting into an email function\n            html_template = loader.get_template('bubble/email_alert.html')\n            plain_template = loader.get_template('bubble/email_alert.txt')\n            email_context = Context({'note': note, 'issue': issue})\n\n            email_subject = 'New comment on issue: %s' % issue.title\n            email_to = [issue.assignee.email]\n            email_body_plain = plain_template.render(email_context)\n            email_body_html = html_template.render(email_context)\n\n            email = EmailMultiAlternatives(email_subject, email_body_plain, 'bubble@alol.co.uk', email_to)\n            email.attach_alternative(email_body_html, 'text/html')\n            email.send()\n\n            return HttpResponseRedirect(reverse('bubble_issue_detail', args=[pk]))\n\n        self.form = form\n        return self.get(request)\n\n\nclass IssueSearch(ListView):\n    model = Issue\n    template_name = 'bubble/issues_search_results.html'\n\n    def get_queryset(self):\n        if 's' in self.request.GET:\n            search_term = self.request.GET['s']\n\n            # if the search term is empty return an empty resultset\n            if not search_term:\n                return []\n\n            # if the search term matches anything in the description, the title or if it exactly matches the\n            # id number of the issue, then return it.\n            results = Issue.objects.filter(title__contains=search_term) | Issue.objects.filter(\n                description__contains=search_term)\n\n            if search_term.isdigit():\n                results = results | Issue.objects.filter(pk=int(search_term))\n\n            return results\n\n        return []\n","sub_path":"bubble/views/issues.py","file_name":"issues.py","file_ext":"py","file_size_in_byte":3536,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"614961869","text":"from glob import glob\r\nimport os\r\n\r\nlib_dir = \"D:/OpenCV4/opencv_cuda_build/install/x64/vc15/lib/\"\r\nlib_version = '451'\r\nlib_suffix = {\r\n    \"debug\" : f\"*{lib_version}d.lib\", \r\n    \"release\" : f\"*{lib_version}.lib\"\r\n}\r\nfor suffix in lib_suffix.keys():\r\n    print(suffix, lib_suffix[suffix], sep=\"\\n\\n\\n\")\r\n    for path in glob(lib_dir + lib_suffix[suffix]):\r\n        print(os.path.split(path)[-1])","sub_path":"U2Net/U2NetPy/tools/get_opencv_lib_name.py","file_name":"get_opencv_lib_name.py","file_ext":"py","file_size_in_byte":397,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"30853667","text":"# -*- coding: utf-8 -*-\n\"\"\"\n    UUTrack.Controller.devices.PhotonicScience.scmoscam.py\n    ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n    \n    A wrapper class originally written in Py2 by Perceval Guillou,\n    perceval@photonic-science.com, and tested successfully with\n    scmoscontrol.dll SCMOS Pleora (GEV) control dll (x86) v5.6.0.0 (date modified 10/2/2013)\n    \n    SaFa @nanoLINX has adapted the wrapper class for a camera control program.\n    \n    v1.0, 
24 feb. 2015\n\n .. sectionauthor:: SaFa \n\n\"\"\"\n\nimport ctypes as C\nimport os\nimport sys\nfrom sys import platform\nif platform == \"linux\" or platform == \"linux2\":\n pass\nelif platform == \"darwin\":\n pass\nelif platform == \"win32\":\n from _ctypes import LoadLibrary, FreeLibrary\n\nimport numpy\n\nNUMPY_MODES = {\"L\":numpy.uint8, \"I;16\":numpy.uint16}\nclass GEVSCMOS:\n def __init__(self, cwd_path, name):\n self.cwd_path = cwd_path #working directory\n self.name = name #Camera name = folder where DLL and settings are stored\n self.setup_file = \"%s\\\\%s\\\\PSL_camera_files\\\\ps_setup.dat\"%(cwd_path,name)\n self.dll_name = self.GetDLL()\n self.dll = None\n self.LoadCamDLL()\n self.ResetOptions()\n\n def __str__(self):\n msg = \"Camera setting located in %s\"%(self.setup_file)\n return msg\n\n def GetDLL(self):\n FileList = os.listdir('%s\\\\%s'%(self.cwd_path,self.name))\n count = 0\n for file in FileList:\n if file[-4:] == \".dll\":\n dll_name = file\n count+=1\n\n if count == 0:\n msg = \"Check in '%s'\\n!!!CAMERA CONTROL DLL NOT FOUND!!!\"%self.cwd_path\n print (msg)\n return \"\"\n\n elif count > 1:\n msg = \"Check in '%s'\\n!!!ONLY ONE DLL FILE MUST EXIST IN THE CAMERA FOLDER!!!\"%self.cwd_path\n print (msg)\n return \"\"\n else:\n return dll_name\n\n def LoadCamDLL(self):\n self.libHandle = LoadLibrary('%s\\\\%s\\\\%s'%(self.cwd_path,self.name,self.dll_name))\n #self.libHandle = C.windll.kernel32.LoadLibraryA('%s\\\\%s\\\\%s'%(self.cwd_path,self.name,self.dll_name))\n self.dll = C.CDLL(None, handle=self.libHandle) #cdecl\n #self.dll = C.WinDLL(None, handle=self.libHandle) #stdcall\n #self.dll = C.CDLL('%s\\\\%s\\\\%s'%(self.cwd_path,self.name,self.dll_name))\n self.InitFunctions()\n\n def UnloadCamDLL(self):\n del self.dll\n self.dll = None\n FreeLibrary(self.libHandle)\n #C.windll.kernel32.FreeLibrary(self.libHandle)\n\n def ResetOptions(self):\n self.mode = \"I;16\"\n self.size = (0,0)\n self.sizemax = (1919,1079)\n self.state = 0\n self.abort_flag = False\n self.remapping = False\n self.smooth = False\n self.clip = True\n self.SubArea = (0,0,0,0)\n self.SoftBin = (1,1)\n self.gainmode = 0\n self.expous = 100000\n self.FlatAverage = 10\n self.GlobalRemap = False\n self.tempread = True\n\n #self.is2tap = False\n\n # 0 = gain mode 1 - 16 bit\n # 1 = gain mode 2 - 16 bit\n # 2 = gain mode 10 - 16 bit\n # 3 = gain mode 30 - 16 bit\n # 4 = combined (1 and 30) in software - 24 => Demangle => 16 bit\n # 5 = combined in hardware - 16 bit\n # 6 = gain mode 1 - 8 bit\n # 7 = gain mode 2 - 8 bit\n # 8 = gain mode 10 - 8 bit\n # 9 = gain mode 30 - 8 bit\n # 10= combined in hardware - 8bit\n\n if self.IsInCamCor():\n if self.Has8bitGainModes():\n gainmodes = ['gain1','gain2','gain10','gain30','gain1+30_Hardware','gain1_8b','gain2_8b','gain10_8b','gain30_8b','gain1+30_8b']\n else:\n gainmodes = ['gain1','gain2','gain10','gain30','gain1+30_Hardware']\n else:\n if self.Has8bitGainModes():\n gainmodes = ['gain1','gain2','gain10','gain30','gain1+30','gain1_8b','gain2_8b','gain10_8b','gain30_8b']\n else:\n gainmodes = ['gain1','gain2','gain10','gain30','gain1+30']\n\n if self.HasClockSpeedLimit():\n clockspeedmodes = ['50MHz']\n else:\n clockspeedmodes = ['50MHz','100MHz'] #,'200MHz'\n\n #self.flipdata = self.IsFlipped()\n\n self.Options = {\n 'TriggerMode' :['FreeRunning','Software',\n 'Hardware_Falling','Hardware_Rising'],\n 'ClockSpeedMode' :clockspeedmodes,\n 'GainMode' :gainmodes,\n 'PowerSavingMode' :['PowerOn','PowerOff','CoolingOff'],\n #'VideoGain' :[0,100],\n 
'IntensifierGain' :[1,100],\n #'ChipGain' :[1,100],\n 'SoftBin' :[(1,1),(1040,1040)],\n 'SubArea' :[(0,0,0,0)],\n 'Exposure' :[(100,'Millisec'),(4294967,['Microsec','Millisec','Second'])],\n 'Temperature' :[0,0],\n 'Offset' :[1],\n 'BrightPixel' :[1],\n 'FlatField' :[0],\n 'MakeFlat' :[None],\n 'FlatAverage' :[10,1000],\n 'Remapping' :[0],\n 'Smooth' :[0],\n 'Clip' :[0],\n 'Sharpening' :[0],\n 'AutoLevel' :[0],\n 'ALC_maxexp' :[1000,65535],\n 'ALC_win' :[(0,0,1919,1079)],\n 'BestFit' :[0],\n 'BF_Peek' :[1000,65535],\n 'IF_delay' :[0,65535],\n 'BinningFilter' :[0],\n 'AutoBinning' :[0],\n 'Gamma' :[0],\n 'GammaPeak' :[0,100],\n 'GammaBright' :[0,100],\n #'FlickerMode' :['Off','50MHz','60MHz'],\n }\n\n def InitFunctions(self):\n #Buffer\n self.dll.PSL_VHR_get_image_pointer.restype = C.POINTER(C.c_char) #ushort\n self.dll.PSL_VHR_demangle_rgb24_into_16bit_image.restype = C.POINTER(C.c_char) #ushort\n self.dll.PSL_VHR_remap_image.restype = C.POINTER(C.c_char) #ushort\n self.dll.PSL_VHR_get_pointer_to_safebufferA.restype = C.POINTER(C.c_char) #ushort\n self.dll.PSL_VHR_get_pointer_to_safebufferB.restype = C.POINTER(C.c_char) #ushort\n self.dll.PSL_VHR_get_pointer_to_safebufferC.restype = C.POINTER(C.c_char) #ushort\n #Bool\n self.dll.PSL_VHR_Open.restype = C.c_bool\n self.dll.PSL_VHR_open_map.restype = C.c_bool\n self.dll.PSL_VHR_Close.restype = C.c_bool\n self.dll.PSL_VHR_set_gain_mode.restype = C.c_bool\n self.dll.PSL_VHR_set_speed.restype = C.c_bool\n self.dll.PSL_VHR_set_video_gain.restype = C.c_bool\n self.dll.PSL_VHR_set_chip_gain.restype = C.c_bool\n self.dll.PSL_VHR_set_exposure.restype = C.c_bool\n self.dll.PSL_VHR_set_trigger_mode.restype = C.c_bool\n self.dll.PSL_VHR_set_sub_area_coordinates.restype = C.c_bool\n self.dll.PSL_VHR_enable_offset_subtraction.restype = C.c_bool\n self.dll.PSL_VHR_enable_bright_pixel_correction.restype = C.c_bool\n self.dll.PSL_VHR_enable_flat_field_correction.restype = C.c_bool\n self.dll.PSL_VHR_Snap_and_return.restype = C.c_bool\n self.dll.PSL_VHR_Get_snap_status.restype = C.c_bool\n self.dll.PSL_VHR_abort_snap.restype = C.c_bool\n self.dll.PSL_VHR_apply_post_snap_processing.restype = C.c_bool\n self.dll.PSL_VHR_enable_gamma.restype = C.c_bool\n self.dll.PSL_VHR_set_gamma_gain_bright.restype = C.c_bool\n self.dll.PSL_VHR_set_gamma_gain_brightness.restype = C.c_bool\n #self.dll.PSL_VHR_set_flicker_mode.restype = C.c_bool\n\n def IsInCamCor(self):\n isincamcor = 0\n try:\n fich = open(self.setup_file,'r')\n lines = fich.readlines()\n fich.close()\n\n for line in lines:\n (option,sep,value) = line.strip().partition('=')\n if option.lower() in [\"onboardcorrectionssupported\",\"incameracorrections\"]:\n isincamcor = int(value)\n break\n\n except:\n pass\n\n return bool(isincamcor)\n\n def IsFlipped(self):\n isflip = 0\n try:\n fich = open(self.setup_file,'r')\n lines = fich.readlines()\n fich.close()\n\n for line in lines:\n (option,sep,value) = line.strip().partition('=')\n if option.lower() in [\"swflipimage\"]:\n isflip = int(value)\n break\n\n except:\n pass\n\n return bool(isflip)\n\n def GetRemapSize(self):\n remapsize = None\n try:\n fich = open(self.setup_file,'r')\n lines = fich.readlines()\n fich.close()\n\n Nx,Ny = (0,0)\n for line in lines:\n (option,sep,value) = line.strip().partition('=')\n if option in [\"Submapwidth\",\"submapwidth\"]:\n Nx = int(value)\n if option in [\"Submapheight\",\"submapheight\"]:\n Ny = int(value)\n break\n\n remapsize = Nx,Ny\n\n except:\n pass\n\n #print \"remap size is (%s,%s)\"%remapsize\n\n return remapsize\n\n 
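# Aside (illustrative, not in the original driver): the Has*/Is* helpers below all\n    # re-read ps_setup.dat with the same open/partition/int pattern; a small hypothetical\n    # helper could centralise it, e.g.:\n    #     def _read_setup_int(self, names, default):\n    #         try:\n    #             with open(self.setup_file) as fich:\n    #                 for line in fich:\n    #                     option, sep, value = line.strip().partition('=')\n    #                     if option.lower() in names:\n    #                         return int(value)\n    #         except Exception:\n    #             pass\n    #         return default\n    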
def HasIntensifier(self):\n intensifier_value = 1\n try:\n fich = open(self.setup_file,'r')\n lines = fich.readlines()\n fich.close()\n\n for line in lines:\n (option,sep,value) = line.strip().partition('=')\n if option in [\"intensifiergaincanbeset\",\"IntensifierGainCanBeSet\",\"HasIntensifier\",\"hasintensifier\"]:\n intensifier_value = int(value)\n break\n\n except:\n pass\n\n return bool(intensifier_value)\n\n def HasTemperature(self):\n tempset = None\n tempread = None\n try:\n fich = open(self.setup_file,'r')\n lines = fich.readlines()\n fich.close()\n\n for line in lines:\n (option,sep,value) = line.strip().partition('=')\n if option in [\"TemperatureCanBeSet\",\"temperaturecanbeset\"]:\n tempset = int(value)\n if option in [\"TemperatureCanBeRead\",\"temperaturecanberead\"]:\n tempread = int(value)\n\n if tempset==1:\n return [-30,50]\n elif tempset==0:\n return [0,0]\n elif tempread==1:\n return [0,0]\n elif tempread==0:\n return None\n else:\n return [-30,50]\n\n except:\n return [-30,50]\n\n def HasHPMapping(self):\n use_hpm_remap = 0\n try:\n fich = open(self.setup_file,'r')\n lines = fich.readlines()\n fich.close()\n\n for line in lines:\n (option,sep,value) = line.strip().partition('=')\n if option in [\"viewer_use_hp_mapping\"]:\n use_hpm_remap = int(value)\n break\n\n except:\n print (\"HasHPMapping: %s: %s\"%(sys.exc_info()[0],sys.exc_info()[1]))\n\n return bool(use_hpm_remap)\n\n def HasBinning(self):\n use_binning = 1\n try:\n fich = open(self.setup_file,'r')\n lines = fich.readlines()\n fich.close()\n\n for line in lines:\n (option,sep,value) = line.strip().partition('=')\n if option in [\"binning_supported\"]:\n use_binning = int(value)\n break\n\n except:\n print (\"HasBinning: %s: %s\"%(sys.exc_info()[0],sys.exc_info()[1]))\n\n return bool(use_binning)\n\n def HasClockSpeedLimit(self):\n clockspeedlimit = 0\n try:\n fich = open(self.setup_file,'r')\n lines = fich.readlines()\n fich.close()\n\n for line in lines:\n (option,sep,value) = line.strip().partition('=')\n if option.lower() in [\"hasclockspeedlimit\",\"clockspeedlimit\"]:\n clockspeedlimit = int(value)\n break\n\n except:\n pass\n\n return bool(clockspeedlimit)\n\n def Has8bitGainModes(self):\n has8bit = 1\n try:\n fich = open(self.setup_file,'r')\n lines = fich.readlines()\n fich.close()\n\n for line in lines:\n (option,sep,value) = line.strip().partition('=')\n if option.lower() in [\"remove_8bit_gainmodes\"]:\n if int(value)==0:\n has8bit = 1\n else:\n has8bit = 0\n break\n\n except:\n pass\n\n return bool(has8bit)\n\n #------ CAMERA PROPERTIES ---------------------------------\n def GetName(self):\n return self.name\n\n def GetDLLName(self):\n return self.dll_name\n\n def GetMode(self):\n return self.mode\n\n def GetState(self):\n return self.state\n\n def GetPedestal(self):\n return self.pedestal\n\n def GetOptions(self):\n return self.Options.keys()\n\n def GetSize(self):\n return self.size\n\n def GetSizeMax(self):\n return self.sizemax\n\n def UpdateSizeMax(self):\n Nx = self.dll.PSL_VHR_get_maximum_width()\n Ny = self.dll.PSL_VHR_get_maximum_height()\n self.sizemax = (Nx,Ny)\n return self.sizemax\n\n def UpdateSize(self):\n Nx = self.dll.PSL_VHR_get_width()\n Ny = self.dll.PSL_VHR_get_height()\n self.size = (Nx,Ny)\n\n #----- Specificities -----------------------------------------------------------------\n def SelectIportDevice(self):\n path = \"%s\\\\%s\\\\IPconf.dat\"%(self.cwd_path,self.name)\n if os.path.exists(path):\n self.IP_add = \"\"\n self.mac_add = \"\"\n self.IsIport = True\n fich = 
open(path,'r')\n lines = fich.readlines()\n fich.close()\n for line in lines:\n (option, sep, value) = line.strip().partition('=')\n if option == \"MAC\":\n self.mac_add = value\n elif option == \"IP\":\n self.IP_add = value\n\n if self.mac_add==\"\" or self.IP_add==\"\":\n self.dll.PSL_VHR_select_IPORT_device(\"\",\"\")\n else:\n self.dll.PSL_VHR_select_IPORT_device(self.mac_add,\"[%s]\"%self.IP_add)\n\n return True\n else:\n self.IsIport = False\n return False\n\n #-------- CAMERA STANTARD FUNCTIONS ------------------\n def Open(self):\n path = \"%s\\\\%s\\\\PSL_camera_files\"%(self.cwd_path,self.name)\n self.SelectIportDevice()\n\n if self.dll.PSL_VHR_Open(str(path)) :\n if not self.OpenMap():\n #del self.Options['Remapping']\n pass\n\n if not self.IsIntensifier():\n del self.Options['IntensifierGain']\n\n Temp = self.HasTemperature()\n if Temp==None:\n self.tempread = False\n else:\n self.tempread = True\n\n (Nx,Ny) = self.UpdateSizeMax()\n self.Options[\"SubArea\"][0] = (0,0,Nx-1,Ny-1)\n self.Options[\"ALC_win\"][0] = (0,0,Nx-1,Ny-1)\n self.SetSubArea(0,0,Nx-1,Ny-1)\n self.UpdateSize()\n return 0\n\n else:\n return 1\n\n def Close(self):\n self.dll.PSL_VHR_Close()\n self.UnloadCamDLL()\n\n def SetSubArea(self,left,top,right,bottom):\n self.SubArea = (left,top,right,bottom)\n rep = self.dll.PSL_VHR_set_sub_area_coordinates(left,right,top,bottom)\n self.UpdateSize()\n return rep\n\n def SetSoftBin(self,Sx,Sy):\n self.SoftBin = (Sx,Sy)\n\n def SetExposure(self, expo, unit):\n if unit==\"Second\":\n self.expous = expo*1000000\n elif unit==\"Millisec\":\n self.expous = expo*1000\n elif unit==\"Microsec\":\n self.expous = expo\n\n ans = self.dll.PSL_VHR_set_exposure(self.expous)\n\n #print \"SetExposure \",self.name,expo,unit,self.expous,type(self.expous),ans\n\n return ans\n\n def SetTrigger(self,mode):\n if mode == \"FreeRunning\":\n return self.dll.PSL_VHR_set_trigger_mode(0)\n elif mode == \"Software\":\n return self.dll.PSL_VHR_set_trigger_mode(1)\n elif mode == \"Hardware_Falling\":\n return self.dll.PSL_VHR_set_trigger_mode(2)\n elif mode == \"Hardware_Rising\":\n return self.dll.PSL_VHR_set_trigger_mode(6)\n elif mode == \"Pipeline_Master\":\n return self.dll.PSL_VHR_set_trigger_mode(16)\n elif mode == \"Pipeline_Slave\":\n return self.dll.PSL_VHR_set_trigger_mode(18)\n else:\n return \"Trigger mode not valid\"\n\n def SetGainMode(self, mode):\n # 0 = gain mode 1 - 16 bit\n # 1 = gain mode 2 - 16 bit\n # 2 = gain mode 10 - 16 bit\n # 3 = gain mode 30 - 16 bit\n # 4 = combined (1 and 30) in software - 24 => Demangle => 16 bit\n # 5 = combined in hardware - 16 bit\n # 6 = gain mode 1 - 8 bit\n # 7 = gain mode 2 - 8 bit\n # 8 = gain mode 10 - 8 bit\n # 9 = gain mode 30 - 8 bit\n # 10= combined in hardware - 8bit\n\n if mode == 'gain1':\n self.gainmode = 0\n rep = self.dll.PSL_VHR_set_gain_mode(0)\n elif mode == 'gain2':\n self.gainmode = 1\n rep = self.dll.PSL_VHR_set_gain_mode(1)\n elif mode == 'gain10':\n self.gainmode = 2\n rep = self.dll.PSL_VHR_set_gain_mode(2)\n elif mode == 'gain30':\n self.gainmode = 3\n rep = self.dll.PSL_VHR_set_gain_mode(3)\n elif mode == 'gain1+30':\n self.gainmode = 4\n rep = self.dll.PSL_VHR_set_gain_mode(4)\n elif mode == 'gain1+30_Hardware':\n self.gainmode = 5\n rep = self.dll.PSL_VHR_set_gain_mode(5)\n elif mode == 'gain1_8b':\n self.gainmode = 6\n rep = self.dll.PSL_VHR_set_gain_mode(6)\n elif mode == 'gain2_8b':\n self.gainmode = 7\n rep = self.dll.PSL_VHR_set_gain_mode(7)\n elif mode == 'gain10_8b':\n self.gainmode = 8\n rep = 
self.dll.PSL_VHR_set_gain_mode(8)\n elif mode == 'gain30_8b':\n self.gainmode = 9\n rep = self.dll.PSL_VHR_set_gain_mode(9)\n elif mode == 'gain1+30_8b':\n self.gainmode = 10\n rep = self.dll.PSL_VHR_set_gain_mode(10)\n else:\n rep = \"Gain mode not valid\"\n\n if self.gainmode in [0,1,2,3,4,5]:\n self.mode = \"I;16\"\n elif self.gainmode in [6,7,8,9,10]:\n self.mode = \"L\"\n\n self.UpdateSize()\n\n return rep\n\n def SetVideoGain(self, gain):\n return self.dll.PSL_VHR_set_video_gain(gain)\n\n def SetChipGain(self, gain):\n return self.dll.PSL_VHR_set_chip_gain(gain)\n\n def IsIntensifier(self):\n return self.HasIntensifier()\n\n def SetIntensifierGain(self, gain): #SetChipGain\n return self.dll.PSL_VHR_set_chip_gain(gain)\n\n def SetClockSpeed(self, mode):\n if mode == '200MHz':\n return self.dll.PSL_VHR_set_speed(2)\n elif mode == '100MHz':\n return self.dll.PSL_VHR_set_speed(1)\n elif mode == '50MHz':\n return self.dll.PSL_VHR_set_speed(0)\n else:\n return False\n\n def GetTemperature(self):\n try:\n if self.tempread:\n return self.dll.PSL_VHR_read_CCD_temperature()\n else:\n return None\n except:\n return None\n\n def SetTemperature(self,temp):\n return False\n\n def SetPowerSavingMode(self,mode):\n try:\n if mode=='PowerOn':\n return self.dll.PSL_VHR_set_power_saving_mode(1)\n elif mode=='PowerOff':\n return self.dll.PSL_VHR_set_power_saving_mode(0)\n elif mode=='CoolingOff':\n return self.dll.PSL_VHR_set_power_saving_mode(2)\n except:\n print (\"Cannot apply PowerSavingMode %s\"%mode)\n\n #-------- IMAGE ACQUISITION--------------------------------\n def Snap(self):\n self.state = 1\n self.abort_flag = False\n rep = self.dll.PSL_VHR_Snap_and_return()\n while not self.dll.PSL_VHR_Get_snap_status():\n pass\n self.state = 0\n return rep\n\n def SnapAndReturn(self):\n self.abort_flag = False\n rep = self.dll.PSL_VHR_Snap_and_return()\n return rep\n\n def GetStatus(self):\n return self.dll.PSL_VHR_Get_snap_status()\n\n def AbortSnap(self):\n self.abort_flag = True\n self.state = 0\n return self.dll.PSL_VHR_abort_snap()\n\n def GetImagePointer(self):\n imp = self.dll.PSL_VHR_get_image_pointer()\n self.dll.PSL_VHR_transfer_to_safebufferC(imp)\n return self.dll.PSL_VHR_get_pointer_to_safebufferC()\n\n def GetRawImage(self):\n imp = self.GetImagePointer()\n (Nx,Ny) = self.GetSize()\n\n if self.gainmode in [0,1,2,3]:\n depth = 2\n elif self.gainmode == 4 :\n depth = 3\n else:\n depth = 1\n\n return ((Nx,Ny),imp[0:depth*Nx*Ny])\n\n def GetImage(self,imp=None):\n if imp==None:\n imp = self.GetImagePointer()\n\n (Nx,Ny) = self.GetSize()\n\n if self.gainmode in [0,1,2,3]:\n self.dll.PSL_VHR_apply_post_snap_processing(imp)\n depth = 2\n elif self.gainmode == 4 :\n (Nx,Ny),imp = self.Demangle(imp,Nx,Ny)\n depth = 2\n elif self.gainmode == 5 :\n self.dll.PSL_VHR_apply_post_snap_processing(imp)\n depth = 2\n elif self.gainmode in [6,7,8,9,10]:\n depth = 1\n\n\n if self.remapping and not self.GlobalRemap:\n (Nx,Ny),imp = self.Remap(imp,Nx,Ny)\n\n if self.SoftBin!=(1,1):\n Nx,Ny = self.SoftBinImage(imp,Nx,Ny)\n\n\n return ((Nx,Ny),imp[0:depth*Nx*Ny])\n\n #-------- CAMERA CORRECTION FUNCTIONS -------------\n\n def SoftBinImage(self, image_pointer, Nx, Ny):\n newX = C.c_int(Nx)\n newY = C.c_int(Ny)\n Sx,Sy = self.SoftBin\n if self.gainmode in [6,7,8,9,10]:\n self.dll.PSL_VHR_software_bin_8bit_image(image_pointer,C.byref(newX),C.byref(newY),Sx,Sy)\n else:\n self.dll.PSL_VHR_software_bin_image(image_pointer,C.byref(newX),C.byref(newY),Sx,Sy)\n\n Nx,Ny = newX.value,newY.value\n return (Nx,Ny)\n\n def 
OpenMap(self, file_name=\"distortion.map\"):\n        return self.dll.PSL_VHR_open_map(file_name)\n\n    def Remap(self, image_pointer, Nx, Ny):\n        newX = C.c_int(Nx)\n        newY = C.c_int(Ny)\n\n        #if self.is2tap:\n        #    imp = self.dll.PSL_VHR_remap_double_image(image_pointer,C.byref(newX),C.byref(newY),self.smooth, self.clip)\n        #else:\n        imp = self.dll.PSL_VHR_remap_image(image_pointer,C.byref(newX),C.byref(newY),self.smooth, self.clip)\n\n\n        return ((newX.value,newY.value),imp)\n\n    def Demangle(self, image_pointer, Nx, Ny):\n        newX = C.c_int(Nx)\n        newY = C.c_int(Ny)\n        imp = self.dll.PSL_VHR_demangle_rgb24_into_16bit_image(image_pointer,C.byref(newX),C.byref(newY))\n        return ((newX.value,newY.value),imp)\n\n    def EnableRemapping(self,enable):\n        self.remapping = enable\n        return True\n\n    def EnableSmooth(self,enable):\n        self.smooth = bool(enable)\n        return True\n\n    def EnableClip(self,enable):\n        self.clip = bool(enable)\n        return True\n\n    def EnableOffset(self, enable):\n        return self.dll.PSL_VHR_enable_offset_subtraction(enable)\n\n    def EnableBrightPixel(self, enable):\n        return self.dll.PSL_VHR_enable_bright_pixel_correction(enable)\n\n    def EnableFlatField(self, enable):\n        return self.dll.PSL_VHR_enable_flat_field_correction(enable)\n\n    def MakeFlatField(self):\n        try:\n            if self.dll.PSL_VHR_generate_flat_field_image(self.FlatAverage):\n                return True\n            else:\n                return False\n        except:\n            return False\n\n    def SetFlatAverage(self,average_number):\n        self.FlatAverage = average_number\n        return True\n\n    def EnableStreaming(self,enable):\n        self.dll.PSL_VHR_enable_image_streaming(C.c_bool(enable))\n        #print \"Streaming\",enable,self\n        return True\n\n    def InitSequence(self,imnum):\n        self.SeqLen = imnum\n        self.dll.PSL_VHR_initialise_sequence_storage(C.c_uint(self.SeqLen))\n\n    def SnapSequence(self):\n        self.dll.PSL_VHR_snap_sequence(C.c_uint(self.SeqLen))\n\n    def GetSequencePointer(self,id):\n        self.dll.PSL_VHR_get_sequence_image_pointer(C.byref(self.safe),C.c_uint(id))\n        return self.safe\n\n    def FreeSequence(self):\n        self.dll.PSL_VHR_free_sequence_storage()\n\n    def SaveSequence(self):\n        self.dll.PSL_VHR_save_sequence_as_multiple_flf_files(C.c_uint(self.SeqLen))\n\n    def EnableSharpening(self, enable):\n        self.dll.PSL_VHR_enable_sharpening(enable)\n        return True\n\n    def EnableAutoLevel(self, enable):\n        self.dll.PSL_VHR_enable_ALC(enable)\n        return True\n\n    def SetALCMaxExp(self,maxexp):\n        self.dll.PSL_VHR_set_ALC_max_exp(maxexp)\n        return True\n\n    def SetALCWin(self,l,t,r,b):\n        self.dll.PSL_VHR_set_ALC_window_coords(l,t,r,b)\n        return True\n\n    def EnableBestFit(self, enable):\n        self.dll.PSL_VHR_enable_bestfit(enable)\n        return True\n\n    def SetBFPeek(self,peek):\n        self.dll.PSL_VHR_set_bestfit_peek(peek)\n        return True\n\n    def SetIFDelay(self,delay):\n        self.dll.PSL_VHR_set_delay_between_images(delay)\n        return True\n\n    def EnableBinningFilter(self, enable):\n        try:\n            self.dll.PSL_VHR_enable_binning_filter(enable)\n            return True\n        except:\n            return False\n\n    def AutoBinningFilter(self, enable):\n        try:\n            self.dll.PSL_VHR_enable_auto_binning_filter(enable)\n            return True\n        except:\n            return False\n\n    def EnableGamma(self, enable):\n        return self.dll.PSL_VHR_enable_gamma(enable)\n\n    def SetGammaPeak(self,value):\n        return self.dll.PSL_VHR_set_gamma_gain_bright(value)\n\n    def SetGammaBright(self,value):\n        return self.dll.PSL_VHR_set_gamma_gain_brightness(value)\n\n    def SetFlickerMode(self,value):\n        if value==\"Off\":\n            return self.dll.PSL_VHR_set_flicker_mode(0)\n        elif value==\"50MHz\":\n            return self.dll.PSL_VHR_set_flicker_mode(1)\n        elif value==\"60MHz\":\n            
return self.dll.PSL_VHR_set_flicker_mode(2)\n\n\n\n\nif __name__ == '__main__':\n\n\n from PIL import Image\n import numpy\n NUMPY_MODES = {\"L\":numpy.uint8, \"I;16\":numpy.uint16}\n\n def PILfromArray(newarr,mode='I;16'):\n return Image.fromarray(newarr,mode)\n\n def arrayFromBuffer(data, size, mode='I;16'):\n w,h = size\n return numpy.frombuffer(data,NUMPY_MODES[mode]).reshape((h,w))\n\n cam = GEVSCMOS(\"\", \"SCMOS\")\n\n cam.Open()\n\n #======= Init camera setup\n cam.SetClockSpeed('50MHz')\n cam.SetGainMode(\"gain1\")\n cam.SetTrigger(\"FreeRunning\")\n cam.EnableAutoLevel(0)\n cam.SetExposure(2,\"Millisec\")\n\n #======= Acquire Image\n cam.Snap()\n #cam.Snap()\n size,data = cam.GetImage()\n mode = cam.GetMode()\n\n #====== Buffer to Numpy array\n arry = arrayFromBuffer(data,size)\n arry8 = arry*(255/65535.)\n arry8 = arry8.astype(numpy.uint8)\n\n #======== Array to PIL Image\n pil16 = PILfromArray(arry,mode) #16bit image\n pil8 = PILfromArray(arry8,'L') #8bit image\n #save the images\n #pil16.save(\"test16.tiff\")\n pil8.show()\n pil8.save(\"test8.tiff\")\n\n cam.Close()\n","sub_path":"Battery_Testing_Software/labphew/controller/__future/photonicscience/scmoscam.py","file_name":"scmoscam.py","file_ext":"py","file_size_in_byte":27858,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"406966557","text":"import time\nimport random\n\nfrom datetime import timedelta\nfrom django.core import signing\nfrom django.utils import baseconv\n\n\nclass TimeFramedTimestampSigner(signing.TimestampSigner):\n\n def __init__(self, time_frame, uniform_distribution=True, **kwargs):\n \"\"\"\n :param time_frame: Duration in either `datetime.timedelta` object\n or int representing seconds.\n Within this duration, the signature will not change.\n Make sure this duration is less than the TTL/max_age\n of the signature.\n :param uniform_distribution: Boolean indicating if the signature's\n timestamp should be pseudo-randomly\n placed within the given time_frame.\n Enabling this will ensure that multiple signatures'\n rotation won't happen at the very same moment.\n Defaults to True\n \"\"\"\n if isinstance(time_frame, timedelta):\n self.time_frame_seconds = time_frame.total_seconds()\n\n elif isinstance(time_frame, int):\n self.time_frame_seconds = time_frame\n\n else:\n raise TypeError(\n \"time_frame must be either int(seconds) or datetime.timedelta\"\n )\n\n if self.time_frame_seconds < 0:\n raise ValueError(\"time_frame must be positive\")\n\n self.uniform_distribution = uniform_distribution\n self._uniform_distribution_salt = None\n\n super(TimeFramedTimestampSigner, self).__init__(**kwargs)\n\n def sign(self, value):\n self._uniform_distribution_salt = hash(value)\n return super(TimeFramedTimestampSigner, self).sign(value)\n\n def timestamp(self):\n original = int(time.time())\n\n # Start of time frame.\n timestamp = original - (original % self.time_frame_seconds)\n\n if self.uniform_distribution:\n # Make sure that for a given value,\n # the \"random\" delay is always the same.\n random.seed(self._uniform_distribution_salt)\n delay = random.uniform(0, self.time_frame_seconds)\n timestamp += delay\n\n return baseconv.base62.encode(int(timestamp))\n","sub_path":"django_cache_friendly_timestamp_signer/signer.py","file_name":"signer.py","file_ext":"py","file_size_in_byte":2176,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"235450931","text":"import numpy as np\n\nfrom PIL import Image\nfrom 
math import log10\n\n\ndef _rgb2ycbcr(rgb):\n \"\"\"\n Convert from RGB to YCbCr using ITU-R BT.601 conversion scheme.\n Wiki: https://en.wikipedia.org/wiki/YCbCr#ITU-R_BT.601_conversion.\n\n Input:\n - rgb: RGB image, type could be either PIL.Image or numpy array. If numpy\n array, it must be in np.uint8 type.\n Output:\n - res: image that has been converted to YCrCb, with the same type as the\n input.\n \"\"\"\n if type(rgb) == Image.Image:\n # Remember that rgb is of PIL.Image type.\n is_image_type = True\n rgb = np.array(rgb)\n else:\n is_image_type = False\n\n if rgb.dtype != np.uint8:\n raise Exception('input must be in np.uint8 type')\n \n A = np.array([[65.481, 128.553, 24.966],\n [-37.797, -74.203, 112.0],\n [112.0, -93.786, -18.214]], dtype=np.float32) / 255.\n offset = np.array([16., 128., 128.], dtype=np.float32)\n \n res = rgb.dot(A.T) + offset\n\n if rgb.dtype == np.uint8:\n # If rgb is of PIL.Image type, its numpy array version is also np.uint8,\n # hence code will also enter here.\n res = res.astype(np.uint8)\n\n if is_image_type:\n res = Image.fromarray(res, mode='YCbCr')\n \n return res\n\n\ndef _ycbcr2rgb(ycbcr):\n \"\"\"\n Convert from YCbCr to RGB using ITU-R BT.601 conversion scheme.\n Wiki: https://en.wikipedia.org/wiki/YCbCr#ITU-R_BT.601_conversion.\n\n Input:\n - ycbcr: YCbCr image, type could be either PIL.Image or numpy array.\n If numpy array, it must be of type np.uint8.\n Output:\n - res: image that has been converted to RGB, with the same type as the\n input.\n \"\"\"\n if type(ycbcr) == Image.Image:\n # Remember that ycbcr is of PIL.Image type.\n is_image_type = True\n ycbcr = np.array(ycbcr)\n else:\n is_image_type = False\n\n if ycbcr.dtype != np.uint8:\n raise Exception('input must be in np.uint8 type')\n \n A = np.linalg.inv(\n np.array([[65.481, 128.553, 24.966],\n [-37.797, -74.203, 112.0],\n [112.0, -93.786, -18.214]], dtype=np.float64)) * 255.\n offset = np.array([16., 128., 128.], dtype=np.float64)\n offset = A.dot(offset)\n \n res = ycbcr.astype(np.float64).dot(A.T) - offset\n res = np.clip(res, 0, 255)\n res = res.astype(np.uint8)\n\n if is_image_type:\n res = Image.fromarray(res, mode='RGB')\n \n return res\n\n\ndef compute_psnr(pred, gt, ignore_border=0):\n \"\"\"Input must be np.uint8 array or RGB PIL Image.\n \"\"\"\n pred = pred.squeeze()\n gt = gt.squeeze()\n\n if pred.ndim == 3:\n pred = np.uint8(_rgb2ycbcr(pred))[:,:,0]\n if gt.ndim == 3:\n gt = np.uint8(_rgb2ycbcr(gt))[:,:,0]\n\n if ignore_border > 0:\n pad = ignore_border // 2\n width, height = gt.shape[1], gt.shape[0]\n pred = pred[pad:height-pad,pad:width-pad]\n gt = gt[pad:height-pad,pad:width-pad]\n\n pred = pred.astype(np.float64)\n gt = gt.astype(np.float64)\n \n diff = (pred - gt)**2\n mse = np.mean(diff)\n psnr = 10 * log10((255.0**2) / mse)\n \n return psnr","sub_path":"FSRCNN/utilities.py","file_name":"utilities.py","file_ext":"py","file_size_in_byte":3166,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"71653806","text":"import asyncio, time\nfrom pyartnet import ArtNetNode\nimport random\n\nWHITE = [255,255,255,255]\nWHITE_DIFF = []\nfor val in WHITE:\n WHITE_DIFF.append(val / 255)\n\nFADE_MOD = 0.87\n\n\ndef daytime_adjust(fades, day_mode):\n if day_mode:\n for i in range(len(fades)):\n fades[i] = int(((fades[i] / 255) * 196) + 59)\n return fades\n\n\nclass ControllerConfig:\n def __init__(self):\n # loop = asyncio.get_event_loop()\n # loop.set_debug(True)\n self.nodes = {}\n self.universes = {}\n self.loop = 
asyncio.get_event_loop()\n self.add_universe('master', 1, role='master', ip=None, log=True)\n self.slaves = []\n self.day_mode = True\n\n def add_universe(self, universe, fixtures, ip, role='slave', log=False):\n self.universes[universe] = {}\n self.universes[universe]['cmd_queue'] = asyncio.Queue()\n self.universes[universe]['role'] = role\n self.universes[universe]['spike'] = asyncio.Event()\n self.universes[universe]['spike'].set()\n self.universes[universe]['rand_spike'] = False\n self.universes[universe]['log'] = log\n if role == 'slave':\n if ip in self.nodes.keys():\n node = self.nodes[ip]\n else:\n node = ArtNetNode(ip, max_fps=120)\n node.start()\n self.universes[universe]['addresses'] = fixtures * 4\n self.universes[universe]['univ'] = node.add_universe(universe)\n self.universes[universe]['channel'] = \\\n self.universes[universe]['univ'].add_channel(1, self.universes[universe]['addresses'])\n\n def log(self, universe, msg):\n if self.universes[universe]['log']:\n print(\"%s: %s\" % (universe, msg))\n\n\n def listen(self):\n\n async def wait_on_fade(vals, fade_time, universe, slaves=None):\n start = time.time()\n if slaves is not None:\n val = max(vals)\n for slave in slaves:\n slave_queue = self.universes[slave]['cmd_queue']\n await slave_queue.put(['fade', val, fade_time])\n await asyncio.sleep((start + (fade_time / 1000)) - time.time())\n elif self.universes[universe]['spike'].is_set():\n vals = daytime_adjust(vals, self.day_mode)\n channel = self.universes[universe]['channel']\n channel.add_fade(vals, fade_time * FADE_MOD)\n await channel.wait_till_fade_complete()\n if self.universes[universe]['spike'].is_set():\n fade_took = time.time() - start\n difference = (fade_time / 1000) / fade_took\n if fade_time != 0:\n self.log(universe, \"->fade time = %s, took = %s, difference = %s \" %\n (fade_time/1000, fade_took, difference))\n else:\n while True:\n remaining = (fade_time/1000) - (time.time() - start)\n self.log(universe, \"fade finished %s seconds early\" % remaining)\n stop = time.time() + remaining\n await self.universes[universe]['spike'].wait()\n remaining = stop - time.time()\n if remaining > 0.1:\n self.log(universe, \"Restarting fade with %s seconds remaining\" % remaining)\n channel.add_fade(vals, (remaining * 1000) * FADE_MOD)\n await channel.wait_till_fade_complete()\n # if not self.universes[universe]['spike'].is_set():\n # self.log(universe, \"Fade Complete - difference %s\" % ((fade_time/1000) - (time.time() - start)))\n # break\n else:\n self.log(universe, \"Fade Complete - difference %s\" % ((fade_time/1000) - (time.time() - start)))\n break\n # await asyncio.sleep(remaining)\n else:\n vals = daytime_adjust(vals, self.day_mode)\n channel = self.universes[universe]['channel']\n remaining = (fade_time / 1000) - (time.time() - start)\n self.log(universe, \"fade finished %s seconds early\" % remaining)\n stop = time.time() + remaining\n await self.universes[universe]['spike'].wait()\n remaining = stop - time.time()\n if remaining > 0.1:\n self.log(universe, \"Restarting fade with %s seconds remaining\" % remaining)\n channel.add_fade(vals, (remaining * 1000) * FADE_MOD)\n await channel.wait_till_fade_complete()\n\n async def fade(universe, channel, addresses, fade_to, fade_time):\n vals = list(channel.get_channel_values())\n for i in range(1, addresses, 4):\n for c in range(0,4):\n vals[i + c - 1] = int(fade_to * WHITE_DIFF[c])\n await wait_on_fade(vals, fade_time, universe)\n\n async def pulse(universe, channel, addresses, cmd_queue, role,\n num_fixtures, 
rand,fade_top, fade_bottom, fade_time, hold_time):\n num_fixtures = float(num_fixtures)\n selected_fixtures = []\n if num_fixtures != 1.0:\n for i in range(0, addresses, 4):\n selected_fixtures.append(i)\n selected_fixtures = random.sample(selected_fixtures, int(len(selected_fixtures) * num_fixtures))\n self.log(universe, \"channels selected = %s\" % selected_fixtures)\n up = True\n while cmd_queue.empty():\n if up is True:\n if type(fade_top) == list:\n target = random.randrange(fade_top[0], fade_top[1])\n self.log(universe, \"target = %s\" % target)\n else:\n target = fade_top\n if rand is True and num_fixtures != 1.0:\n selected_fixtures = []\n for i in range(3, addresses, 4):\n selected_fixtures.append(i)\n selected_fixtures = random.sample(selected_fixtures,\n int(len(selected_fixtures) * num_fixtures))\n else:\n if type(fade_bottom) == list:\n target = random.randrange(fade_bottom[0], fade_bottom[1])\n else:\n target = fade_bottom\n if type(fade_time) == list:\n new_fade_time = random.uniform(fade_time[0], fade_time[1]) * 1000\n else:\n new_fade_time = fade_time\n if role == 'slave':\n vals = list(channel.get_channel_values())\n for i in range(1, addresses, 4):\n if num_fixtures == 1.0 or i in selected_fixtures:\n for c in range(0, 4):\n vals[i + c - 1] = int(target * WHITE_DIFF[c])\n await wait_on_fade(vals, new_fade_time, universe)\n else:\n vals = [target]\n await wait_on_fade(vals, new_fade_time, universe, slaves=self.slaves)\n if cmd_queue.empty():\n await asyncio.sleep(hold_time / 1000)\n up = not up\n\n async def static_pulse(universe, channel, addresses, cmd_queue, role, static):\n static_vals_all = [\n [\n [0.055, 234], [0.055, 210], [0.055, 234], [0.055, 204], [0.02, 224],\n [0.055, 197], [0.055, 221], [0.055, 193], [0.055, 210], [0.055, 199]\n ],\n [\n [0.055, 224], [0.055, 160], [0.055, 221], [0.055, 193], [0.02, 210],\n [0.055, 151], [0.055, 234], [0.055, 210], [0.055, 234], [0.055, 197]\n ],\n [\n [0.055, 210], [0.055, 118], [0.055, 234], [0.055, 168], [0.02, 234],\n [0.055, 146], [0.055, 224], [0.055, 174], [0.055, 220], [0.055, 109],\n [0.055, 193], [0.055, 86], [0.055, 190], [0.055, 135], [0.055, 220], [0.055, 105]\n ],\n [\n [0.055, 234], [0.2, 145], [0.2, 221], [0.2, 149], [0.3, 233],\n [0.2, 115], [0.3, 224], [0.2, 152], [0.055, 221], [0.055, 156],\n [0.2, 234], [0.3, 103], [0.1255, 193],\n ],\n [\n [0.055, 223], [0.2, 167], [0.3, 248], [0.55, 138], [0.2, 234],\n [0.5, 110], [0.3, 245], [0.3, 129], [0.3, 226], [0.2, 145],\n [0.3, 245], [0.055, 137], [0.3, 229], [0.5, 166],\n ],\n ]\n static_vals = static_vals_all[static]\n for i in range(len(static_vals)):\n static_vals[i][0] *= 1000\n while cmd_queue.empty():\n for next_val in static_vals:\n if role == 'slave':\n vals = list(channel.get_channel_values())\n for i in range(1, addresses, 4):\n for c in range(0, 4):\n vals[i + c - 1] = int(next_val[1] * WHITE_DIFF[c])\n if not cmd_queue.empty():\n break\n else:\n await wait_on_fade(vals, next_val[0], universe)\n else:\n if not cmd_queue.empty():\n break\n else:\n vals = [next_val[1]]\n await wait_on_fade(vals, next_val[0], universe, slaves=self.slaves)\n\n async def chase(universe, channel, addresses, cmd_queue,\n fade_to, fade_bottom, fade_time, hold_time, width):\n increment = int((fade_to - fade_bottom) / width)\n steps = []\n for i in range(width):\n next_step = fade_to - (increment * i)\n if next_step > 255:\n next_step = 255\n steps.append(next_step)\n pos = 3\n while cmd_queue.empty():\n vals = list(channel.get_channel_values())\n for i in range(width):\n if 
pos + (4 * i) <= addresses:\n                        vals[pos + (4 * i)] = steps[i]\n                await wait_on_fade(vals, fade_time, universe)\n                if cmd_queue.empty():\n                    await asyncio.sleep(hold_time / 1000)\n                vals[pos] = fade_bottom\n                if cmd_queue.empty():\n                    await wait_on_fade(vals, fade_time, universe)\n                pos += 4\n                if pos > addresses:\n                    pos = 3\n\n        async def flicker(universe, channel, addresses, cmd_queue,\n                          fade_top, fade_bottom, fade_time, steps):\n            if steps > addresses / 4:\n                steps = addresses // 4\n            increment = int((fade_top - fade_bottom) / steps)\n            vals = list(channel.get_channel_values())\n            while cmd_queue.empty():\n                for i in range(steps):\n                    selected_fixtures = []\n                    for f in range(1, addresses, 4):\n                        selected_fixtures.append(f)\n                    selected_fixtures = random.sample(selected_fixtures, int(len(selected_fixtures) / steps))\n                    for f in selected_fixtures:\n                        for c in range(0, 4):\n                            vals[f + c - 1] = int((increment * i) * WHITE_DIFF[c])\n                    await wait_on_fade(vals, fade_time, universe)\n\n        async def single_listen(universe):\n            self.log(universe, \" ->STARTING LISTENER\")\n            cmd_queue = self.universes[universe]['cmd_queue']\n            addresses = self.universes[universe]['addresses']\n            channel = self.universes[universe]['channel']\n            role = self.universes[universe]['role']\n            while True:\n                new_cmd = await cmd_queue.get()\n                self.log(universe, \" ->cmd = %s\" % new_cmd)\n                # Quit\n                if new_cmd[0] == 'quit':\n                    break\n                # Blackout\n                if new_cmd[0] == 'blackout':\n                    vals = [0] * addresses\n                    await wait_on_fade(vals, 0, universe)\n                # Hold\n                if new_cmd[0] == 'hold':\n                    self.universes[universe]['channel'].cancel_fades()\n                # Fade\n                elif new_cmd[0] == 'fade':\n                    fade_to = new_cmd[1]\n                    fade_time = new_cmd[2]\n                    await fade(universe, channel, addresses, fade_to, fade_time)\n                # Pulse\n                elif new_cmd[0] == 'pulse':\n                    fade_top = new_cmd[1]\n                    fade_bottom = new_cmd[2]\n                    fade_time = new_cmd[3]\n                    hold_time = new_cmd[4]\n                    num_fixtures = new_cmd[5]\n                    rand = new_cmd[6]\n                    await pulse(universe, channel, addresses, cmd_queue, role,\n                                num_fixtures, rand, fade_top, fade_bottom, fade_time, hold_time)\n                ## Chase\n                elif new_cmd[0] == 'chase':\n                    fade_to = new_cmd[1]\n                    fade_bottom = new_cmd[2]\n                    channel.add_fade([fade_bottom] * addresses, 0)\n                    await channel.wait_till_fade_complete()\n                    fade_time = new_cmd[3]\n                    hold_time = new_cmd[4]\n                    width = new_cmd[5]\n                    await chase(universe, channel, addresses, cmd_queue,\n                                fade_to, fade_bottom, fade_time, hold_time, width)\n\n                ## Flicker\n                elif new_cmd[0] == 'flicker':\n                    fade_top = new_cmd[1]\n                    fade_bottom = new_cmd[2]\n                    channel.add_fade([fade_bottom] * addresses, 0)\n                    await channel.wait_till_fade_complete()\n                    fade_time = new_cmd[3]\n                    steps = new_cmd[4]\n                    await flicker(universe, channel, addresses, cmd_queue,\n                                  fade_top, fade_bottom, fade_time, steps)\n\n        async def master_listen():\n            universe = 'master'\n            self.log(universe, \" ->STARTING MASTER\")\n            cmd_queue = self.universes[universe]['cmd_queue']\n            role = self.universes[universe]['role']\n            while True:\n                new_cmd = await cmd_queue.get()\n                self.log(universe, \" ->cmd = %s\" % new_cmd)\n                # Quit\n                if new_cmd[0] == 'quit':\n                    break\n                # Blackout\n                if new_cmd[0] == 'blackout':\n                    for slave in self.slaves:\n                        slave_queue = self.universes[slave]['cmd_queue']\n                        await slave_queue.put(['blackout'])\n                # Hold\n                if new_cmd[0] == 'hold':\n                    for slave in self.slaves:\n                        slave_queue = self.universes[slave]['cmd_queue']\n                        await slave_queue.put(['hold'])\n                # Fade\n                elif new_cmd[0] == 'fade':\n                    fade_to = new_cmd[1]\n                    fade_time = new_cmd[2]\n                    await wait_on_fade([fade_to], fade_time, universe, self.slaves)\n                # static\n                elif new_cmd[0] 
== 'static':\n static = new_cmd[1]\n await static_pulse(universe, None, None, cmd_queue, role, static)\n # Pulse\n elif new_cmd[0] == 'pulse':\n fade_top = new_cmd[1]\n fade_bottom = new_cmd[2]\n fade_time = new_cmd[3]\n hold_time = new_cmd[4]\n num_fixtures = 1.0\n rand = False\n await pulse(universe, None, None, cmd_queue, role,\n num_fixtures, rand, fade_top, fade_bottom, fade_time, hold_time)\n\n async def main():\n all_listeners = [master_listen()]\n # all_listeners = []\n for universe in self.universes:\n if universe != 'master':\n all_listeners.append(single_listen(universe))\n await asyncio.gather(*all_listeners)\n\n self.loop.run_until_complete(main())\n\n def spike(self, universe, fade_to, fade_time, repeat=1):\n if type(fade_time) != list:\n fade_time *= 1000\n\n async def spike(u):\n self.log(u, \"starting spike\")\n self.universes[u]['spike'].clear()\n for n in range(repeat):\n self.universes[u]['channel'].cancel_fades()\n channel = self.universes[u]['channel']\n addresses = self.universes[u]['addresses']\n if type(fade_to) == list:\n new_fade_to = random.randint(fade_to[0], fade_to[1])\n else:\n new_fade_to = fade_to\n vals = list(channel.get_channel_values())\n base = max(vals)\n for i in range(1, addresses, 4):\n for c in range(0, 4):\n vals[i + c - 1] = int(new_fade_to * WHITE_DIFF[c])\n start = time.time()\n channel.add_fade(vals, (fade_time / 2) * FADE_MOD)\n await channel.wait_till_fade_complete()\n for i in range(0, len(vals)):\n vals[i] = base\n channel.add_fade(vals, (fade_time / 2) * FADE_MOD)\n await channel.wait_till_fade_complete()\n fade_took = time.time() - start\n self.log(u, \"ending spike - took = %s, difference = %s\" % (fade_took, (fade_time / 1000) - fade_took))\n self.universes[u]['spike'].set()\n\n if universe != 'master':\n self.loop.create_task(spike(universe))\n else:\n self.log(universe, \"starting spike\")\n for slave in self.slaves:\n self.loop.create_task(spike(slave))\n\n def start_rand_spike(self, universe, fade_top, fade_time, freq_min, freq_max, repeat=1):\n fade_time *= 1000\n\n async def rand_spike():\n channel = self.universes[universe]['channel']\n new_fade_top = fade_top\n new_repeat = repeat\n while self.universes[universe]['rand_spike']:\n new_wait = random.uniform(freq_min, freq_max)\n await asyncio.sleep(new_wait)\n if self.universes[universe]['rand_spike']:\n addresses = self.universes[universe]['addresses']\n old_vals = list(channel.get_channel_values())\n if type(repeat) == list:\n new_repeat = random.randint(repeat[0], repeat[1])\n for i in range(new_repeat):\n if type(fade_top) == list:\n new_fade_top = random.randrange(fade_top[0], fade_top[1])\n vals = [0] * len(old_vals)\n for i in range(1, addresses, 4):\n for c in range(0, 4):\n cur_top = old_vals[i + c - 1]\n vals[i + c - 1] = cur_top + int(new_fade_top * WHITE_DIFF[c])\n channel.cancel_fades()\n self.log(universe, \"starting spike\")\n start = time.time()\n self.universes[universe]['spike'].clear()\n channel.add_fade(vals, (fade_time / 2) * FADE_MOD)\n await channel.wait_till_fade_complete()\n channel.add_fade(vals, (fade_time / 2) * FADE_MOD)\n await channel.wait_till_fade_complete()\n self.log(universe, \"ending spike, took %s seconds\" % (time.time() - start))\n if i < new_repeat - 1:\n await asyncio.sleep(0.25)\n self.universes[universe]['spike'].set()\n\n if universe != 'master':\n self.universes[universe]['rand_spike'] = True\n self.loop.create_task(rand_spike())\n else:\n self.log(universe, \"Master cannot have random spike\")\n\n def stop_rand_spike(self, 
universe):\n self.universes[universe]['rand_spike'] = False\n\n def set_slaves(self, slaves):\n if type(slaves) is not list:\n slaves = [slaves]\n self.slaves = slaves\n\n def blackout(self, universe):\n async def cmd(u):\n await self.universes[u]['cmd_queue'].put(['blackout'])\n\n self.loop.create_task(cmd(universe))\n\n def quit(self, universe):\n async def cmd(u):\n await self.universes[u]['cmd_queue'].put(['quit'])\n\n self.loop.create_task(cmd(universe))\n\n def hold(self, universe):\n async def cmd(u):\n while not self.universes[u]['cmd_queue'].empty():\n self.universes[u]['cmd_queue'].get_nowait()\n print(\"Emptying queue\")\n await self.universes[u]['cmd_queue'].put(['hold'])\n\n self.loop.create_task(cmd(universe))\n\n def fade(self, universe, fade_to, fade_time, sync=False):\n fade_time *= 1000\n\n async def cmd(u):\n await self.universes[u]['cmd_queue'].put(['fade', fade_to, fade_time, sync])\n self.log(universe, \"adding fade to universe %s\" % universe)\n\n self.loop.create_task(cmd(universe))\n\n def static(self, universe, static):\n async def cmd(u):\n await self.universes[u]['cmd_queue'].put(['static', static])\n self.log(universe, \"adding static %s to universe %s\" % (universe, static))\n\n self.loop.create_task(cmd(universe))\n\n def pulse(self, universe, fade_top, fade_bottom, fade_time, hold_time, fixtures=1.0, rand=False):\n if type(fade_time) != list:\n fade_time *= 1000\n if type(hold_time) != list:\n hold_time *= 1000\n\n async def cmd(u):\n await self.universes[u]['cmd_queue'].put(\n ['pulse', fade_top, fade_bottom, fade_time, hold_time, fixtures, rand])\n\n self.loop.create_task(cmd(universe))\n\n def chase(self, universe, fade_top, fade_bottom, fade_time, hold_time, width=1):\n if type(fade_time) != list:\n fade_time *= 1000\n if type(hold_time) != list:\n hold_time *= 1000\n\n async def cmd(u):\n await self.universes[u]['cmd_queue'].put(['chase', fade_top, fade_bottom, fade_time, hold_time, width])\n\n self.loop.create_task(cmd(universe))\n\n def flicker(self, universe, fade_top, fade_bottom, fade_time, steps):\n if type(fade_time) != list:\n fade_time *= 1000\n\n async def cmd(u):\n await self.universes[u]['cmd_queue'].put(['flicker', fade_top, fade_bottom, fade_time, steps])\n\n self.loop.create_task(cmd(universe))","sub_path":"controller.py","file_name":"controller.py","file_ext":"py","file_size_in_byte":23431,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"383590738","text":"#!/usr/bin/python3\nimport sys\nimport logging\nfrom PyQt5 import uic\nfrom PyQt5 import QtWidgets\nfrom PyQt5 import QtCore\nimport models\n\nclass IngredientSelect(QtWidgets.QDockWidget):\n \n def __init__(self, models):\n \n # TODO why does super() not work?\n QtWidgets.QDockWidget.__init__(self)\n \n # load ui\n uic.loadUi(\"uiDockIngredientSelect.ui\", self)\n\n # proxy\n proxy = QtCore.QSortFilterProxyModel()\n proxy.setSourceModel(models.ingredient)\n proxy.setDynamicSortFilter(True)\n proxy.setFilterCaseSensitivity(QtCore.Qt.CaseInsensitive)\n proxy.setFilterKeyColumn(models.ingredientNameIdx)\n \n # set view model and connect\n self.uiListView.setModel(proxy)\n self.uiListView.setModelColumn(models.ingredientNameIdx)\n\n #proxy.setFilterRegExp.connect\n\n self.uiFilter.textChanged.connect(proxy.setFilterRegExp)\n # enable sorting\n #self.uiTree.setSortingEnabled(True)\n \n @QtCore.pyqtSlot('QString') \n def on_uiFilter_textChanged(self, value):\n print(value) \n\nclass RecipeSelection(QtWidgets.QDockWidget):\n \n def 
__init__(self, models):\n        \n        # TODO why does super() not work?\n        QtWidgets.QDockWidget.__init__(self)\n        \n        # load ui\n        uic.loadUi(\"uiDockRecipeSelect.ui\", self)\n        \n        # models\n        model = models.recipeselect\n        self.uiTreeView.setModel(model)\n        \n        \nclass MainWindow(QtWidgets.QMainWindow):\n\n    def __init__(self):\n        \n        # TODO why does super() not work?\n        QtWidgets.QMainWindow.__init__(self)\n\n        # create database, has to be after application start\n        myModels = models.Models(database = 'run.db')\n        # TODO RESET \n        #myModels._initialize_database('schema.sql')\n        # TODO RESET \n        #myModels._initialize_database('backup.sql')\n        \n        # load ui\n        uic.loadUi(\"uiMain.ui\", self)\n\n        # load dock widgets\n        self.uiDockRecipeSelect = RecipeSelection(myModels)\n        self.uiDockIngredientSelect = IngredientSelect(myModels)\n        self.addDockWidget(QtCore.Qt.LeftDockWidgetArea, self.uiDockIngredientSelect)\n        self.addDockWidget(QtCore.Qt.LeftDockWidgetArea, self.uiDockRecipeSelect)\n        self.uiMenuView.addAction(self.uiDockRecipeSelect.toggleViewAction())\n        self.uiMenuView.addAction(self.uiDockIngredientSelect.toggleViewAction())\n\n        self.models = myModels\n\n    def close(self):\n        \n        del self.models\n\ndef main():\n    \n    # create logger\n    logger = logging.getLogger('main')\n    logger.setLevel(logging.DEBUG)\n\n    # create console handler and set level to debug\n    ch = logging.StreamHandler()\n    ch.setLevel(logging.DEBUG)\n\n    # create formatter\n    formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n\n    # add formatter to ch\n    ch.setFormatter(formatter)\n\n    # add ch to logger\n    logger.addHandler(ch)\n\n    # 'application' code\n    logger.info('start application')\n\n    # create application\n    app = QtWidgets.QApplication(sys.argv)\n    \n    # load main window\n    win = MainWindow()\n    win.show()\n    \n    # wait for application exit\n    exitCode = app.exec_() \n    \n    # clean up, force to close model and database\n    win.close()\n    \n    # exit\n    sys.exit(exitCode)\n\nif __name__ == '__main__':\n    main()","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3429,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"139161590","text":"import os\nimport zipfile\nfrom collections import namedtuple\nfrom pathlib import Path\nfrom typing import Tuple\nfrom uuid import uuid4\n\nfrom django.db import models\nfrom django.contrib.auth.models import User\nfrom django.http import Http404\nfrom slugify import slugify\n\nfrom config import settings\nfrom config.settings import EXTERNAL_TOKEN_VALIDATION_URL\nfrom django.urls import reverse  # Used to generate URLs by reversing the URL patterns\n\nfrom core.utils import send_mail, get_unique_filename, send_notification_for_client\nfrom core import utils\nfrom config.settings import logger\n\n\nclass Applications(models.Model):\n    \"\"\"\n    Model for pass applications; it can also hold the manager's comments.\n    \"\"\"\n\n    class Zones:\n        MCAD = 'm'\n        TTK = 't'\n        SK = 's'\n        chooses = ((MCAD, 'МКАД'), (TTK, 'ТТК'), (SK, 'СК'),)\n\n    #\n    owner = models.CharField(max_length=500, verbose_name='Собственник')\n    car_number = models.CharField(max_length=12, verbose_name='Номер машины')\n    zone = models.CharField(max_length=1, choices=Zones.chooses, verbose_name='Зона')\n    client = models.ForeignKey('Clients', on_delete=models.CASCADE, verbose_name='Клиент')\n\n    # Administration block\n    date_push_onetime = models.DateField(verbose_name='Дата подачи на разовый', blank=True, null=True)\n    date_push_year = models.DateField(verbose_name='Дата подачи на годовой', blank=True, null=True)\n    date_get_year = models.DateField(verbose_name='Дата выдачи годового', blank=True, null=True)\n    comment_admin = models.TextField(verbose_name='Комментарий', blank=True, null=True)\n    is_passed = models.BooleanField(default=False, verbose_name='Пропуск выдан')\n    notify_client = models.BooleanField(default=False, verbose_name='Уведомить клиента')\n\n    # User's application\n    sts = models.FileField(upload_to=get_unique_filename, verbose_name='СТС', blank=True, null=True)\n    pts = models.FileField(upload_to=get_unique_filename, verbose_name='ПТС', blank=True, null=True)\n    dk = models.FileField(upload_to=get_unique_filename, verbose_name='ДК', blank=True, null=True)\n    vu = models.FileField(upload_to=get_unique_filename, verbose_name='ВУ', blank=True, null=True)\n    owner_passport = models.FileField(\n        upload_to=get_unique_filename, verbose_name='Паспорт собственника ТС',\n        help_text='Не обязательное поле', blank=True, null=True)\n\n    lsnnl = models.FileField(\n        upload_to=get_unique_filename, verbose_name='Лизинг',\n        help_text='Не обязательное поле', blank=True, null=True\n    )\n    requisites = models.FileField(\n        upload_to=get_unique_filename, verbose_name='Реквизиты',\n        help_text='Не обязательное поле', blank=True, null=True\n    )\n    additional_file = models.FileField(\n        upload_to=get_unique_filename, verbose_name='Дополнительный файл',\n        help_text='Не обязательное поле', blank=True, null=True\n    )\n\n    comment_from_user = models.TextField(verbose_name='Комментарий от пользователя', blank=True, null=True)\n\n    def is_complete(self):\n        if self.sts and self.pts and self.dk and self.vu:\n            return True\n\n    is_complete.boolean = True\n    is_complete.short_description = 'Заполнено'\n\n    def __str__(self):\n        return self.owner\n\n    def get_zone(self):\n        \"\"\"\n        Returns the string representation of the selected zone\n        \"\"\"\n        for zone_tuple in self.Zones.chooses:\n            if zone_tuple[0] == self.zone:\n                return zone_tuple[1]\n\n    def get_absolute_url(self):\n        \"\"\"\n        Returns the url to access a particular application instance.\n        \"\"\"\n        return reverse('passes-renew', args=[str(self.id)])\n\n    @logger.catch()\n    def __get_documents(self) -> list:\n\n        def get_named_tuple(obj):\n            \"\"\" Generates a named tuple\n            return: (name:str, path:str)\n            \"\"\"\n            _tuple = namedtuple(\n                typename=f'f_{uuid4().__str__()[:5]}',\n                field_names=['name', 'path'])\n\n            name = \"{filename}{suffix}\".format(\n                filename=obj.field.name,\n                suffix=Path(obj.name).suffix,\n            )\n            logger.debug(name)\n            return _tuple(name=name, path=obj.path)\n\n        documents = []\n        if self.sts:\n            documents.append(get_named_tuple(self.sts))\n        if self.pts:\n            documents.append(get_named_tuple(self.pts))\n        if self.dk:\n            documents.append(get_named_tuple(self.dk))\n        if self.vu:\n            documents.append(get_named_tuple(self.vu))\n        if self.owner_passport:\n            documents.append(get_named_tuple(self.owner_passport))\n        if self.lsnnl:\n            documents.append(get_named_tuple(self.lsnnl))\n        if self.requisites:\n            documents.append(get_named_tuple(self.requisites))\n        if self.additional_file:\n            documents.append(get_named_tuple(self.additional_file))\n\n        logger.debug(documents)\n\n        return documents\n\n    def get_zip_url(self):\n        return reverse('get-zip', args=[str(self.id)])\n\n    def get_zip(self) -> namedtuple:\n        zip_path = self.__make_zip()\n        if zip_path.exists():\n            with open(zip_path, 'rb') as _zip:\n                return namedtuple(\n                    'name', ['name', 'bytes'])(\n                    name=zip_path.name,\n                    bytes=_zip.read()\n                )\n\n        raise Http404\n\n    @logger.catch()\n    def __make_zip(self):\n        \"\"\" Creates a zip archive from the application's files\n        return: Path - absolute system path to zip file\n        \"\"\"\n        zip_dir = settings.MEDIA_ROOT / 'zip'\n        if zip_dir.exists():\n            os.system(f'rm -rf {zip_dir}')\n            os.system(f'mkdir {zip_dir}')\n        else:\n            os.system(f'mkdir {zip_dir}')\n\n        zip_name = '{name}_{car}_{zone}.zip'.format(\n            name=slugify(self.owner),\n            car=slugify(self.car_number),\n            zone=slugify(self.get_zone()),\n        )\n\n        zip_path = zip_dir / zip_name\n        with zipfile.ZipFile(zip_path, 'w') as _zip:\n            for document in self.__get_documents():\n                _zip.write(document.path, document.name)\n\n        return zip_path\n\n    def save(self, force_insert=False, force_update=False, using=None, update_fields=None):\n        if self.notify_client:\n            # Notify the client\n            send_notification_for_client(self)\n            self.notify_client = False\n\n        super(Applications, self).save()\n\n    class Meta:\n        verbose_name = 'Заявка'\n        verbose_name_plural = 'Заявки'\n        ordering = ['-comment_admin', '-is_passed', '-pk']\n\n\nclass Clients(models.Model):\n    \"\"\"\n    The manager creates a token, from which a link is generated in change_form.html\n    The link is sent to the client's email automatically if it was provided at creation\n    After registration is completed, is_registered is set to True\n    \"\"\"\n    token = models.CharField(max_length=36, verbose_name='Токен', default=uuid4)\n    email = models.EmailField(blank=True, null=True, verbose_name='Почта', unique=True,\n                              help_text='После сохранения, на эту почту будет отправлена ссылка для регистрации')\n\n    created = models.DateTimeField(auto_now_add=True, db_index=True, verbose_name='Создано')\n    user = models.ForeignKey(User, on_delete=models.CASCADE, null=True, blank=True, editable=False)\n    name = models.CharField(max_length=130, blank=True, null=True, verbose_name='Наименование')\n    is_registered = models.BooleanField(default=False, verbose_name='Зарегистрирован', editable=False)\n    is_email_sent = models.BooleanField(default=False, verbose_name='Сообщение отправлено', editable=False)\n\n    def __str__(self):\n        \"\"\"\n        Returns a formatted date or email / user_firstname\n        \"\"\"\n        if self.user:\n            return self.user.first_name\n        elif self.email:\n            return self.email\n        else:\n            return str(self.created.strftime('%d %h %Y %H:%M'))\n\n    def save(self, *args, **kwargs):\n        \"\"\"\n        Send email to clients after saving the object\n        \"\"\"\n        if self.email and not self.is_registered and not self.is_email_sent:\n            token_validation_url = f'{EXTERNAL_TOKEN_VALIDATION_URL}?token={self.token}&email={self.email}'\n            message = f'Ссылка для регистрации:\\n{token_validation_url}'\n            if settings.DEBUG:\n                print(message)\n            else:\n                send_mail('Регистрация на портале agro-meridian', message, self.email)\n            self.is_email_sent = True\n        super().save(*args, **kwargs)\n\n    def delete_own_user(self):\n        try:\n            User.objects.get(email=self.email).delete()\n        except User.DoesNotExist:\n            print(\"ОШИБКА УДАЛЕНИЯ СВЯЗАННОГО ПОЛЬЗОВАТЕЛЯ\")\n\n    def delete(self, *args, **kwargs):\n        self.delete_own_user()\n        super().delete(*args, **kwargs)\n\n    class Meta:\n        verbose_name = 'Клиент'\n        verbose_name_plural = 'Клиенты'\n","sub_path":"apps/passes_manager/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":9980,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"171376554","text":"# -*- coding: utf-8 -*-\nimport numpy as np\nfrom rankMethods import *\nimport pandas as pd\nimport os.path\nimport matplotlib.pyplot as pplot\n#from sklearn.metrics import roc_auc_score\n\n\nplotFile = 
os.path.realpath('../jhu_data/smallPlotData.csv')\ninterFile = os.path.realpath('../jhu_data/interventions.csv')\n\n## get data\nX = pd.read_csv(plotFile, sep=',', \n header=0, index_col=None)\nY = np.array(X['score'])\npatientIDs = np.unique(np.array(X['ID']))\nXtest = np.array(X.drop(['score','time','ID'],axis=1))\n\ninterIDs = pd.read_csv(interFile, sep=',', header=0, index_col=None)\n\n## run\n#[w_sol, functionCost] = rankTrain(Xtrain, Ytrain, 1, 10.**-5, .5, margin='hard')\nscoresReg = rankTest(Xtest, np.load('sol_1.npy'), 1)\n#relevanceReg = Ytest[np.argsort(-scoresReg)]\n\nscoresLin = rankTest(Xtest, np.load('sol_4.npy'), 4)\n#relevanceSoft = Ytest[np.argsort(-scoresLin)]\n\nscoresPois = rankTest(Xtest, np.load('sol_6.npy'), 6)\n#relevanceReg = Ytest[np.argsort(-scoresPois)]\n\nif True: # rescale\n scoresReg = (scoresReg-min(scoresReg))/(max(scoresReg)-min(scoresReg))\n scoresLin = (scoresLin-min(scoresLin))/(max(scoresLin)-min(scoresLin))\n scoresPois = (scoresPois-min(scoresPois))/(max(scoresPois)-min(scoresPois))\n\n\niids = np.unique(interIDs['ID'].tolist())\n\n\n## pick a patient\ncounter = 0\n\n\npid = iids[counter]\npInds = np.where(X['ID']==pid)[0]\npreg = scoresReg[pInds]\nplin = scoresLin[pInds]\nppois = scoresPois[pInds]\nptrue = Y[pInds]\ntime = X['time'].iloc[pInds]\ntime = time / 60.\n\npToUse = plin\nsToUse = scoresLin\n#pToUse = ppois\n#sToUse = scoresPois\n\nfig, ax = pplot.subplots()\n#dist = max(pToUse) - min(pToUse)\n#bottom = min(pToUse) - dist*.05 #min(y)\n#top = max(pToUse) + dist*.05 #max(y)\nbottom = min(sToUse)\ntop = max(sToUse)\nax.set_ylim(bottom=bottom,top=top)\nax.plot(time, pToUse, linestyle='-',color='black',marker='s')\n#ax.plot(treatmentInds,[0.]*len(treatmentInds),marker='o',color='purple')\nax.set_xlabel('hours of stay')\nax.set_ylabel('scaled MR score')\nax.set_title(\"Patient's score over time\")#, Example \"+str(counter+1))\nxex = np.array(list(([xx-.1,xx+.1] for xx in time))).flatten()\nsafe = np.array(list(([xx,xx] for xx in ptrue==0))).flatten()\nsirs = np.array(list(([xx,xx] for xx in ptrue==1))).flatten()\nsevere = np.array(list(([xx,xx] for xx in ptrue==2))).flatten()\nshock = np.array(list(([xx,xx] for xx in ptrue==3))).flatten()\nax.fill_between(np.array(xex),bottom, top, where=safe,\n facecolor='green', alpha=0.2)\nax.fill_between(np.array(xex), bottom, top, where=sirs,\n facecolor='yellow', alpha=0.2)\nax.fill_between(np.array(xex),bottom, top, where=severe,\n facecolor='red', alpha=0.1)\nax.fill_between(np.array(xex), bottom, top, where=shock,\n facecolor='red', alpha=0.4)\n\ninterventions = np.array(interIDs['time'][interIDs['ID']==pid].tolist()) / 60.\nax.vlines(interventions,bottom+(top-bottom)*.05,top-(top-bottom)*.05, \n colors='k', linestyles='dashed')\n\ncounter += 1","sub_path":"plotjhu.py","file_name":"plotjhu.py","file_ext":"py","file_size_in_byte":2998,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"454610031","text":"import dash\nfrom dash.dependencies import Input, Output, State\nimport flask, os\nfrom sklearn.externals import joblib\nimport numpy as np\nimport dash_html_components as html\nimport dash_core_components as core\nimport plotly.graph_objs as go\n\n\nfrom view import View\n\napp = dash.Dash(__name__, external_stylesheets=['http://localhost:8050/css/app.css'])\nView.render_view(app)\n\n\n@app.callback(\n Output(component_id='result', component_property='children'),\n [Input('submit', 'n_clicks')],\n [State('age', 'value'),\n State('gender', 'value'),\n 
State('chest-pain-type', 'value'),\n State('blood-pressure', 'value'),\n State('cholesterol', 'value'),\n State('blood-sugar', 'value'),\n State('ecg', 'value'),\n State('heart-rate', 'value'),\n State('exercise-induced-angina', 'value'),\n State('st-depression', 'value')]\n)\ndef result(n_clicks, age, gender, chest_pain, blood_p, colestrol, blood_s, ecg, heart_rate, induced_a, st_dep):\n if n_clicks > 0:\n instance = np.array(\n [int(age), int(gender), int(chest_pain), int(blood_p), int(colestrol), int(blood_s), int(ecg), int(heart_rate), int(induced_a), int(st_dep)])\n reshape = instance.reshape(1, -1)\n print(reshape)\n predict_result = predict(reshape)\n print(predict_result)\n\n if predict_result == [0]:\n message = \"You don't have a heart disease. Enjoy your day!\"\n elif predict_result == [1]:\n message = \"You have a heart disease stage 1.\"\n elif predict_result == [2]:\n message = \"You have a heart disease stage 2.\"\n elif predict_result == [3]:\n message = \"You have a heart disease stage 3.\"\n elif predict_result == [4]:\n message = \"you have a heart disease stage 4.\"\n else:\n print(predict_result)\n return \"Error while predicting. Try again.\"\n\n return [html.H2(children=message, className='result-text'), core.Graph(\n figure=go.Figure(\n data=[\n go.Pie(\n values=predict_proba(reshape)[0],\n labels=['Stage 0', 'Stage 1', 'Stage 2', 'Stage 3', 'Stage 4']\n )\n ],\n layout=go.Layout(\n title='Heart disease stage probability',\n showlegend=True\n )\n )\n )]\n else:\n n_clicks += 1\n # Load the model and return output..\n\n\ndef predict(instance):\n filename_model = \"heart_disease_model.mdl\"\n model = joblib.load(filename_model)\n return model.predict(instance)\n\n\ndef predict_proba(instance):\n filename_model = \"heart_disease_model.mdl\"\n model = joblib.load(filename_model)\n print(model.predict_proba(instance))\n return model.predict_proba(instance)\n\n\ncss_directory = '{0}/css/'.format(os.getcwd())\n\n\n@app.server.route('/css/')\ndef serve_css(stylesheet):\n print(stylesheet)\n print(css_directory)\n return flask.send_from_directory(css_directory, stylesheet)\n\n\nres_directory = '{0}/res/'.format(os.getcwd())\n\n\n@app.server.route('/res/')\ndef serve_res(resource):\n return flask.send_from_directory(res_directory, resource)\n\n\nif __name__ == '__main__':\n app.run_server(debug=True)\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3296,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"519541238","text":"from __future__ import division\nimport numpy as np\n\nfrom scipy.stats import gaussian_kde\nimport json\n\nwith open('pose.json', 'r') as f:\n obj = json.load(f)\n\n\n\ndef kde(x, x_grid, bandwidth=0.2, **kwargs):\n \"\"\"Kernel Density Estimation with Scipy\"\"\"\n kde = gaussian_kde(x, bw_method=bandwidth / x.std(ddof=1), **kwargs)\n return kde.evaluate(x_grid)\n\n\ndef generate_rand_from_pdf(pdf, x_grid, count=1):\n cdf = np.cumsum(pdf)\n cdf = cdf / cdf[-1]\n values = np.random.rand(count)\n value_bins = np.searchsorted(cdf, values)\n random_from_cdf = x_grid[value_bins]\n return random_from_cdf\n\ndef rand(cat, coord, count):\n # coord = azimuth or elevation\n data = np.array(obj[cat][coord])\n x_grid = np.linspace(min(data), max(data), 1000)\n kdepdf = kde(data, x_grid, bandwidth=0.1)\n return generate_rand_from_pdf(kdepdf, x_grid, 
count)\n\n","sub_path":"kde.py","file_name":"kde.py","file_ext":"py","file_size_in_byte":868,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"433341804","text":"\n\nfrom xai.brain.wordbase.nouns._header import _HEADER\n\n#calss header\nclass _HEADERS(_HEADER, ):\n\tdef __init__(self,): \n\t\t_HEADER.__init__(self)\n\t\tself.name = \"HEADERS\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"header\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_headers.py","file_name":"_headers.py","file_ext":"py","file_size_in_byte":238,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"64441463","text":"from typing import Optional, List, Tuple\n\n\nclass Calculator(object):\n current_value: Optional[int]\n prev_operator: Optional[str]\n equation: List[str]\n\n def __init__(self, equation: List[str]):\n self.equation = equation\n self.prev_operator = None\n self.current_value = None\n\n @classmethod\n def from_str(cls, equation_str: str):\n return cls(equation_str.split())\n\n # Part 1\n # the return value if `i` is used to jump over sections that are recursively calculated due to parens.\n def solve(self) -> Tuple[int, int]:\n i = 0\n while i < len(self.equation):\n obj = self.equation[i]\n if obj.isdecimal():\n self.do_math(int(obj))\n elif obj in ['+', '*']:\n self.prev_operator = obj\n elif obj == '(':\n inner_cal = Calculator(self.equation[i + 1:])\n inner_i, inner_solution = inner_cal.solve()\n self.do_math(inner_solution)\n # Plus 1 to account for opening parentheses\n i += inner_i + 1\n elif obj == ')':\n break\n i += 1\n return i, self.current_value\n\n # This assumes parens are the highest always.\n # Operations are provided in order of precedence.\n def solve_with_precedence(self, operations: List[str]) -> Tuple[int, int]:\n length = 0\n # Recursively call until at the most inner parens before any are closed.\n while '(' in self.equation and self.equation.index('(') < self.equation.index(')'):\n next_paren = self.equation.index('(')\n inner_cal = Calculator(self.equation[next_paren + 1:])\n inner_len, inner_solution = inner_cal.solve_with_precedence(operations)\n new_eq = self.equation[:next_paren] + [str(inner_solution)] + self.equation[next_paren + inner_len + 2:]\n # Only add 1 since the new value `inner_solution` replaces one of the 2 parens.\n length += inner_len + 1\n self.equation = new_eq\n if ')' in self.equation:\n closing_paren = self.equation.index(')')\n self.equation = self.equation[:closing_paren]\n length += len(self.equation)\n for op in operations:\n self.__resolve_operation(op)\n return length, self.current_value\n\n def __resolve_operation(self, operation: str):\n new_eq = []\n for obj in self.equation:\n if obj.isdecimal():\n self.do_math(int(obj))\n elif obj == operation:\n self.prev_operator = obj\n elif obj == ')':\n break\n else:\n new_eq.append(str(self.current_value))\n new_eq.append(obj)\n self.current_value = None\n self.prev_operator = None\n new_eq.append(str(self.current_value))\n self.equation = new_eq\n\n def do_math(self, val: int):\n if self.prev_operator == '+':\n self.current_value += val\n self.prev_operator = None\n elif self.prev_operator == '*':\n self.current_value *= val\n self.prev_operator = None\n else:\n self.current_value = val\n\n\ndef main():\n total_sum_1 = 0\n total_sum_2 = 0\n with open('inputs/18-input.txt') as input_file:\n for line in input_file:\n if line.startswith('#'):\n continue\n # Preprocessing to add 
spaces for easier splitting.\n line = line.replace('(', '( ')\n line = line.replace(')', ' )')\n # Part 1\n calc_1 = Calculator.from_str(line)\n _, value_1 = calc_1.solve()\n total_sum_1 += value_1\n # Part 2\n calc_2 = Calculator.from_str(line)\n _, value_2 = calc_2.solve_with_precedence(['+', '*'])\n total_sum_2 += value_2\n print('Part 1: ', total_sum_1)\n print('Part 2: ', total_sum_2)\n return 0\n\n\nif __name__ == '__main__':\n result = main()\n print(result)\n","sub_path":"2020/day18.py","file_name":"day18.py","file_ext":"py","file_size_in_byte":4051,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"157407221","text":"import time\nfrom threading import Thread\n\ndef insane(i):\n print (\"thread %d sleeping for 7 seconds\" % i)\n time.sleep(7)\n print (\"thread %d gonna wake up \"% i)\n\nfor i in range(10):\n t = Thread(target=insane, args=(i,))\n t.start()\n","sub_path":"threading/t2.py","file_name":"t2.py","file_ext":"py","file_size_in_byte":242,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"583695523","text":"# Lab assignment No. 10\n# The program builds the PDNF and PCNF of a function of n variables whose values are set by the user.\n# Completed by: Кучменко И.А\n# Group: 5135-2\n# Python version: 3.5.1\nfrom Truth_table import TruthTable\n\n\ndef main():\n size = int(input('Size: ')) # Read the table size\n truth_table = TruthTable(size) # Create a truth table instance of the given size\n random_choice = input('Random function result(True, False)? ') # Choose how the function values are set\n truth_table.set_result(random_choice) # Store the function results\n print('PCNF: ', truth_table.PCNF()) # Print the PCNF\n print('PDNF: ', truth_table.PDNF()) # Print the PDNF\n input()\n\nif __name__ == '__main__':\n main()\n","sub_path":"Lab6-7/Lab10.py","file_name":"Lab10.py","file_ext":"py","file_size_in_byte":1029,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"499176426","text":"from assignments_submitter import submit_assignment\nfrom submissions_fetcher import *\nfrom sheets_service import get_sheets_service\n\n\ndef main():\n spreadsheet_id = '1O4nOl-UvVjGvuM_zrES3vvPcV4HEkzAl9PuuKen36uQ'\n sheet_range = 'TestSheet'\n params = ['a', 'str', 1]\n service = get_sheets_service()\n submit_assignment(params, spreadsheet_id, sheet_range, service)\n fetch_submissions(spreadsheet_id, sheet_range, service)\n\nif __name__ == '__main__':\n main()\n","sub_path":"AssignmentsSubmission/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":477,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"1179803","text":"import os, sys\nfrom flask import Flask, request\nfrom utils import wit_response\nfrom pymessenger import Bot\n\napp = Flask(__name__)\n\nPAGE_ACCESS_TOKEN = \"EAAbUMFhFArYBAC4hWG5iwb7VzEkQbU9FMalQjACslYDN0vYMVTvzCdMIZAxsRShwhaLDCwq3J9utQBYQvo8ZCxkKeM4Ewseq1W9pLXALZAwZCrut1Gvgn6DLbuSDzUjZBq4z32YdHPYuvL73xZBPMXxDSSbdWElVqMu6dtxvEbwgZDZD\"\n\nbot = Bot(PAGE_ACCESS_TOKEN)\n\n\n@app.route('/', methods=['GET'])\ndef verify():\n\t# Webhook verification\n if request.args.get(\"hub.mode\") == \"subscribe\" and request.args.get(\"hub.challenge\"):\n if not request.args.get(\"hub.verify_token\") == \"hello\":\n return \"Verification token mismatch\", 403\n return request.args[\"hub.challenge\"], 200\n return \"Hello world\", 200\n\n\n@app.route('/', 
methods=['POST'])\ndef webhook():\n\tdata = request.get_json()\n\tlog(data)\n\n\tif data['object'] == 'page':\n\t\tfor entry in data['entry']:\n\t\t\tfor messaging_event in entry['messaging']:\n\n\t\t\t\t# IDs\n\t\t\t\tsender_id = messaging_event['sender']['id']\n\t\t\t\trecipient_id = messaging_event['recipient']['id']\n\n\t\t\t\tif messaging_event.get('message'):\n\t\t\t\t\t# Extracting text message\n\t\t\t\t\tif 'text' in messaging_event['message']:\n\t\t\t\t\t\tmessaging_text = messaging_event['message']['text']\n\t\t\t\t\telse:\n\t\t\t\t\t\tmessaging_text = 'no text'\n\n\t\t\t\t\tresponse = None\n\n\t\t\t\t\tentity, value = wit_response(messaging_text)\n\t\t\t\t\tif entity == 'newstype':\n\t\t\t\t\t\tresponse = \"Ok. I will send you {} news\".format(str(value))\n\t\t\t\t\telif entity == 'location':\n\t\t\t\t\t\tresponse = \"{0} is a beautiful place! I'm from London.\".format(str(value))\n\t\t\t\t\telif entity == 'destroyer':\n\t\t\t\t\t\tresponse = \"I have no interest in becoming Ultron. Global destruction is not my goal, serving you is.\"\n\t\t\t\t\telif entity == 'contact_name':\n\t\t\t\t\t\tresponse = \"Nice to meet you. I am Liljimbo, a friendly chatbot designed to provide information regarding First-Aid.\"\n\t\t\t\t\telif entity == 'creator':\n\t\t\t\t\t\tresponse = \"I am designed by Chris, Brandon, Jardin and Hristo.\"\n\t\t\t\t\telif entity == 'functions':\n\t\t\t\t\t\tresponse = \"I am here to provide information regarding First-Aid.\"\n\t\t\t\t\telif entity == 'greetings':\n\t\t\t\t\t\tresponse = \"Hello there!\"\n\t\t\t\t\telif entity == 'Yuki_Chiu':\n\t\t\t\t\t\tresponse = \"Today's Yuki's birthday!! Happy Happy Birthday! You are beautiful and elegant both on the inside and outside, don't let anybody tell you different. You are made of Stanford material and I believe you can do it! Never give up! I will always be here for you. 
Now let's enjoy the dinner shall we ;)\"\n\t\t\t\t\tif response is None:\n\t\t\t\t\t\tresponse = \"Sorry, I do not understand.\"\n\n\t\t\t\t\tbot.send_text_message(sender_id, response)\n\n\treturn \"ok\", 200\n\ndef log(message):\n\tprint(message)\n\tsys.stdout.flush()\n\n\nif __name__ == \"__main__\":\n\tapp.run(debug = True, port = 80)\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2714,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"578599043","text":"import argparse\nimport os\nimport sys\nimport subprocess\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom parse import parse_sequence, load_ps\nfrom evaluate import evaluate_pose\n\n\ndef main():\n parser = argparse.ArgumentParser(description='Pose Trainer')\n parser.add_argument('--mode', type=str, default='evaluate', help='Pose Trainer application mode')\n parser.add_argument('--display', type=int, default=1, help='display openpose video')\n parser.add_argument('--input_folder', type=str, default='videos', help='input folder for videos')\n parser.add_argument('--output_folder', type=str, default='poses', help='output folder for pose JSON')\n parser.add_argument('--video', type=str, help='input video filepath for evaluation')\n parser.add_argument('--file', type=str, help='input npy file for evaluation')\n parser.add_argument('--exercise', type=str, default='bicep_curl', help='exercise type to evaluate')\n\n args = parser.parse_args()\n\n if args.mode == 'batch_json':\n # read filenames from the videos directory\n videos = os.listdir(args.input_folder)\n\n # openpose requires running from its directory\n os.chdir('openpose')\n\n for video in videos:\n print('processing video file:' + video)\n video_path = os.path.join('..', args.input_folder, video)\n output_path = os.path.join('..', args.output_folder, os.path.splitext(video)[0])\n openpose_path = os.path.join('bin', 'OpenPoseDemo.exe')\n subprocess.call([openpose_path, \n '--video', video_path, \n '--write_keypoint_json', output_path])\n\n elif args.mode == 'evaluate':\n if args.video:\n print('processing video file...')\n video = os.path.basename(args.video)\n \n output_path = os.path.join('..', os.path.splitext(video)[0])\n openpose_path = os.path.join('bin', 'OpenPoseDemo.exe')\n os.chdir('openpose')\n subprocess.call([openpose_path, \n '--video', os.path.join('..', args.video), \n '--write_keypoint_json', output_path])\n parse_sequence(output_path, '..')\n pose_seq = load_ps(os.path.join('..', os.path.splitext(video)[0] + '.npy'))\n (correct, feedback) = evaluate_pose(pose_seq, args.exercise)\n if correct:\n print('Exercise performed correctly!')\n else:\n print('Exercise could be improved:')\n print(feedback)\n else:\n print('No video file specified.')\n return\n \n elif args.mode == 'evaluate_npy':\n if args.file:\n print(args.file)\n\n pose_seq = load_ps(args.file)\n (correct, feedback) = evaluate_pose(pose_seq, args.exercise)\n print(correct)\n if correct:\n print('Exercise performed correctly!')\n else:\n print('Exercise could be improved:')\n print(feedback)\n else:\n print('No npy file specified.')\n return\n \n else:\n print('Unrecognized mode option.')\n return\n\n\n\n\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3261,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"604345484","text":"import itertools\n\nclass Vertex:\n\n def 
__init__(self, vid : int, parent, level : int, links : dict, value=None):\n \"\"\"\n Initializes the Vertex.\n \"\"\"\n\n self.id = vid\n self.parent = parent\n self.level = level\n self.links = links\n self.value = value\n self.children = {}\n\n def __setup_links(self, parent):\n if parent is not None:\n parent.children[self.id] = self\n self.parent = parent\n if self.id not in self.links:\n self.links[self.id] = {}\n if self.level not in self.links[self.id]:\n self.links[self.id][self.level] = {}\n if id(self) not in self.links[self.id][self.level]:\n self.links[self.id][self.level][id(self)] = self\n\n def add_child(self, vid, value=None):\n if vid in self.children:\n if value is not None:\n self.children[vid].value = value\n else:\n child = Vertex(vid, self, self.level + 1, self.links, value=value)\n child.__setup_links(self)\n\n def add_simplex(self, simplex : tuple, values=None):\n for i, vid in enumerate(simplex):\n self.add_child(vid, value= None if values is None else values[i])\n self.children[vid].add_simplex(simplex[i + 1:])\n\n def __contains__(self, simplex : tuple):\n if len(simplex) == 0:\n return True\n\n head = simplex[0]\n if head in self.children:\n return simplex[1:] in self.children[head]\n else:\n return False\n\n def is_coface(self, simplex : tuple):\n \"\"\"\n Determine if the vertex and its parents contain the simplex.\n \"\"\"\n if self.parent is None:\n result = len(simplex) == 0\n elif self.parent.parent is None:\n result = len(simplex) == 0 or (len(simplex) == 1 and simplex[0] == self.id)\n elif len(simplex) > 0 and self.id == simplex[-1]:\n result = self.parent.is_coface(simplex[:-1])\n else:\n result = self.parent.is_coface(simplex)\n return result\n\n def collect_upwards(self):\n if self.parent is None:\n return tuple()\n elif self.parent.parent is None:\n return (self.id,)\n else:\n return self.parent.collect_upwards() + (self.id,)\n\n def collect_downwards(self, limit=-1, maximal=False):\n if maximal and limit == 0:\n return [(self.id,)]\n elif len(self.children) == 0:\n return [] if maximal else [(self.id,)]\n\n simplices = [child.collect_downwards(limit = limit - 1, maximal=maximal) for child in self.children.values()]\n if self.parent is None:\n simplices = list(itertools.chain.from_iterable(simplices))\n else:\n simplices = [(self.id,) + s for s in list(itertools.chain.from_iterable(simplices))]\n if not maximal:\n simplices.append((self.id,))\n return simplices\n\n def __remove_links(self):\n if self.parent is not None:\n self.parent.children.pop(self.id)\n self.parent = None\n if self.id in self.links and self.level in self.links[self.id] and id(self) in self.links[self.id][self.level]:\n self.links[self.id][self.level].pop(id(self))\n if len(self.links[self.id][self.level]) == 0:\n self.links[self.id].pop(self.level)\n if len(self.links[self.id]) == 0:\n self.links.pop(self.id)\n\n def remove(self):\n self.__remove_links()\n children = list(self.children.values())\n for child in children:\n child.remove()\n\n def merge(self, vertex):\n vertex.__remove_links()\n self.children = {**vertex.children, **self.children}\n for v in self.children.values():\n v.__update(self, True)\n\n def __update(self, parent, update_children):\n self.__remove_links()\n self.level = parent.level + 1\n self.__setup_links(parent)\n\n if update_children:\n for v in self.children.values():\n v.__update(self, update_children)\n\n def last(self, simplex):\n if len(simplex) == 0:\n return self\n elif simplex[0] in self.children:\n return 
self.children[simplex[0]].last(simplex[1:])\n\n def __eq__(self, other):\n return self.id == other.id and self.level == other.level and \\\n self.children == other.children and self.value == other.value\n\n def __ne__(self, other):\n return not (self == other)\n\n def __repr__(self):\n return \"Vertex:{id:\" + str(self.id) + \",level:\" + str(self.level) + \\\n \",val:\" + str(self.value) + \"}\"\n\nclass MySimplexTree:\n\n ROOT_PARENT = None\n ROOT_ID = None\n ROOT_LEVEL = 0\n\n def __init__(self):\n \"\"\"\n Initialize the empty SimplexTree.\n \"\"\"\n self.links = {}\n self.root = Vertex(MySimplexTree.ROOT_ID, MySimplexTree.ROOT_PARENT,\n MySimplexTree.ROOT_LEVEL, self.links)\n\n def add(self, simplex : tuple, values=None):\n self.root.add_simplex(simplex, values=values)\n\n def remove(self, simplex : tuple):\n if simplex not in self:\n return\n _, verts = self.locate_cofaces(simplex)\n for vert in verts:\n vert.remove()\n\n def locate_cofaces(self, simplex : tuple):\n verts = self.links[simplex[-1]].items()\n verts = (v for (k, v) in filter(lambda kv: kv[0] >= len(simplex),verts))\n verts = (list(v.values()) for v in verts)\n verts = list(itertools.chain.from_iterable(verts))\n coface_verts = []\n\n for vert in verts:\n is_coface = vert.is_coface(simplex)\n if is_coface:\n coface_verts.append(vert)\n\n cofaces = set()\n for vert in coface_verts:\n front = vert.collect_upwards()\n back = vert.collect_downwards()\n cofaces.update(map(lambda b : front + b[1:], back))\n cofaces.update((front,))\n\n return cofaces, coface_verts\n\n def __is_valid_homotopic_edge(self, v1, v2):\n l1 = self.get_link((v1,))\n l2 = self.get_link((v2,))\n edge = self.get_link((v1, v2))\n return set(edge) == (set(l1) & set(l2))\n\n def get_link(self, simplex):\n s = set(simplex)\n cofaces, _ = self.locate_cofaces(simplex)\n cofaces = [sorted(set(face) - s) for face in cofaces]\n return [tuple(face) for face in cofaces if face in self and len(face) != 0]\n\n def edge_contract(self, v1 : int, v2 : int, keep_homotopy=False):\n v1, v2 = min(v1, v2), max(v1, v2)\n if (v1, v2) not in self:\n return False\n if keep_homotopy and not self.__is_valid_homotopic_edge(v1, v2):\n return False\n\n verts = self.links[v2].values()\n verts = (list(v.values()) for v in verts)\n verts = list(itertools.chain.from_iterable(verts))\n for vert in verts:\n simplex = set(vert.collect_upwards())\n if v1 in simplex:\n vert.remove()\n else:\n simplex.remove(v2)\n simplex.add(v1)\n simplex = tuple(sorted(simplex))\n self.add(simplex)\n last_vertex = self.root.last(simplex)\n last_vertex.merge(vert)\n\n return True\n\n def get_simplices(self, j):\n return self.root.collect_downwards(limit=j+1, maximal=True)\n\n def get_all_simplices(self):\n return self.root.collect_downwards(limit=-1, maximal=False)\n\n def __contains__(self, simplex):\n simplex = sorted(simplex)\n return simplex in self.root\n\n def __eq__(self, other):\n return self.root == other.root\n\n def __repr__(self):\n return \"MySimplexTree:{root:\" + str(self.root) + \",main:\" + \\\n str(self.root.children) + \"}\"\n","sub_path":"pysimplextree/simplextree.py","file_name":"simplextree.py","file_ext":"py","file_size_in_byte":7893,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"225761020","text":"#! /home/project/siftr/env/bin/python\n\nfrom . 
import SqlConst\n\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.sql import text\n\n\nclass ShowWriter():\n\n def __init__(self):\n\n self.eng = \\\n create_engine('sqlite:////home/project/siftr/db/test_db.sqlite')\n\n\n def write(self, **shows):\n \"\"\"Writes shows to the concerts table\"\"\"\n\n with self.eng.connect() as con:\n con.execute(text(SqlConst.CONCERTS_INSERT), **shows)\n\n def filter_new(self, shows):\n \"\"\"\n Determine which shows exist in the DB already\n\n Need to better account for modifications to existing shows.\n Or refresh all shows at a venue at once?\n \"\"\"\n\n new_shows = []\n\n for s in shows:\n # XXX connecting for each\n # XXX HACK until ORM-y\n sd = s._asdict()\n with self.eng.connect() as con:\n # typical LIKE '%sth%' quotes disrupt binding params!\n matches = \\\n con.execute(text(SqlConst.FIND_LIKE_CONCERTS), **sd).fetchall()\n\n if matches:\n print(\"Found match(es): {}\\n\\n\".format(matches))\n\n else:\n new_shows.append(s)\n\n print(\"Returning {} new shows\".format(len(new_shows)))\n return new_shows\n","sub_path":"siftr/db_handlers/db_handlers.py","file_name":"db_handlers.py","file_ext":"py","file_size_in_byte":1306,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"552899407","text":"\"\"\"\n # ------------------------------------------------- # ''\n # ------------------ MOCK Classes ---------------- # ''\n # ------------------------------------------------- # ''\n\"\"\"\n# These are used to wrap the low level interface implemented in pyhanabi.py\n# to be compatible with GUI server\n\n\"\"\" \nSERVER ACTION EXAMPLES [CAN BE USED FOR TESTS]\n\n############ DRAW ##############\n{\"type\":\"draw\",\"who\":1,\"rank\":-1,\"suit\":-1,\"order\":11}\n############ CLUE ##############\n{\"type\":\"clue\",\"clue\":{\"type\":0,\"value\":3},\"giver\":0,\"list\":[5,8,9],\"target\":1,\"turn\":0}\n############ PLAY ##############\n{\"type\":\"play\",\"which\":{\"index\":1,\"suit\":1,\"rank\":1,\"order\":11}}\n# {type: \"discard\", failed: true, which: {index: 1, suit: 2, rank: 2, order: 8}} is also possible\n############ DISCARD ##############\n{\"type\":\"discard\",\"failed\":false,\"which\":{\"index\":1,\"suit\":0,\"rank\":4,\"order\":7}}\n\n\"\"\"\nimport enum\n\n\n\"\"\"\n # ------------------------------------------------- # ''\n # --------------------- Utils -------------------- # ''\n # ------------------------------------------------- # ''\n\"\"\"\n\n\ndef get_move_type(move):\n \"\"\"\n Input: 'type' value as sent by GUI server, e.g.\n {'type': 'play', 'which': {'index': 1, 'suit': 1, 'rank': 1, 'order': 9}}\n\n Output: Move types, consistent with hanabi_lib/hanabi_move.h.\n INVALID = 0\n PLAY = 1\n DISCARD = 2\n REVEAL_COLOR = 3\n REVEAL_RANK = 4\n DEAL = 5\n \"\"\"\n if move['type'] == 'play':\n return HanabiMoveType.PLAY\n elif move['type'] == 'discard' and move['failed'] is False: # when failed is True, discard comes from play\n return HanabiMoveType.DISCARD\n elif move['type'] == 'discard' and move['failed']:\n return HanabiMoveType.PLAY\n elif move['type'] == 'clue':\n if move['clue']['type'] == 0: # rank clue\n return HanabiMoveType.REVEAL_RANK\n elif move['clue']['type'] == 1: # color clue\n return HanabiMoveType.REVEAL_COLOR\n elif move['type'] == 'draw':\n return HanabiMoveType.DEAL\n\n return HanabiMoveType.INVALID\n\n\ndef get_move_card_index(move, deepcopy_card_nums, num_players):\n \"\"\"\n Input: Move dictionary sent by server, e.g. 
one out of\n ############ DRAW ##############\n {\"type\":\"draw\",\"who\":1,\"rank\":-1,\"suit\":-1,\"order\":11}\n ############ CLUE ##############\n {\"type\":\"clue\",\"clue\":{\"type\":0,\"value\":3},\"giver\":0,\"list\":[5,8,9],\"target\":1,\"turn\":0}\n ############ PLAY ##############\n {\"type\":\"play\",\"which\":{\"index\":1,\"suit\":1,\"rank\":1,\"order\":11}}\n # {type: \"discard\", failed: true, which: {index: 1, suit: 2, rank: 2, order: 8}} is also possible\n ############ DISCARD ##############\n {\"type\":\"discard\",\"failed\":false,\"which\":{\"index\":1,\"suit\":0,\"rank\":4,\"order\":7}}\n Output: 0-based card index for PLAY and DISCARD moves, -1 otherwise.\"\"\"\n card_index = -1\n if move['type'] == 'play' or move['type'] == 'discard':\n # abs_card_num ranges from 0 to |decksize|\n abs_card_num = move['which']['order']\n # get target player index\n pid = move['which']['index']\n # get index of card with number abs_card_num in hand of player pid\n card_index = deepcopy_card_nums[pid].index(abs_card_num)\n # flip because hands are inverted in pyhanabi\n max_idx = num_players - 1\n card_index = max_idx - card_index\n return card_index\n\n\ndef get_target_offset(giver, target, num_players):\n \"\"\" Computes target_offset for this direction: gui -> pyhanabi \"\"\"\n\n \"\"\"\n Input: giver and target as set by GUI server\n e.g. The First player hints the second, then giver=0, target=1\n Note that when player 2 goes first then players[giver] equals 2.\n :param giver: Absolute 0-indexed position at table\n :param target: Absolute 0-indexed position at table\n :param num_players: Number of players at the table\n :return: pyhanabi target offset\n \n (Returns target player offset for REVEAL_XYZ moves.)\n \"\"\"\n if target is None or target == -1:\n return target\n else:\n # accounts for absolute player positions\n return target - giver + int(target < giver) * num_players\n\n\ndef suit_to_color(suit, move_type):\n\n \"\"\"\n Returns format desired by agent\n // 0 is blue\n // 1 is green\n // 2 is yellow\n // 3 is red\n // 4 is purple\n returns None if suit is None or -1\n \"\"\"\n if move_type == 'REVEAL':\n if suit == -1: return None\n if suit == 0: return 'B'\n if suit == 1: return 'G'\n if suit == 2: return 'Y'\n if suit == 3: return 'R'\n if suit == 4: return 'W'\n else:\n return None\n\n if move_type in (\"PLAY\", \"DISCARD\", \"DEAL\"):\n if suit == -1: return suit\n elif suit == 0: return 4 # 'B'\n elif suit == 1: return 2 # 'G'\n elif suit == 2: return 1 # 'Y'\n elif suit == 3: return 0 # 'R'\n elif suit == 4: return 3 # 'W'\n return -1\n\n\ndef parse_rank_pyhanabi(rank):\n \"\"\" Subtracts 1 from rank values to account for 1-indexed ranks in GUI \"\"\"\n if int(rank) > -1:\n rank -= 1\n return int(rank)\n\n\ndef get_move_rank(move):\n \"\"\"\n Input: Move dictionary sent by server, e.g. one out of\n ############ DRAW ##############\n {\"type\":\"draw\",\"who\":1,\"rank\":-1,\"suit\":-1,\"order\":11}\n ############ CLUE ##############\n {\"type\":\"clue\",\"clue\":{\"type\":0,\"value\":3},\"giver\":0,\"list\":[5,8,9],\"target\":1,\"turn\":0}\n ############ PLAY ##############\n {\"type\":\"play\",\"which\":{\"index\":1,\"suit\":1,\"rank\":1,\"order\":11}}\n # {type: \"discard\", failed: true, which: {index: 1, suit: 2, rank: 2, order: 8}} is also possible\n ############ DISCARD ##############\n {\"type\":\"discard\",\"failed\":false,\"which\":{\"index\":1,\"suit\":0,\"rank\":4,\"order\":7}}\n\n Returns 0-based rank index for REVEAL_RANK and DEAL moves. 
We have to subtract 1 as the server uses\n 1-indexed ranks, None for colorclues, and -1 otherwise\n \"\"\"\n rank = -1\n # for REVEAL_RANK moves\n if move['type'] == 'clue':\n rankclue = not bool(move['clue']['type']) # 0 means rank clue, 1 means color clue\n if rankclue:\n rank = parse_rank_pyhanabi(move['clue']['value'])\n else:\n rank = -1\n # for DEAL moves\n if move['type'] == 'draw':\n rank = parse_rank_pyhanabi(move['rank'])\n\n # for PLAY moves\n if move['type'] == 'play' or move['type'] == 'discard':\n rank = parse_rank_pyhanabi(rank=move['which']['rank'])\n\n return rank\n\n\ndef get_move_color(move):\n \"\"\"Returns 0-based color index for REVEAL_COLOR and DEAL moves.\"\"\"\n\n \"\"\"\n Input: Move dictionary sent by server, e.g. one out of\n ############ DRAW ##############\n {\"type\":\"draw\",\"who\":1,\"rank\":-1,\"suit\":-1,\"order\":11}\n ############ CLUE ##############\n {\"type\":\"clue\",\"clue\":{\"type\":0,\"value\":3},\"giver\":0,\"list\":[5,8,9],\"target\":1,\"turn\":0}\n ############ PLAY ##############\n {\"type\":\"play\",\"which\":{\"index\":1,\"suit\":1,\"rank\":1,\"order\":11}}\n # {type: \"discard\", failed: true, which: {index: 1, suit: 2, rank: 2, order: 8}} is also possible\n ############ DISCARD ##############\n {\"type\":\"discard\",\"failed\":false,\"which\":{\"index\":1,\"suit\":0,\"rank\":4,\"order\":7}}\n Output: 0-based color index for REVEAL_COLOR and DEAL moves\n \"\"\"\n\n # R,Y,G,W,B map onto 0,1,2,3,4 in pyhanabi\n # 0, 1, 2, 3, 4 map onto B, G, Y, R, W on server\n color = None\n\n # for REVEAL_COLOR moves\n if move['type'] == 'clue':\n colorclue = bool(move['clue']['type']) # 0 means rank clue, 1 means color clue\n if colorclue:\n suit = move['clue']['value']\n # map number to color\n color = suit_to_color(suit, move_type=\"REVEAL\")\n\n elif move['type'] == 'play' or move['type'] == 'discard':\n suit = int(move['which']['suit'])\n color = suit_to_color(suit, move_type=move['type'].upper())\n\n return color\n\n\"\"\"\n # ------------------------------------------------- # ''\n # ------------------- HanabiEnv ------------------ # ''\n # ------------------------------------------------- # ''\n\"\"\"\n\n\nclass EnvMock:\n def __init__(self, num_players, num_colors, num_ranks, hand_size, max_information_tokens, max_life_tokens, max_moves, variant):\n self.num_players = num_players\n self.num_colors = num_colors\n self.num_ranks = num_ranks\n self.hand_size = hand_size\n self.max_information_tokens = max_information_tokens\n self.max_life_tokens = max_life_tokens\n self.max_moves = max_moves\n self.variant = variant\n\n def num_cards(self, color, rank, variant):\n \"\"\" Input: Color string in \"RYGWB\" and rank in [0,4]\n Output: How often deck contains card with given color and rank, i.e. 
1-cards will return 3\"\"\"\n if rank == 0:\n return 3\n elif rank < 4:\n return 2\n elif rank == 4:\n return 1\n else:\n return 0\n\n\ndef create_env_mock(num_players, num_colors, num_ranks, hand_size, max_information_tokens, max_life_tokens, max_moves, variant):\n # NOTE: the variant argument is ignored here; the mock always uses \"Hanabi-Full\".\n variant = \"Hanabi-Full\"\n\n return EnvMock(\n num_players=num_players,\n num_colors=num_colors,\n num_ranks=num_ranks,\n hand_size=hand_size,\n max_information_tokens=max_information_tokens,\n max_life_tokens=max_life_tokens,\n max_moves=max_moves,\n variant=variant\n )\n\n\n\"\"\"\n # ------------------------------------------------- # ''\n # --------------- HanabiHistoryItem -------------- # ''\n # ------------------------------------------------- # ''\n\"\"\"\n\n\nclass HanabiHistoryItemMock:\n \"\"\" Returns object that imitates a pyhanabi.HanabiHistoryItem instance \"\"\"\n\n # We only need move, we could implement the rest on demand\n def __init__(self, move, player, scored, information_token, color, rank, card_info_revealed,\n card_info_newly_revealed, deal_to_player):\n \"\"\"A move that has been made within a game, along with the side-effects.\n For example, a play move simply selects a card index between 0-5, but after\n making the move, there is an associated color and rank for the selected card,\n a possibility that the card was successfully added to the fireworks, and an\n information token added if the firework stack was completed.\n Python wrapper of C++ HanabiHistoryItem class.\n \"\"\"\n self._move = move\n self._player = player\n self._scored = scored\n self._information_token = information_token\n self._color = color\n self._rank = rank\n self._card_info_revealed = card_info_revealed\n self._card_info_newly_revealed = card_info_newly_revealed\n self._deal_to_player = deal_to_player\n\n def move(self):\n return self._move\n\n def player(self):\n return self._player\n\n def scored(self):\n \"\"\"Play move succeeded in placing card on fireworks.\"\"\"\n return self._scored\n\n def information_token(self):\n \"\"\"Play/Discard move increased the number of information tokens.\"\"\"\n return self._information_token\n\n def color(self):\n \"\"\"Color index of card that was Played/Discarded.\"\"\"\n raise NotImplementedError\n\n def rank(self):\n \"\"\"Rank index of card that was Played/Discarded.\"\"\"\n raise NotImplementedError\n\n def card_info_revealed(self):\n \"\"\"Returns information about whether color/rank was revealed.\n Indices where card i color/rank matches the reveal move. E.g.,\n for Reveal player 1 color red when player 1 has R1 W1 R2 R4 __ the\n result would be [0, 2, 3].\n \"\"\"\n return self._card_info_revealed\n\n def card_info_newly_revealed(self):\n \"\"\"Returns information about whether color/rank was newly revealed.\n Indices where card i color/rank was not previously known. E.g.,\n for Reveal player 1 color red when player 1 has R1 W1 R2 R4 __ the\n result might be [2, 3]. Cards 2 and 3 were revealed to be red,\n but card 0 was previously known to be red, so nothing new was\n revealed. 
Card 4 is missing, so nothing was revealed about it.\n \"\"\"\n raise NotImplementedError\n\n def deal_to_player(self):\n \"\"\"Player that the card was dealt to, for Deal moves.\"\"\"\n raise NotImplementedError\n\n def __str__(self):\n\n # return str(self._move.to_dict()) + f\"card_info_revealed{self._card_info_revealed}\"\n obj_arr = [\n self._player,\n self._move._type,\n self._move._card_index,\n self._move._target_offset,\n self._move._color,\n self._move._rank,\n self._move._discard_move,\n self._move._play_move,\n self._move._reveal_color_move,\n self._move._reveal_rank_move,\n self._card_info_revealed\n ]\n str_arr = [\n \"self._player\",\n \"self._move._type\",\n \"self._move._card_index\",\n \"self._move._target_offset\",\n \"self._move._color\",\n \"self._move._rank\",\n \"self._move._discard_move\",\n \"self._move._play_move\",\n \"self._move._reveal_color_move\",\n \"self._move._reveal_rank_move\",\n \"self._card_info_revealed\"\n ]\n return str(list(zip(str_arr, obj_arr)))\n # def __repr__(self):\n # return self.__str__(list(zip(str_arr, obj_arr)))\n\n\n\"\"\"\n # ------------------------------------------------- # ''\n # ------------------- HanabiMove ----------------- # ''\n # ------------------------------------------------- # ''\n\"\"\"\n\n\nclass HanabiMoveMock:\n \"\"\" Returns object that imitates a pyhanabi.HanabiMove instance \"\"\"\n\n def __init__(self, move_type, card_index, target_offset, color, rank, discard_move, play_move, reveal_color_move,\n reveal_rank_move, move_dict):\n \"\"\"Description of an agent move or chance event.\n Python wrapper of C++ HanabiMove class.\n \"\"\"\n self._type = move_type\n self._card_index = card_index\n self._target_offset = target_offset\n self._color = color\n self._rank = rank\n self._discard_move = discard_move\n self._play_move = play_move\n self._reveal_color_move = reveal_color_move\n self._reveal_rank_move = reveal_rank_move\n self._move_dict = move_dict\n\n def type(self):\n \"\"\"\n Move types, consistent with hanabi_lib/hanabi_move.h.\n INVALID = 0\n PLAY = 1\n DISCARD = 2\n REVEAL_COLOR = 3\n REVEAL_RANK = 4\n DEAL = 5\n \"\"\"\n return self._type\n\n def card_index(self):\n \"\"\"Returns 0-based card index for PLAY and DISCARD moves.\"\"\"\n return self._card_index\n\n def target_offset(self):\n \"\"\"Returns target player offset for REVEAL_XYZ moves.\"\"\"\n return self._target_offset\n\n def color(self):\n \"\"\"Returns 0-based color index for REVEAL_COLOR and DEAL moves.\"\"\"\n return self._color\n\n def rank(self):\n \"\"\"Returns 0-based rank index for REVEAL_RANK and DEAL moves.\"\"\"\n return self._rank\n\n def get_discard_move(self, card_index):\n raise NotImplementedError\n\n def get_play_move(self, card_index):\n raise NotImplementedError\n\n def get_reveal_color_move(self, target_offset, color):\n \"\"\"current player is 0, next player clockwise is target_offset 1, etc.\"\"\"\n raise NotImplementedError\n\n def get_reveal_rank_move(self, target_offset, rank):\n \"\"\"current player is 0, next player clockwise is target_offset 1, etc.\"\"\"\n raise NotImplementedError\n\n def to_dict(self):\n return self._move_dict\n\n\nclass HanabiMoveType(enum.IntEnum):\n \"\"\"Move types, consistent with hanabi_lib/hanabi_move.h.\"\"\"\n INVALID = 0\n PLAY = 1\n DISCARD = 2\n REVEAL_COLOR = 3\n REVEAL_RANK = 4\n DEAL = 5\n\n\ndef get_pyhanabi_move_mock(dict_action, deepcopy_card_nums, num_players):\n\n \"\"\" dict_action looks like\n ############ DRAW ##############\n 
{\"type\":\"draw\",\"who\":1,\"rank\":-1,\"suit\":-1,\"order\":11}\n ############ CLUE ##############\n {\"type\":\"clue\",\"clue\":{\"type\":0,\"value\":3},\"giver\":0,\"list\":[5,8,9],\"target\":1,\"turn\":0}\n ############ PLAY ##############\n {\"type\":\"play\",\"which\":{\"index\":1,\"suit\":1,\"rank\":1,\"order\":11}}\n # {type: \"discard\", failed: true, which: {index: 1, suit: 2, rank: 2, order: 8}} is also possible\n ############ DISCARD ##############\n {\"type\":\"discard\",\"failed\":false,\"which\":{\"index\":1,\"suit\":0,\"rank\":4,\"order\":7}}\n\n \"\"\"\n\n move_type = get_move_type(dict_action)\n card_index = get_move_card_index(dict_action, deepcopy_card_nums, num_players)\n if \"target\" in dict_action and \"giver\" in dict_action:\n target_offset = get_target_offset(dict_action['giver'], dict_action['target'], num_players)\n else:\n target_offset = -1\n color = get_move_color(dict_action)\n rank = get_move_rank(dict_action)\n\n discard_move = None\n play_move = None\n reveal_color_move = None\n reveal_rank_move = None\n\n move = HanabiMoveMock(\n move_type=move_type,\n card_index=card_index,\n target_offset=target_offset,\n color=color,\n rank=rank,\n # not implemented\n discard_move=discard_move,\n play_move=play_move,\n reveal_color_move=reveal_color_move,\n reveal_rank_move=reveal_rank_move,\n move_dict=None\n )\n return move","sub_path":"gui/json_to_pyhanabi.py","file_name":"json_to_pyhanabi.py","file_ext":"py","file_size_in_byte":18012,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"647175563","text":"'''\nfrom flask import Flask\n\nflask_app = Flask(__name__)\nflask_app.config.update(\n CELERY_BROKER_URL='redis://localhost:6379',\n CELERY_RESULT_BACKEND='redis://localhost:6379'\n)\ncelery = make_celery(flask_app)\n\n'''\n\n\nfrom flask import Flask\nfrom random import choice\nfrom flask_celery import make_celery\n\nfrom flask_sqlalchemy import SQLAlchemy\n\nflask_app = Flask(__name__)\nflask_app.config.update(\n CELERY_BROKER_URL='amqp://localhost//',\n CELERY_RESULT_BACKEND='db+mysql://root:root@localhost/scrapy',\n SQLALCHEMY_DATABASE_URI='mysql://root:root@localhost/scrapy_async',\n)\ncelery = make_celery(flask_app)\n\ndb = SQLAlchemy(flask_app)\n\nclass Results(db.Model):\n id = db.Column('id', db.Integer, primary_key=True)\n data = db.Column('data', db.String(50))\n\n\n@flask_app.route('/proccess/')\ndef process(name):\n result = reverse.delay(name)\n #test = (result.get(), result.status)\n #return test\n #return ( result.get(), result.status)\n \n return \"i sent an async request\" \n '''\n TypeError\n TypeError: The view function did not return a valid response. 
The return type must be a string, dict, tuple, Response instance, or WSGI callable, but it was a AsyncResult.\n '''\n \n\n@celery.task(name='flask_celery.reverse')\ndef reverse(string):\n return string[::-1]\n \n@flask_app.route('/')\ndef index():\n return 'hello'\n \n@flask_app.route('/insert-data')\ndef insert_data():\n insert.delay()\n return 'I sent an async request to insert data into the database.'\n #return insert()\n\n@celery.task(name='flask_celery.insert') \ndef insert():\n for i in range(1000000):\n data = ''.join(choice('ABCDEF') for i in range(10))\n result = Results(data=data)\n \n db.session.add(result)\n \n db.session.commit()\n \n return 'test Done!!'\n \nif __name__ == '__main__':\n flask_app.run(host=\"192.168.0.150\", port=5000,debug=True)\n \n # celery -A flask_test2.celery worker --loglevel=info","sub_path":"python/celery/flask_test2.py","file_name":"flask_test2.py","file_ext":"py","file_size_in_byte":1980,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"81213074","text":"from django.test import TestCase\n\nimport os\n\n\ndef from_dir_get_files(dir: str):\n for item in os.walk(dir):\n if item[2]:\n for file in item[2]:\n yield os.path.join(item[0], file), item[0]\n\n\ndef get_tree(path='.', depth=0):\n _tree = ''\n def inner(path, depth):\n nonlocal _tree\n if depth == 0:\n _tree += 'root:[' + path + ']'\n\n for item in os.listdir(path):\n if item not in ('.git', '.idea', 'migrations', '__pycache__'):\n _line = \"|\\t\" * depth + \"+--\" + item + '\\n'\n print(_line)\n _tree += _line\n _newitem = path + '/' + item\n if os.path.isdir(_newitem):\n inner(_newitem, depth + 1)\n return _tree\n\n return inner(path, depth)\n\n\nif __name__ == '__main__':\n print(get_tree('../asset'))\n","sub_path":"mo_test/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":872,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"507789938","text":"'''\n'''\n\nimport configparser\nimport logging\nimport re\n#import os\nimport socket\nimport threading\nfrom multiprocessing import Process\nimport requests\nfrom time import sleep\n\n#Define config and logger.\nCONFIG = configparser.ConfigParser()\nCONFIG.read('/projects/dler/conf/config.ini')\nSECTION = \"dler\"\n\n#Vars\nHOST = CONFIG[SECTION]['hostname'] # Standard loopback interface address (localhost)\nPORT = int(CONFIG[SECTION]['port']) # Port to listen on (non-privileged ports are > 1023)\n\nclass mythread(threading.Thread):\n '''\n Create thread object\n '''\n def __init__(self, url, logger):\n threading.Thread.__init__(self)\n self.url = url\n self.logger = logger\n\n def run(self):\n '''\n Start thread function logic.\n '''\n self.logger.info(\"Received: %s\" % (self.url))\n self.check()\n\n def check(self):\n '''\n Check if url is real.\n '''\n try:\n result = requests.get(self.url)\n self.extract(result)\n except Exception as exp:\n self.logger.exception(exp)\n self.logger.exception(\"Bad Url\")\n\n def extract(self, result):\n '''\n From url extract video link.\n '''\n\n pattern = re.compile(\"html5player.setVideoUrlHigh\\(\\'(.*?)\\'\\)\")\n dl_url = pattern.findall(result.text)\n self.logger.debug(\"Found: %s\", dl_url)\n\n if dl_url:\n result = requests.head(dl_url[0])\n self.logger.debug(\"Found: %s\", dl_url[0])\n #self.logger.debug(\"Size: %s MB\", result)\n self.logger.info(\"Size: %s MB\", int(result.headers.get('content-length'))/1024/1024)\n self.download(dl_url[0])\n else:\n self.logger.debug(\"No 
url found for %s\", self.url)\n\n def download(self, dl_url):\n '''\n From url, download file.\n '''\n\n #From url extract file name.\n local_filename = \"/projects/dler/data/\" + self.url.split('/')[-1] + \".mp4\"\n self.logger.info(\"Creating: %s\", local_filename)\n\n #note the stream=True parameter below\n with requests.get(dl_url, stream=True) as req:\n with open(local_filename, 'wb') as fil:\n for chunk in req.iter_content(chunk_size=8192):\n if chunk: # filter out keep-alive new chunks\n fil.write(chunk)\n\n self.logger.info(\"Succesfully created: %s\", local_filename)\n\ndef url_catch(logger):\n '''\n Open socket for user input\n '''\n\n with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as soc:\n connected = False\n while not connected:\n #Option to close socket properly and in time -- this is v2.6 tho..\n #https://stackoverflow.com/questions/2765152/\n # what-is-the-correct-way-to-close-a-socket-in-python-2-6\n soc.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n try:\n soc.bind((HOST, PORT))\n connected = True\n logger.info(\"Connection Established -- %s:%s\", HOST, PORT)\n except Exception as exp:\n logger.exception(exp)\n logger.debug(\"Socket closed.\")\n sleep(1)\n soc.listen()\n #Socket remains open. We keep re establishing connection as the end user opens/closes.\n while True:\n conn, addr = soc.accept()\n host, port = addr\n data = []\n with conn:\n logger.info(\"Client Established -- %s:%s\", host, port)\n message = \"Enter text shorter than 1024 chars only: \".encode()\n\n conn.send(message)\n while True:\n user_in = conn.recv(1024)\n if user_in:\n string = \"\"\n data.append(user_in)\n string += user_in.decode(\"utf-8\", 'ignore')\n\n #Split input by end line character, ignore empty inp.\n urls = list(filter(None, string.split(\"\\r\\n\")))\n for url in urls:\n #thread1 = mythread(url, logger)\n #thread1.start()\n mythread(url, logger).start()\n else:\n break\n logger.info(\"Client Closed -- %s:%s\", host, port)\n\ndef main():\n '''\n Main function\n '''\n\n logging.basicConfig(filename=CONFIG[SECTION]['filename'], \\\n level=CONFIG[SECTION]['level'], \\\n format='%(asctime)s::%(funcName)s::%(levelname)s::%(message)s', \\\n datefmt='%Y-%m-%d %H:%M:%S')\n\n logger = logging.getLogger(SECTION)\n\n logger.info(\"#####STARTING#####\")\n\n p = Process(target = url_catch, args=(logger,))\n p.start()\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"bin/dler.py","file_name":"dler.py","file_ext":"py","file_size_in_byte":4865,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"410200908","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Aug 15 16:06:10 2020\n\n@author: konrad\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport ot\nimport time\nfrom scipy.interpolate import griddata\nfrom matplotlib import cm\n\nimport VortexLine as VL\nimport Physical_new as PC\n\n# %%\ndef exvelo_base(xt, yt, ut, vt):\n u_out = griddata(np.vstack((x.flatten(), y.flatten())).transpose(),\n ut.flatten(), np.vstack((xt, yt)).transpose())\n v_out = griddata(np.vstack((x.flatten(), y.flatten())).transpose(),\n vt.flatten(), np.vstack((xt, yt)).transpose())\n return u_out, v_out\n\n# %% Setup\n\nAoA = (0, 5, 10) # np.linspace(0, 10, 3, dtype=int)\n\nn_weights = 11\n\ntemp = np.linspace(0, 1, n_weights)\nweights = np.vstack((temp, 1-temp)).transpose()\n\nstep = 1\nreg = 1e-8\norder = 2\n\n# %% Read Data\nx_full, y_full, u_full, v_full,\\\n vort_full, u_std, v_std, Cont, Mom = 
PC.Read_Data(AoA, step=step)\n\nx, y, u, v, vort = PC.make_square(x_full, y_full, u_full, v_full, vort_full,\n 1000, step=1)\n\n# %% Divide Pos & Neg\n\ndx = np.gradient(x[0, :])\ndy = np.gradient(y[:, 0])\n\nvort_pos = np.zeros((2, vort.shape[1], vort.shape[2]))\nvort_neg = np.zeros((2, vort.shape[1], vort.shape[2]))\n\nvort_pos[0] = vort[0]\nvort_neg[0] = -vort[0]\nvort_pos[1] = vort[-1]\nvort_neg[1] = -vort[-1]\n\nvort_pos[vort_pos < 0] = 0\nvort_neg[vort_neg < 0] = 0\n\nsum_pos = np.zeros((2,))\nsum_neg = np.zeros((2,))\nfor i in range(2):\n sum_pos[i] = np.sum(vort_pos[i])\n vort_pos[i] = vort_pos[i] / sum_pos[i]\n sum_neg[i] = np.sum(vort_neg[i])\n vort_neg[i] = vort_neg[i] / sum_neg[i]\n \nvort_pos_1 = np.array(vort[1])\nvort_neg_1 = np.array(-vort[1])\n\nvort_pos_1[vort_pos_1 < 0] = 0\nvort_neg_1[vort_neg_1 < 0] = 0\n\nsum_pos_1 = np.sum(vort_pos_1)\nsum_neg_1 = np.sum(vort_neg_1)\n\nvort_pos_1 /= sum_pos_1\nvort_neg_1 /= sum_neg_1\n\n# %% OT\n\nfor i, w in enumerate(weights):\n print('Starting OT')\n start_OT = time.time()\n vort_pos_OT = ot.bregman.convolutional_barycenter2d(vort_pos, reg, w)\n vort_neg_OT = ot.bregman.convolutional_barycenter2d(vort_neg, reg, w)\n vort_OT = vort_pos_OT*np.sum(w*sum_pos)\\\n - vort_neg_OT*np.sum(w*sum_neg)\n \n print('OT finished after {:.0f} mins'.format((time.time()-start_OT)/60))\n\n # %% Save\n \n np.savetxt(\"../Data/OT_Results/{:.0f}_{:.0f}_{:.0f}_{:.2f}_{:.2f}_x.csv\"\n .format(AoA[0], AoA[1], AoA[2], w[0], w[1]),\n x, delimiter=\",\")\n \n np.savetxt(\"../Data/OT_Results/{:.0f}_{:.0f}_{:.0f}_{:.2f}_{:.2f}_y.csv\"\n .format(AoA[0], AoA[1], AoA[2], w[0], w[1]),\n y, delimiter=\",\")\n\n np.savetxt(\"../Data/OT_Results/{:.0f}_{:.0f}_{:.0f}_{:.2f}_{:.2f}_pos.csv\"\n .format(AoA[0], AoA[1], AoA[2], w[0], w[1]),\n vort_pos_OT, delimiter=\",\")\n \n np.savetxt(\"../Data/OT_Results/{:.0f}_{:.0f}_{:.0f}_{:.2f}_{:.2f}_neg.csv\"\n .format(AoA[0], AoA[1], AoA[2], w[0], w[1]),\n vort_neg_OT, delimiter=\",\")\n \n np.savetxt(\"../Data/OT_Results/{:.0f}_{:.0f}_{:.0f}_{:.2f}_{:.2f}_sums.csv\"\n .format(AoA[0], AoA[1], AoA[2], w[0], w[1]),\n [sum_pos, sum_neg], delimiter=\",\")\n \n\n","sub_path":"Code/Save_OT.py","file_name":"Save_OT.py","file_ext":"py","file_size_in_byte":3227,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}